js/src/jsgc.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jsgc.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,5388 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +/*
    1.11 + * This code implements an incremental mark-and-sweep garbage collector, with
    1.12 + * most sweeping carried out in the background on a parallel thread.
    1.13 + *
    1.14 + * Full vs. zone GC
    1.15 + * ----------------
    1.16 + *
    1.17 + * The collector can collect all zones at once, or a subset. These types of
    1.18 + * collection are referred to as a full GC and a zone GC respectively.
    1.19 + *
    1.20 + * The atoms zone is only collected in a full GC since objects in any zone may
    1.21 + * have pointers to atoms, and these are not recorded in the cross compartment
    1.22 + * pointer map. Also, the atoms zone is not collected if any thread has an
    1.23 + * AutoKeepAtoms instance on the stack, or there are any exclusive threads using
    1.24 + * the runtime.
    1.25 + *
    1.26 + * It is possible for an incremental collection that started out as a full GC to
    1.27 + * become a zone GC if new zones are created during the course of the
    1.28 + * collection.
    1.29 + *
    1.30 + * Incremental collection
    1.31 + * ----------------------
    1.32 + *
    1.33 + * For a collection to be carried out incrementally the following conditions
    1.34 + * must be met:
    1.35 + *  - the collection must be run by calling js::GCSlice() rather than js::GC()
    1.36 + *  - the GC mode must have been set to JSGC_MODE_INCREMENTAL with
    1.37 + *    JS_SetGCParameter()
    1.38 + *  - no thread may have an AutoKeepAtoms instance on the stack
    1.39 + *  - all native objects that have their own trace hook must indicate that they
    1.40 + *    implement read and write barriers with the JSCLASS_IMPLEMENTS_BARRIERS
    1.41 + *    flag
    1.42 + *
    1.43 + * The last condition is an engine-internal mechanism to ensure that incremental
    1.44 + * collection is not carried out without the correct barriers being implemented.
    1.45 + * For more information see 'Incremental marking' below.
    1.46 + *
    1.47 + * If the collection is not incremental, all foreground activity happens inside
     1.48 + * a single call to GC() or GCSlice(). However, the collection is not complete
    1.49 + * until the background sweeping activity has finished.
    1.50 + *
    1.51 + * An incremental collection proceeds as a series of slices, interleaved with
    1.52 + * mutator activity, i.e. running JavaScript code. Slices are limited by a time
    1.53 + * budget. The slice finishes as soon as possible after the requested time has
    1.54 + * passed.
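          + *
          + * As a rough sketch (engine-internal pseudocode; the exact declarations are in
          + * jsapi.h and jsgc.h, and |budgetMs| below is just an illustrative name for a
          + * caller-chosen millisecond budget), code that wants incremental collection
          + * does something like:
          + *
          + *   JS_SetGCParameter(rt, JSGC_MODE, JSGC_MODE_INCREMENTAL);
          + *   ...
          + *   js::GCSlice(rt, GC_NORMAL, JS::gcreason::REFRESH_FRAME, budgetMs);
          + *
          + * Each GCSlice() call runs at most one slice and then returns to the mutator.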
    1.55 + *
    1.56 + * Collector states
    1.57 + * ----------------
    1.58 + *
    1.59 + * The collector proceeds through the following states, the current state being
    1.60 + * held in JSRuntime::gcIncrementalState:
    1.61 + *
    1.62 + *  - MARK_ROOTS - marks the stack and other roots
    1.63 + *  - MARK       - incrementally marks reachable things
    1.64 + *  - SWEEP      - sweeps zones in groups and continues marking unswept zones
    1.65 + *
    1.66 + * The MARK_ROOTS activity always takes place in the first slice. The next two
    1.67 + * states can take place over one or more slices.
    1.68 + *
    1.69 + * In other words an incremental collection proceeds like this:
    1.70 + *
    1.71 + * Slice 1:   MARK_ROOTS: Roots pushed onto the mark stack.
    1.72 + *            MARK:       The mark stack is processed by popping an element,
    1.73 + *                        marking it, and pushing its children.
    1.74 + *
    1.75 + *          ... JS code runs ...
    1.76 + *
    1.77 + * Slice 2:   MARK:       More mark stack processing.
    1.78 + *
    1.79 + *          ... JS code runs ...
    1.80 + *
    1.81 + * Slice n-1: MARK:       More mark stack processing.
    1.82 + *
    1.83 + *          ... JS code runs ...
    1.84 + *
    1.85 + * Slice n:   MARK:       Mark stack is completely drained.
    1.86 + *            SWEEP:      Select first group of zones to sweep and sweep them.
    1.87 + *
    1.88 + *          ... JS code runs ...
    1.89 + *
    1.90 + * Slice n+1: SWEEP:      Mark objects in unswept zones that were newly
    1.91 + *                        identified as alive (see below). Then sweep more zone
    1.92 + *                        groups.
    1.93 + *
    1.94 + *          ... JS code runs ...
    1.95 + *
    1.96 + * Slice n+2: SWEEP:      Mark objects in unswept zones that were newly
    1.97 + *                        identified as alive. Then sweep more zone groups.
    1.98 + *
    1.99 + *          ... JS code runs ...
   1.100 + *
    1.101 + * Slice m:   SWEEP:      Sweeping is finished, and background sweeping is
    1.102 + *                        started on the helper thread.
   1.103 + *
   1.104 + *          ... JS code runs, remaining sweeping done on background thread ...
   1.105 + *
   1.106 + * When background sweeping finishes the GC is complete.
   1.107 + *
   1.108 + * Incremental marking
   1.109 + * -------------------
   1.110 + *
   1.111 + * Incremental collection requires close collaboration with the mutator (i.e.,
   1.112 + * JS code) to guarantee correctness.
   1.113 + *
   1.114 + *  - During an incremental GC, if a memory location (except a root) is written
   1.115 + *    to, then the value it previously held must be marked. Write barriers
   1.116 + *    ensure this.
   1.117 + *
   1.118 + *  - Any object that is allocated during incremental GC must start out marked.
   1.119 + *
   1.120 + *  - Roots are marked in the first slice and hence don't need write barriers.
   1.121 + *    Roots are things like the C stack and the VM stack.
   1.122 + *
   1.123 + * The problem that write barriers solve is that between slices the mutator can
   1.124 + * change the object graph. We must ensure that it cannot do this in such a way
   1.125 + * that makes us fail to mark a reachable object (marking an unreachable object
   1.126 + * is tolerable).
   1.127 + *
   1.128 + * We use a snapshot-at-the-beginning algorithm to do this. This means that we
   1.129 + * promise to mark at least everything that is reachable at the beginning of
   1.130 + * collection. To implement it we mark the old contents of every non-root memory
   1.131 + * location written to by the mutator while the collection is in progress, using
   1.132 + * write barriers. This is described in gc/Barrier.h.
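          + *
          + * Conceptually (this is a simplified sketch, not the actual code in
          + * gc/Barrier.h), every barriered write of a GC pointer behaves like:
          + *
          + *   if (zone->needsBarrier())
          + *       mark(*slot);      // the old value is marked before it is lost
          + *   *slot = newValue;
          + *
          + * so a pointer that was reachable in the snapshot can never be overwritten
          + * without the thing it points to first being pushed onto the mark stack.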
   1.133 + *
   1.134 + * Incremental sweeping
   1.135 + * --------------------
   1.136 + *
   1.137 + * Sweeping is difficult to do incrementally because object finalizers must be
   1.138 + * run at the start of sweeping, before any mutator code runs. The reason is
   1.139 + * that some objects use their finalizers to remove themselves from caches. If
    1.140 + * mutator code were allowed to run after the start of sweeping, it could observe
   1.141 + * the state of the cache and create a new reference to an object that was just
   1.142 + * about to be destroyed.
   1.143 + *
   1.144 + * Sweeping all finalizable objects in one go would introduce long pauses, so
    1.145 + * instead sweeping is broken up into groups of zones. Zones which are not yet
   1.146 + * being swept are still marked, so the issue above does not apply.
   1.147 + *
   1.148 + * The order of sweeping is restricted by cross compartment pointers - for
    1.149 + * example, say that object |a| from zone A points to object |b| in zone B and
   1.150 + * neither object was marked when we transitioned to the SWEEP phase. Imagine we
   1.151 + * sweep B first and then return to the mutator. It's possible that the mutator
   1.152 + * could cause |a| to become alive through a read barrier (perhaps it was a
   1.153 + * shape that was accessed via a shape table). Then we would need to mark |b|,
   1.154 + * which |a| points to, but |b| has already been swept.
   1.155 + *
   1.156 + * So if there is such a pointer then marking of zone B must not finish before
   1.157 + * marking of zone A.  Pointers which form a cycle between zones therefore
   1.158 + * restrict those zones to being swept at the same time, and these are found
   1.159 + * using Tarjan's algorithm for finding the strongly connected components of a
   1.160 + * graph.
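          + *
          + * For example, if cross compartment pointers give the zone graph
          + *
          + *   A -> B,  B -> C,  C -> B
          + *
          + * then B and C form a cycle, so they are placed in the same group and swept
          + * together, while A may be placed in an earlier group and swept first.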
   1.161 + *
   1.162 + * GC things without finalizers, and things with finalizers that are able to run
   1.163 + * in the background, are swept on the background thread. This accounts for most
   1.164 + * of the sweeping work.
   1.165 + *
   1.166 + * Reset
   1.167 + * -----
   1.168 + *
   1.169 + * During incremental collection it is possible, although unlikely, for
   1.170 + * conditions to change such that incremental collection is no longer safe. In
   1.171 + * this case, the collection is 'reset' by ResetIncrementalGC(). If we are in
   1.172 + * the mark state, this just stops marking, but if we have started sweeping
   1.173 + * already, we continue until we have swept the current zone group. Following a
   1.174 + * reset, a new non-incremental collection is started.
   1.175 + */
   1.176 +
   1.177 +#include "jsgcinlines.h"
   1.178 +
   1.179 +#include "mozilla/ArrayUtils.h"
   1.180 +#include "mozilla/DebugOnly.h"
   1.181 +#include "mozilla/MemoryReporting.h"
   1.182 +#include "mozilla/Move.h"
   1.183 +
   1.184 +#include <string.h>     /* for memset used when DEBUG */
   1.185 +#ifndef XP_WIN
   1.186 +# include <unistd.h>
   1.187 +#endif
   1.188 +
   1.189 +#include "jsapi.h"
   1.190 +#include "jsatom.h"
   1.191 +#include "jscntxt.h"
   1.192 +#include "jscompartment.h"
   1.193 +#include "jsobj.h"
   1.194 +#include "jsscript.h"
   1.195 +#include "jstypes.h"
   1.196 +#include "jsutil.h"
   1.197 +#include "jswatchpoint.h"
   1.198 +#include "jsweakmap.h"
   1.199 +#ifdef XP_WIN
   1.200 +# include "jswin.h"
   1.201 +#endif
   1.202 +#include "prmjtime.h"
   1.203 +
   1.204 +#include "gc/FindSCCs.h"
   1.205 +#include "gc/GCInternals.h"
   1.206 +#include "gc/Marking.h"
   1.207 +#include "gc/Memory.h"
   1.208 +#ifdef JS_ION
   1.209 +# include "jit/BaselineJIT.h"
   1.210 +#endif
   1.211 +#include "jit/IonCode.h"
   1.212 +#include "js/SliceBudget.h"
   1.213 +#include "vm/Debugger.h"
   1.214 +#include "vm/ForkJoin.h"
   1.215 +#include "vm/ProxyObject.h"
   1.216 +#include "vm/Shape.h"
   1.217 +#include "vm/String.h"
   1.218 +#include "vm/TraceLogging.h"
   1.219 +#include "vm/WrapperObject.h"
   1.220 +
   1.221 +#include "jsobjinlines.h"
   1.222 +#include "jsscriptinlines.h"
   1.223 +
   1.224 +#include "vm/Stack-inl.h"
   1.225 +#include "vm/String-inl.h"
   1.226 +
   1.227 +using namespace js;
   1.228 +using namespace js::gc;
   1.229 +
   1.230 +using mozilla::ArrayEnd;
   1.231 +using mozilla::DebugOnly;
   1.232 +using mozilla::Maybe;
   1.233 +using mozilla::Swap;
   1.234 +
   1.235 +/* Perform a Full GC every 20 seconds if MaybeGC is called */
   1.236 +static const uint64_t GC_IDLE_FULL_SPAN = 20 * 1000 * 1000;
   1.237 +
   1.238 +/* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
   1.239 +static const int IGC_MARK_SLICE_MULTIPLIER = 2;
   1.240 +
   1.241 +#if defined(ANDROID) || defined(MOZ_B2G)
   1.242 +static const int MAX_EMPTY_CHUNK_COUNT = 2;
   1.243 +#else
   1.244 +static const int MAX_EMPTY_CHUNK_COUNT = 30;
   1.245 +#endif
   1.246 +
   1.247 +/* This array should be const, but that doesn't link right under GCC. */
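          +/*
          + * Maps a fixed slot count to the smallest object alloc kind whose objects can
          + * hold at least that many fixed slots; e.g. slotsToThingKind[3] is
          + * FINALIZE_OBJECT4.
          + */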
   1.248 +const AllocKind gc::slotsToThingKind[] = {
   1.249 +    /* 0 */  FINALIZE_OBJECT0,  FINALIZE_OBJECT2,  FINALIZE_OBJECT2,  FINALIZE_OBJECT4,
   1.250 +    /* 4 */  FINALIZE_OBJECT4,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,
   1.251 +    /* 8 */  FINALIZE_OBJECT8,  FINALIZE_OBJECT12, FINALIZE_OBJECT12, FINALIZE_OBJECT12,
   1.252 +    /* 12 */ FINALIZE_OBJECT12, FINALIZE_OBJECT16, FINALIZE_OBJECT16, FINALIZE_OBJECT16,
   1.253 +    /* 16 */ FINALIZE_OBJECT16
   1.254 +};
   1.255 +
   1.256 +static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
   1.257 +              "We have defined a slot count for each kind.");
   1.258 +
   1.259 +const uint32_t Arena::ThingSizes[] = {
   1.260 +    sizeof(JSObject),           /* FINALIZE_OBJECT0             */
   1.261 +    sizeof(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
   1.262 +    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
   1.263 +    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
   1.264 +    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
   1.265 +    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
   1.266 +    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
   1.267 +    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
   1.268 +    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
   1.269 +    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
   1.270 +    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
   1.271 +    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
   1.272 +    sizeof(JSScript),           /* FINALIZE_SCRIPT              */
   1.273 +    sizeof(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
   1.274 +    sizeof(Shape),              /* FINALIZE_SHAPE               */
   1.275 +    sizeof(BaseShape),          /* FINALIZE_BASE_SHAPE          */
   1.276 +    sizeof(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
   1.277 +    sizeof(JSFatInlineString),  /* FINALIZE_FAT_INLINE_STRING   */
   1.278 +    sizeof(JSString),           /* FINALIZE_STRING              */
   1.279 +    sizeof(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
   1.280 +    sizeof(jit::JitCode),       /* FINALIZE_JITCODE             */
   1.281 +};
   1.282 +
   1.283 +#define OFFSET(type) uint32_t(sizeof(ArenaHeader) + (ArenaSize - sizeof(ArenaHeader)) % sizeof(type))
   1.284 +
   1.285 +const uint32_t Arena::FirstThingOffsets[] = {
   1.286 +    OFFSET(JSObject),           /* FINALIZE_OBJECT0             */
   1.287 +    OFFSET(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
   1.288 +    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
   1.289 +    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
   1.290 +    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
   1.291 +    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
   1.292 +    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
   1.293 +    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
   1.294 +    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
   1.295 +    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
   1.296 +    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
   1.297 +    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
   1.298 +    OFFSET(JSScript),           /* FINALIZE_SCRIPT              */
   1.299 +    OFFSET(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
   1.300 +    OFFSET(Shape),              /* FINALIZE_SHAPE               */
   1.301 +    OFFSET(BaseShape),          /* FINALIZE_BASE_SHAPE          */
   1.302 +    OFFSET(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
   1.303 +    OFFSET(JSFatInlineString),  /* FINALIZE_FAT_INLINE_STRING   */
   1.304 +    OFFSET(JSString),           /* FINALIZE_STRING              */
   1.305 +    OFFSET(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
   1.306 +    OFFSET(jit::JitCode),       /* FINALIZE_JITCODE             */
   1.307 +};
   1.308 +
   1.309 +#undef OFFSET
   1.310 +
   1.311 +/*
   1.312 + * Finalization order for incrementally swept things.
   1.313 + */
   1.314 +
   1.315 +static const AllocKind FinalizePhaseStrings[] = {
   1.316 +    FINALIZE_EXTERNAL_STRING
   1.317 +};
   1.318 +
   1.319 +static const AllocKind FinalizePhaseScripts[] = {
   1.320 +    FINALIZE_SCRIPT,
   1.321 +    FINALIZE_LAZY_SCRIPT
   1.322 +};
   1.323 +
   1.324 +static const AllocKind FinalizePhaseJitCode[] = {
   1.325 +    FINALIZE_JITCODE
   1.326 +};
   1.327 +
   1.328 +static const AllocKind * const FinalizePhases[] = {
   1.329 +    FinalizePhaseStrings,
   1.330 +    FinalizePhaseScripts,
   1.331 +    FinalizePhaseJitCode
   1.332 +};
   1.333 +static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);
   1.334 +
   1.335 +static const int FinalizePhaseLength[] = {
   1.336 +    sizeof(FinalizePhaseStrings) / sizeof(AllocKind),
   1.337 +    sizeof(FinalizePhaseScripts) / sizeof(AllocKind),
   1.338 +    sizeof(FinalizePhaseJitCode) / sizeof(AllocKind)
   1.339 +};
   1.340 +
   1.341 +static const gcstats::Phase FinalizePhaseStatsPhase[] = {
   1.342 +    gcstats::PHASE_SWEEP_STRING,
   1.343 +    gcstats::PHASE_SWEEP_SCRIPT,
   1.344 +    gcstats::PHASE_SWEEP_JITCODE
   1.345 +};
   1.346 +
   1.347 +/*
   1.348 + * Finalization order for things swept in the background.
   1.349 + */
   1.350 +
   1.351 +static const AllocKind BackgroundPhaseObjects[] = {
   1.352 +    FINALIZE_OBJECT0_BACKGROUND,
   1.353 +    FINALIZE_OBJECT2_BACKGROUND,
   1.354 +    FINALIZE_OBJECT4_BACKGROUND,
   1.355 +    FINALIZE_OBJECT8_BACKGROUND,
   1.356 +    FINALIZE_OBJECT12_BACKGROUND,
   1.357 +    FINALIZE_OBJECT16_BACKGROUND
   1.358 +};
   1.359 +
   1.360 +static const AllocKind BackgroundPhaseStrings[] = {
   1.361 +    FINALIZE_FAT_INLINE_STRING,
   1.362 +    FINALIZE_STRING
   1.363 +};
   1.364 +
   1.365 +static const AllocKind BackgroundPhaseShapes[] = {
   1.366 +    FINALIZE_SHAPE,
   1.367 +    FINALIZE_BASE_SHAPE,
   1.368 +    FINALIZE_TYPE_OBJECT
   1.369 +};
   1.370 +
   1.371 +static const AllocKind * const BackgroundPhases[] = {
   1.372 +    BackgroundPhaseObjects,
   1.373 +    BackgroundPhaseStrings,
   1.374 +    BackgroundPhaseShapes
   1.375 +};
   1.376 +static const int BackgroundPhaseCount = sizeof(BackgroundPhases) / sizeof(AllocKind*);
   1.377 +
   1.378 +static const int BackgroundPhaseLength[] = {
   1.379 +    sizeof(BackgroundPhaseObjects) / sizeof(AllocKind),
   1.380 +    sizeof(BackgroundPhaseStrings) / sizeof(AllocKind),
   1.381 +    sizeof(BackgroundPhaseShapes) / sizeof(AllocKind)
   1.382 +};
   1.383 +
   1.384 +#ifdef DEBUG
   1.385 +void
   1.386 +ArenaHeader::checkSynchronizedWithFreeList() const
   1.387 +{
   1.388 +    /*
    1.389 +     * Do not allow access to the free list when its real head is still stored
   1.390 +     * in FreeLists and is not synchronized with this one.
   1.391 +     */
   1.392 +    JS_ASSERT(allocated());
   1.393 +
   1.394 +    /*
   1.395 +     * We can be called from the background finalization thread when the free
   1.396 +     * list in the zone can mutate at any moment. We cannot do any
   1.397 +     * checks in this case.
   1.398 +     */
   1.399 +    if (IsBackgroundFinalized(getAllocKind()) && zone->runtimeFromAnyThread()->gcHelperThread.onBackgroundThread())
   1.400 +        return;
   1.401 +
   1.402 +    FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
   1.403 +    if (firstSpan.isEmpty())
   1.404 +        return;
   1.405 +    const FreeSpan *list = zone->allocator.arenas.getFreeList(getAllocKind());
   1.406 +    if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
   1.407 +        return;
   1.408 +
   1.409 +    /*
   1.410 +     * Here this arena has free things, FreeList::lists[thingKind] is not
    1.411 +     * empty and also points to this arena. Thus they must be the same.
   1.412 +     */
   1.413 +    JS_ASSERT(firstSpan.isSameNonEmptySpan(list));
   1.414 +}
   1.415 +#endif
   1.416 +
   1.417 +/* static */ void
   1.418 +Arena::staticAsserts()
   1.419 +{
   1.420 +    static_assert(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT, "We have defined all thing sizes.");
   1.421 +    static_assert(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT, "We have defined all offsets.");
   1.422 +}
   1.423 +
   1.424 +void
   1.425 +Arena::setAsFullyUnused(AllocKind thingKind)
   1.426 +{
   1.427 +    FreeSpan entireList;
   1.428 +    entireList.first = thingsStart(thingKind);
   1.429 +    uintptr_t arenaAddr = aheader.arenaAddress();
   1.430 +    entireList.last = arenaAddr | ArenaMask;
   1.431 +    aheader.setFirstFreeSpan(&entireList);
   1.432 +}
   1.433 +
   1.434 +template<typename T>
   1.435 +inline bool
   1.436 +Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
   1.437 +{
   1.438 +    /* Enforce requirements on size of T. */
   1.439 +    JS_ASSERT(thingSize % CellSize == 0);
   1.440 +    JS_ASSERT(thingSize <= 255);
   1.441 +
   1.442 +    JS_ASSERT(aheader.allocated());
   1.443 +    JS_ASSERT(thingKind == aheader.getAllocKind());
   1.444 +    JS_ASSERT(thingSize == aheader.getThingSize());
   1.445 +    JS_ASSERT(!aheader.hasDelayedMarking);
   1.446 +    JS_ASSERT(!aheader.markOverflow);
   1.447 +    JS_ASSERT(!aheader.allocatedDuringIncremental);
   1.448 +
   1.449 +    uintptr_t thing = thingsStart(thingKind);
   1.450 +    uintptr_t lastByte = thingsEnd() - 1;
   1.451 +
   1.452 +    FreeSpan nextFree(aheader.getFirstFreeSpan());
   1.453 +    nextFree.checkSpan();
   1.454 +
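          +    /*
          +     * Walk the arena thing by thing, interleaving with the existing free span
          +     * list: marked things survive, unmarked things are finalized and poisoned,
          +     * and the runs of newly and already free things are accumulated into a
          +     * fresh free span list for the arena.
          +     */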
   1.455 +    FreeSpan newListHead;
   1.456 +    FreeSpan *newListTail = &newListHead;
   1.457 +    uintptr_t newFreeSpanStart = 0;
   1.458 +    bool allClear = true;
   1.459 +    DebugOnly<size_t> nmarked = 0;
   1.460 +    for (;; thing += thingSize) {
   1.461 +        JS_ASSERT(thing <= lastByte + 1);
   1.462 +        if (thing == nextFree.first) {
   1.463 +            JS_ASSERT(nextFree.last <= lastByte);
   1.464 +            if (nextFree.last == lastByte)
   1.465 +                break;
   1.466 +            JS_ASSERT(Arena::isAligned(nextFree.last, thingSize));
   1.467 +            if (!newFreeSpanStart)
   1.468 +                newFreeSpanStart = thing;
   1.469 +            thing = nextFree.last;
   1.470 +            nextFree = *nextFree.nextSpan();
   1.471 +            nextFree.checkSpan();
   1.472 +        } else {
   1.473 +            T *t = reinterpret_cast<T *>(thing);
   1.474 +            if (t->isMarked()) {
   1.475 +                allClear = false;
   1.476 +                nmarked++;
   1.477 +                if (newFreeSpanStart) {
   1.478 +                    JS_ASSERT(thing >= thingsStart(thingKind) + thingSize);
   1.479 +                    newListTail->first = newFreeSpanStart;
   1.480 +                    newListTail->last = thing - thingSize;
   1.481 +                    newListTail = newListTail->nextSpanUnchecked(thingSize);
   1.482 +                    newFreeSpanStart = 0;
   1.483 +                }
   1.484 +            } else {
   1.485 +                if (!newFreeSpanStart)
   1.486 +                    newFreeSpanStart = thing;
   1.487 +                t->finalize(fop);
   1.488 +                JS_POISON(t, JS_SWEPT_TENURED_PATTERN, thingSize);
   1.489 +            }
   1.490 +        }
   1.491 +    }
   1.492 +
   1.493 +    if (allClear) {
   1.494 +        JS_ASSERT(newListTail == &newListHead);
   1.495 +        JS_ASSERT(!newFreeSpanStart ||
   1.496 +                  newFreeSpanStart == thingsStart(thingKind));
   1.497 +        JS_EXTRA_POISON(data, JS_SWEPT_TENURED_PATTERN, sizeof(data));
   1.498 +        return true;
   1.499 +    }
   1.500 +
   1.501 +    newListTail->first = newFreeSpanStart ? newFreeSpanStart : nextFree.first;
   1.502 +    JS_ASSERT(Arena::isAligned(newListTail->first, thingSize));
   1.503 +    newListTail->last = lastByte;
   1.504 +
   1.505 +#ifdef DEBUG
   1.506 +    size_t nfree = 0;
   1.507 +    for (const FreeSpan *span = &newListHead; span != newListTail; span = span->nextSpan()) {
   1.508 +        span->checkSpan();
   1.509 +        JS_ASSERT(Arena::isAligned(span->first, thingSize));
   1.510 +        JS_ASSERT(Arena::isAligned(span->last, thingSize));
   1.511 +        nfree += (span->last - span->first) / thingSize + 1;
   1.512 +        JS_ASSERT(nfree + nmarked <= thingsPerArena(thingSize));
   1.513 +    }
   1.514 +    nfree += (newListTail->last + 1 - newListTail->first) / thingSize;
   1.515 +    JS_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
   1.516 +#endif
   1.517 +    aheader.setFirstFreeSpan(&newListHead);
   1.518 +
   1.519 +    return false;
   1.520 +}
   1.521 +
   1.522 +/*
    1.523 + * Insert an arena into the list in the appropriate position and update the cursor
   1.524 + * to ensure that any arena before the cursor is full.
   1.525 + */
   1.526 +void ArenaList::insert(ArenaHeader *a)
   1.527 +{
   1.528 +    JS_ASSERT(a);
   1.529 +    JS_ASSERT_IF(!head, cursor == &head);
   1.530 +    a->next = *cursor;
   1.531 +    *cursor = a;
   1.532 +    if (!a->hasFreeThings())
   1.533 +        cursor = &a->next;
   1.534 +}
   1.535 +
   1.536 +template<typename T>
   1.537 +static inline bool
   1.538 +FinalizeTypedArenas(FreeOp *fop,
   1.539 +                    ArenaHeader **src,
   1.540 +                    ArenaList &dest,
   1.541 +                    AllocKind thingKind,
   1.542 +                    SliceBudget &budget)
   1.543 +{
   1.544 +    /*
   1.545 +     * Finalize arenas from src list, releasing empty arenas and inserting the
   1.546 +     * others into dest in an appropriate position.
   1.547 +     */
   1.548 +
   1.549 +    /*
   1.550 +     * During parallel sections, we sometimes finalize the parallel arenas,
   1.551 +     * but in that case, we want to hold on to the memory in our arena
   1.552 +     * lists, not offer it up for reuse.
   1.553 +     */
   1.554 +    bool releaseArenas = !InParallelSection();
   1.555 +
   1.556 +    size_t thingSize = Arena::thingSize(thingKind);
   1.557 +
   1.558 +    while (ArenaHeader *aheader = *src) {
   1.559 +        *src = aheader->next;
   1.560 +        bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
   1.561 +        if (!allClear)
   1.562 +            dest.insert(aheader);
   1.563 +        else if (releaseArenas)
   1.564 +            aheader->chunk()->releaseArena(aheader);
   1.565 +        else
   1.566 +            aheader->chunk()->recycleArena(aheader, dest, thingKind);
   1.567 +
   1.568 +        budget.step(Arena::thingsPerArena(thingSize));
   1.569 +        if (budget.isOverBudget())
   1.570 +            return false;
   1.571 +    }
   1.572 +
   1.573 +    return true;
   1.574 +}
   1.575 +
   1.576 +/*
    1.577 + * Finalize the arenas on the src list, inserting the non-empty ones into dest.
    1.578 + * On return dest's cursor points to the first arena that has free things.
   1.579 + */
   1.580 +static bool
   1.581 +FinalizeArenas(FreeOp *fop,
   1.582 +               ArenaHeader **src,
   1.583 +               ArenaList &dest,
   1.584 +               AllocKind thingKind,
   1.585 +               SliceBudget &budget)
   1.586 +{
   1.587 +    switch(thingKind) {
   1.588 +      case FINALIZE_OBJECT0:
   1.589 +      case FINALIZE_OBJECT0_BACKGROUND:
   1.590 +      case FINALIZE_OBJECT2:
   1.591 +      case FINALIZE_OBJECT2_BACKGROUND:
   1.592 +      case FINALIZE_OBJECT4:
   1.593 +      case FINALIZE_OBJECT4_BACKGROUND:
   1.594 +      case FINALIZE_OBJECT8:
   1.595 +      case FINALIZE_OBJECT8_BACKGROUND:
   1.596 +      case FINALIZE_OBJECT12:
   1.597 +      case FINALIZE_OBJECT12_BACKGROUND:
   1.598 +      case FINALIZE_OBJECT16:
   1.599 +      case FINALIZE_OBJECT16_BACKGROUND:
   1.600 +        return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
   1.601 +      case FINALIZE_SCRIPT:
   1.602 +        return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
   1.603 +      case FINALIZE_LAZY_SCRIPT:
   1.604 +        return FinalizeTypedArenas<LazyScript>(fop, src, dest, thingKind, budget);
   1.605 +      case FINALIZE_SHAPE:
   1.606 +        return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
   1.607 +      case FINALIZE_BASE_SHAPE:
   1.608 +        return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
   1.609 +      case FINALIZE_TYPE_OBJECT:
   1.610 +        return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
   1.611 +      case FINALIZE_STRING:
   1.612 +        return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
   1.613 +      case FINALIZE_FAT_INLINE_STRING:
   1.614 +        return FinalizeTypedArenas<JSFatInlineString>(fop, src, dest, thingKind, budget);
   1.615 +      case FINALIZE_EXTERNAL_STRING:
   1.616 +        return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
   1.617 +      case FINALIZE_JITCODE:
   1.618 +#ifdef JS_ION
   1.619 +      {
   1.620 +        // JitCode finalization may release references on an executable
   1.621 +        // allocator that is accessed when requesting interrupts.
   1.622 +        JSRuntime::AutoLockForInterrupt lock(fop->runtime());
   1.623 +        return FinalizeTypedArenas<jit::JitCode>(fop, src, dest, thingKind, budget);
   1.624 +      }
   1.625 +#endif
   1.626 +      default:
   1.627 +        MOZ_ASSUME_UNREACHABLE("Invalid alloc kind");
   1.628 +    }
   1.629 +}
   1.630 +
   1.631 +static inline Chunk *
   1.632 +AllocChunk(JSRuntime *rt)
   1.633 +{
   1.634 +    return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
   1.635 +}
   1.636 +
   1.637 +static inline void
   1.638 +FreeChunk(JSRuntime *rt, Chunk *p)
   1.639 +{
   1.640 +    UnmapPages(rt, static_cast<void *>(p), ChunkSize);
   1.641 +}
   1.642 +
   1.643 +inline bool
   1.644 +ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
   1.645 +{
   1.646 +    /*
   1.647 +     * To minimize memory waste we do not want to run the background chunk
    1.648 +     * allocation if we have empty chunks or when the runtime needs just a few
   1.649 +     * of them.
   1.650 +     */
   1.651 +    return rt->gcHelperThread.canBackgroundAllocate() &&
   1.652 +           emptyCount == 0 &&
   1.653 +           rt->gcChunkSet.count() >= 4;
   1.654 +}
   1.655 +
   1.656 +/* Must be called with the GC lock taken. */
   1.657 +inline Chunk *
   1.658 +ChunkPool::get(JSRuntime *rt)
   1.659 +{
   1.660 +    JS_ASSERT(this == &rt->gcChunkPool);
   1.661 +
   1.662 +    Chunk *chunk = emptyChunkListHead;
   1.663 +    if (chunk) {
   1.664 +        JS_ASSERT(emptyCount);
   1.665 +        emptyChunkListHead = chunk->info.next;
   1.666 +        --emptyCount;
   1.667 +    } else {
   1.668 +        JS_ASSERT(!emptyCount);
   1.669 +        chunk = Chunk::allocate(rt);
   1.670 +        if (!chunk)
   1.671 +            return nullptr;
   1.672 +        JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
   1.673 +    }
   1.674 +    JS_ASSERT(chunk->unused());
   1.675 +    JS_ASSERT(!rt->gcChunkSet.has(chunk));
   1.676 +
   1.677 +    if (wantBackgroundAllocation(rt))
   1.678 +        rt->gcHelperThread.startBackgroundAllocationIfIdle();
   1.679 +
   1.680 +    return chunk;
   1.681 +}
   1.682 +
   1.683 +/* Must be called either during the GC or with the GC lock taken. */
   1.684 +inline void
   1.685 +ChunkPool::put(Chunk *chunk)
   1.686 +{
   1.687 +    chunk->info.age = 0;
   1.688 +    chunk->info.next = emptyChunkListHead;
   1.689 +    emptyChunkListHead = chunk;
   1.690 +    emptyCount++;
   1.691 +}
   1.692 +
   1.693 +/* Must be called either during the GC or with the GC lock taken. */
   1.694 +Chunk *
   1.695 +ChunkPool::expire(JSRuntime *rt, bool releaseAll)
   1.696 +{
   1.697 +    JS_ASSERT(this == &rt->gcChunkPool);
   1.698 +
   1.699 +    /*
   1.700 +     * Return old empty chunks to the system while preserving the order of
   1.701 +     * other chunks in the list. This way, if the GC runs several times
   1.702 +     * without emptying the list, the older chunks will stay at the tail
   1.703 +     * and are more likely to reach the max age.
   1.704 +     */
   1.705 +    Chunk *freeList = nullptr;
   1.706 +    int freeChunkCount = 0;
   1.707 +    for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
   1.708 +        JS_ASSERT(emptyCount);
   1.709 +        Chunk *chunk = *chunkp;
   1.710 +        JS_ASSERT(chunk->unused());
   1.711 +        JS_ASSERT(!rt->gcChunkSet.has(chunk));
   1.712 +        JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
   1.713 +        if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
   1.714 +            freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
   1.715 +        {
   1.716 +            *chunkp = chunk->info.next;
   1.717 +            --emptyCount;
   1.718 +            chunk->prepareToBeFreed(rt);
   1.719 +            chunk->info.next = freeList;
   1.720 +            freeList = chunk;
   1.721 +        } else {
   1.722 +            /* Keep the chunk but increase its age. */
   1.723 +            ++chunk->info.age;
   1.724 +            chunkp = &chunk->info.next;
   1.725 +        }
   1.726 +    }
   1.727 +    JS_ASSERT_IF(releaseAll, !emptyCount);
   1.728 +    return freeList;
   1.729 +}
   1.730 +
   1.731 +static void
   1.732 +FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
   1.733 +{
   1.734 +    while (Chunk *chunk = chunkListHead) {
   1.735 +        JS_ASSERT(!chunk->info.numArenasFreeCommitted);
   1.736 +        chunkListHead = chunk->info.next;
   1.737 +        FreeChunk(rt, chunk);
   1.738 +    }
   1.739 +}
   1.740 +
   1.741 +void
   1.742 +ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
   1.743 +{
   1.744 +    FreeChunkList(rt, expire(rt, releaseAll));
   1.745 +}
   1.746 +
   1.747 +/* static */ Chunk *
   1.748 +Chunk::allocate(JSRuntime *rt)
   1.749 +{
   1.750 +    Chunk *chunk = AllocChunk(rt);
   1.751 +    if (!chunk)
   1.752 +        return nullptr;
   1.753 +    chunk->init(rt);
   1.754 +    rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
   1.755 +    return chunk;
   1.756 +}
   1.757 +
   1.758 +/* Must be called with the GC lock taken. */
   1.759 +/* static */ inline void
   1.760 +Chunk::release(JSRuntime *rt, Chunk *chunk)
   1.761 +{
   1.762 +    JS_ASSERT(chunk);
   1.763 +    chunk->prepareToBeFreed(rt);
   1.764 +    FreeChunk(rt, chunk);
   1.765 +}
   1.766 +
   1.767 +inline void
   1.768 +Chunk::prepareToBeFreed(JSRuntime *rt)
   1.769 +{
   1.770 +    JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
   1.771 +    rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
   1.772 +    rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
   1.773 +
   1.774 +#ifdef DEBUG
   1.775 +    /*
   1.776 +     * Let FreeChunkList detect a missing prepareToBeFreed call before it
    1.777 +     * frees the chunk.
   1.778 +     */
   1.779 +    info.numArenasFreeCommitted = 0;
   1.780 +#endif
   1.781 +}
   1.782 +
   1.783 +void
   1.784 +Chunk::init(JSRuntime *rt)
   1.785 +{
   1.786 +    JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
   1.787 +
   1.788 +    /*
   1.789 +     * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
   1.790 +     * uninitialized data, which would happen before the first GC cycle.
   1.791 +     */
   1.792 +    bitmap.clear();
   1.793 +
   1.794 +    /*
   1.795 +     * Decommit the arenas. We do this after poisoning so that if the OS does
   1.796 +     * not have to recycle the pages, we still get the benefit of poisoning.
   1.797 +     */
   1.798 +    decommitAllArenas(rt);
   1.799 +
   1.800 +    /* Initialize the chunk info. */
   1.801 +    info.age = 0;
   1.802 +    info.trailer.location = ChunkLocationTenuredHeap;
   1.803 +    info.trailer.runtime = rt;
   1.804 +
    1.805 +    /* The rest of the info fields are initialized in PickChunk. */
   1.806 +}
   1.807 +
   1.808 +static inline Chunk **
   1.809 +GetAvailableChunkList(Zone *zone)
   1.810 +{
   1.811 +    JSRuntime *rt = zone->runtimeFromAnyThread();
   1.812 +    return zone->isSystem
   1.813 +           ? &rt->gcSystemAvailableChunkListHead
   1.814 +           : &rt->gcUserAvailableChunkListHead;
   1.815 +}
   1.816 +
   1.817 +inline void
   1.818 +Chunk::addToAvailableList(Zone *zone)
   1.819 +{
   1.820 +    insertToAvailableList(GetAvailableChunkList(zone));
   1.821 +}
   1.822 +
   1.823 +inline void
   1.824 +Chunk::insertToAvailableList(Chunk **insertPoint)
   1.825 +{
   1.826 +    JS_ASSERT(hasAvailableArenas());
   1.827 +    JS_ASSERT(!info.prevp);
   1.828 +    JS_ASSERT(!info.next);
   1.829 +    info.prevp = insertPoint;
   1.830 +    Chunk *insertBefore = *insertPoint;
   1.831 +    if (insertBefore) {
   1.832 +        JS_ASSERT(insertBefore->info.prevp == insertPoint);
   1.833 +        insertBefore->info.prevp = &info.next;
   1.834 +    }
   1.835 +    info.next = insertBefore;
   1.836 +    *insertPoint = this;
   1.837 +}
   1.838 +
   1.839 +inline void
   1.840 +Chunk::removeFromAvailableList()
   1.841 +{
   1.842 +    JS_ASSERT(info.prevp);
   1.843 +    *info.prevp = info.next;
   1.844 +    if (info.next) {
   1.845 +        JS_ASSERT(info.next->info.prevp == &info.next);
   1.846 +        info.next->info.prevp = info.prevp;
   1.847 +    }
   1.848 +    info.prevp = nullptr;
   1.849 +    info.next = nullptr;
   1.850 +}
   1.851 +
   1.852 +/*
   1.853 + * Search for and return the next decommitted Arena. Our goal is to keep
   1.854 + * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
   1.855 + * it to the most recently freed arena when we free, and forcing it to
   1.856 + * the last alloc + 1 when we allocate.
   1.857 + */
   1.858 +uint32_t
   1.859 +Chunk::findDecommittedArenaOffset()
   1.860 +{
    1.861 +    /* Note: lastDecommittedArenaOffset can be past the end of the list. */
   1.862 +    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
   1.863 +        if (decommittedArenas.get(i))
   1.864 +            return i;
   1.865 +    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
   1.866 +        if (decommittedArenas.get(i))
   1.867 +            return i;
   1.868 +    MOZ_ASSUME_UNREACHABLE("No decommitted arenas found.");
   1.869 +}
   1.870 +
   1.871 +ArenaHeader *
   1.872 +Chunk::fetchNextDecommittedArena()
   1.873 +{
   1.874 +    JS_ASSERT(info.numArenasFreeCommitted == 0);
   1.875 +    JS_ASSERT(info.numArenasFree > 0);
   1.876 +
   1.877 +    unsigned offset = findDecommittedArenaOffset();
   1.878 +    info.lastDecommittedArenaOffset = offset + 1;
   1.879 +    --info.numArenasFree;
   1.880 +    decommittedArenas.unset(offset);
   1.881 +
   1.882 +    Arena *arena = &arenas[offset];
   1.883 +    MarkPagesInUse(info.trailer.runtime, arena, ArenaSize);
   1.884 +    arena->aheader.setAsNotAllocated();
   1.885 +
   1.886 +    return &arena->aheader;
   1.887 +}
   1.888 +
   1.889 +inline ArenaHeader *
   1.890 +Chunk::fetchNextFreeArena(JSRuntime *rt)
   1.891 +{
   1.892 +    JS_ASSERT(info.numArenasFreeCommitted > 0);
   1.893 +    JS_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
   1.894 +    JS_ASSERT(info.numArenasFreeCommitted <= rt->gcNumArenasFreeCommitted);
   1.895 +
   1.896 +    ArenaHeader *aheader = info.freeArenasHead;
   1.897 +    info.freeArenasHead = aheader->next;
   1.898 +    --info.numArenasFreeCommitted;
   1.899 +    --info.numArenasFree;
   1.900 +    --rt->gcNumArenasFreeCommitted;
   1.901 +
   1.902 +    return aheader;
   1.903 +}
   1.904 +
   1.905 +ArenaHeader *
   1.906 +Chunk::allocateArena(Zone *zone, AllocKind thingKind)
   1.907 +{
   1.908 +    JS_ASSERT(hasAvailableArenas());
   1.909 +
   1.910 +    JSRuntime *rt = zone->runtimeFromAnyThread();
   1.911 +    if (!rt->isHeapMinorCollecting() && rt->gcBytes >= rt->gcMaxBytes)
   1.912 +        return nullptr;
   1.913 +
   1.914 +    ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
   1.915 +                           ? fetchNextFreeArena(rt)
   1.916 +                           : fetchNextDecommittedArena();
   1.917 +    aheader->init(zone, thingKind);
   1.918 +    if (MOZ_UNLIKELY(!hasAvailableArenas()))
   1.919 +        removeFromAvailableList();
   1.920 +
   1.921 +    rt->gcBytes += ArenaSize;
   1.922 +    zone->gcBytes += ArenaSize;
   1.923 +
   1.924 +    if (zone->gcBytes >= zone->gcTriggerBytes) {
   1.925 +        AutoUnlockGC unlock(rt);
   1.926 +        TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
   1.927 +    }
   1.928 +
   1.929 +    return aheader;
   1.930 +}
   1.931 +
   1.932 +inline void
   1.933 +Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
   1.934 +{
   1.935 +    JS_ASSERT(!aheader->allocated());
   1.936 +    aheader->next = info.freeArenasHead;
   1.937 +    info.freeArenasHead = aheader;
   1.938 +    ++info.numArenasFreeCommitted;
   1.939 +    ++info.numArenasFree;
   1.940 +    ++rt->gcNumArenasFreeCommitted;
   1.941 +}
   1.942 +
   1.943 +void
   1.944 +Chunk::recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind)
   1.945 +{
   1.946 +    aheader->getArena()->setAsFullyUnused(thingKind);
   1.947 +    dest.insert(aheader);
   1.948 +}
   1.949 +
   1.950 +void
   1.951 +Chunk::releaseArena(ArenaHeader *aheader)
   1.952 +{
   1.953 +    JS_ASSERT(aheader->allocated());
   1.954 +    JS_ASSERT(!aheader->hasDelayedMarking);
   1.955 +    Zone *zone = aheader->zone;
   1.956 +    JSRuntime *rt = zone->runtimeFromAnyThread();
   1.957 +    AutoLockGC maybeLock;
   1.958 +    if (rt->gcHelperThread.sweeping())
   1.959 +        maybeLock.lock(rt);
   1.960 +
   1.961 +    JS_ASSERT(rt->gcBytes >= ArenaSize);
   1.962 +    JS_ASSERT(zone->gcBytes >= ArenaSize);
   1.963 +    if (rt->gcHelperThread.sweeping())
   1.964 +        zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
   1.965 +    rt->gcBytes -= ArenaSize;
   1.966 +    zone->gcBytes -= ArenaSize;
   1.967 +
   1.968 +    aheader->setAsNotAllocated();
   1.969 +    addArenaToFreeList(rt, aheader);
   1.970 +
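          +    /*
          +     * A chunk that was full regains an available arena and goes back on the
          +     * available list; a chunk that becomes completely unused is removed from
          +     * the chunk set, decommitted and returned to the empty chunk pool.
          +     */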
   1.971 +    if (info.numArenasFree == 1) {
   1.972 +        JS_ASSERT(!info.prevp);
   1.973 +        JS_ASSERT(!info.next);
   1.974 +        addToAvailableList(zone);
   1.975 +    } else if (!unused()) {
   1.976 +        JS_ASSERT(info.prevp);
   1.977 +    } else {
   1.978 +        rt->gcChunkSet.remove(this);
   1.979 +        removeFromAvailableList();
   1.980 +        JS_ASSERT(info.numArenasFree == ArenasPerChunk);
   1.981 +        decommitAllArenas(rt);
   1.982 +        rt->gcChunkPool.put(this);
   1.983 +    }
   1.984 +}
   1.985 +
   1.986 +/* The caller must hold the GC lock. */
   1.987 +static Chunk *
   1.988 +PickChunk(Zone *zone)
   1.989 +{
   1.990 +    JSRuntime *rt = zone->runtimeFromAnyThread();
   1.991 +    Chunk **listHeadp = GetAvailableChunkList(zone);
   1.992 +    Chunk *chunk = *listHeadp;
   1.993 +    if (chunk)
   1.994 +        return chunk;
   1.995 +
   1.996 +    chunk = rt->gcChunkPool.get(rt);
   1.997 +    if (!chunk)
   1.998 +        return nullptr;
   1.999 +
  1.1000 +    rt->gcChunkAllocationSinceLastGC = true;
  1.1001 +
  1.1002 +    /*
  1.1003 +     * FIXME bug 583732 - chunk is newly allocated and cannot be present in
  1.1004 +     * the table so using ordinary lookupForAdd is suboptimal here.
  1.1005 +     */
  1.1006 +    GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
  1.1007 +    JS_ASSERT(!p);
  1.1008 +    if (!rt->gcChunkSet.add(p, chunk)) {
  1.1009 +        Chunk::release(rt, chunk);
  1.1010 +        return nullptr;
  1.1011 +    }
  1.1012 +
  1.1013 +    chunk->info.prevp = nullptr;
  1.1014 +    chunk->info.next = nullptr;
  1.1015 +    chunk->addToAvailableList(zone);
  1.1016 +
  1.1017 +    return chunk;
  1.1018 +}
  1.1019 +
  1.1020 +#ifdef JS_GC_ZEAL
  1.1021 +
  1.1022 +extern void
  1.1023 +js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
  1.1024 +{
  1.1025 +    if (rt->gcVerifyPreData)
  1.1026 +        VerifyBarriers(rt, PreBarrierVerifier);
  1.1027 +    if (rt->gcVerifyPostData)
  1.1028 +        VerifyBarriers(rt, PostBarrierVerifier);
  1.1029 +
  1.1030 +#ifdef JSGC_GENERATIONAL
  1.1031 +    if (rt->gcZeal_ == ZealGenerationalGCValue) {
  1.1032 +        MinorGC(rt, JS::gcreason::DEBUG_GC);
  1.1033 +        rt->gcNursery.leaveZealMode();
  1.1034 +    }
  1.1035 +
  1.1036 +    if (zeal == ZealGenerationalGCValue)
  1.1037 +        rt->gcNursery.enterZealMode();
  1.1038 +#endif
  1.1039 +
  1.1040 +    bool schedule = zeal >= js::gc::ZealAllocValue;
  1.1041 +    rt->gcZeal_ = zeal;
  1.1042 +    rt->gcZealFrequency = frequency;
  1.1043 +    rt->gcNextScheduled = schedule ? frequency : 0;
  1.1044 +}
  1.1045 +
  1.1046 +static bool
  1.1047 +InitGCZeal(JSRuntime *rt)
  1.1048 +{
  1.1049 +    const char *env = getenv("JS_GC_ZEAL");
  1.1050 +    if (!env)
  1.1051 +        return true;
  1.1052 +
  1.1053 +    int zeal = -1;
  1.1054 +    int frequency = JS_DEFAULT_ZEAL_FREQ;
  1.1055 +    if (strcmp(env, "help") != 0) {
  1.1056 +        zeal = atoi(env);
  1.1057 +        const char *p = strchr(env, ',');
  1.1058 +        if (p)
  1.1059 +            frequency = atoi(p + 1);
  1.1060 +    }
  1.1061 +
  1.1062 +    if (zeal < 0 || zeal > ZealLimit || frequency < 0) {
  1.1063 +        fprintf(stderr,
  1.1064 +                "Format: JS_GC_ZEAL=N[,F]\n"
  1.1065 +                "N indicates \"zealousness\":\n"
  1.1066 +                "  0: no additional GCs\n"
  1.1067 +                "  1: additional GCs at common danger points\n"
  1.1068 +                "  2: GC every F allocations (default: 100)\n"
  1.1069 +                "  3: GC when the window paints (browser only)\n"
  1.1070 +                "  4: Verify pre write barriers between instructions\n"
  1.1071 +                "  5: Verify pre write barriers between paints\n"
  1.1072 +                "  6: Verify stack rooting\n"
  1.1073 +                "  7: Collect the nursery every N nursery allocations\n"
  1.1074 +                "  8: Incremental GC in two slices: 1) mark roots 2) finish collection\n"
  1.1075 +                "  9: Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
  1.1076 +                " 10: Incremental GC in multiple slices\n"
  1.1077 +                " 11: Verify post write barriers between instructions\n"
  1.1078 +                " 12: Verify post write barriers between paints\n"
  1.1079 +                " 13: Purge analysis state every F allocations (default: 100)\n");
  1.1080 +        return false;
  1.1081 +    }
  1.1082 +
  1.1083 +    SetGCZeal(rt, zeal, frequency);
  1.1084 +    return true;
  1.1085 +}
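          +
          +/*
          + * For example, running with JS_GC_ZEAL=2,100 in the environment selects zeal
          + * mode 2 with frequency 100, i.e. a collection is scheduled every 100
          + * allocations (see the format string above).
          + */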
  1.1086 +
  1.1087 +#endif
  1.1088 +
  1.1089 +/* Lifetime for type sets attached to scripts containing observed types. */
  1.1090 +static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
  1.1091 +
  1.1092 +bool
  1.1093 +js_InitGC(JSRuntime *rt, uint32_t maxbytes)
  1.1094 +{
  1.1095 +    InitMemorySubsystem(rt);
  1.1096 +
  1.1097 +    if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
  1.1098 +        return false;
  1.1099 +
  1.1100 +    if (!rt->gcRootsHash.init(256))
  1.1101 +        return false;
  1.1102 +
  1.1103 +    if (!rt->gcHelperThread.init())
  1.1104 +        return false;
  1.1105 +
  1.1106 +    /*
  1.1107 +     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
  1.1108 +     * for default backward API compatibility.
  1.1109 +     */
  1.1110 +    rt->gcMaxBytes = maxbytes;
  1.1111 +    rt->setGCMaxMallocBytes(maxbytes);
  1.1112 +
  1.1113 +#ifndef JS_MORE_DETERMINISTIC
  1.1114 +    rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
  1.1115 +#endif
  1.1116 +
  1.1117 +#ifdef JSGC_GENERATIONAL
  1.1118 +    if (!rt->gcNursery.init())
  1.1119 +        return false;
  1.1120 +
  1.1121 +    if (!rt->gcStoreBuffer.enable())
  1.1122 +        return false;
  1.1123 +#endif
  1.1124 +
  1.1125 +#ifdef JS_GC_ZEAL
  1.1126 +    if (!InitGCZeal(rt))
  1.1127 +        return false;
  1.1128 +#endif
  1.1129 +
  1.1130 +    return true;
  1.1131 +}
  1.1132 +
  1.1133 +static void
  1.1134 +RecordNativeStackTopForGC(JSRuntime *rt)
  1.1135 +{
  1.1136 +    ConservativeGCData *cgcd = &rt->conservativeGC;
  1.1137 +
  1.1138 +#ifdef JS_THREADSAFE
  1.1139 +    /* Record the stack top here only if we are called from a request. */
  1.1140 +    if (!rt->requestDepth)
  1.1141 +        return;
  1.1142 +#endif
  1.1143 +    cgcd->recordStackTop();
  1.1144 +}
  1.1145 +
  1.1146 +void
  1.1147 +js_FinishGC(JSRuntime *rt)
  1.1148 +{
  1.1149 +    /*
  1.1150 +     * Wait until the background finalization stops and the helper thread
  1.1151 +     * shuts down before we forcefully release any remaining GC memory.
  1.1152 +     */
  1.1153 +    rt->gcHelperThread.finish();
  1.1154 +
  1.1155 +#ifdef JS_GC_ZEAL
  1.1156 +    /* Free memory associated with GC verification. */
  1.1157 +    FinishVerifier(rt);
  1.1158 +#endif
  1.1159 +
  1.1160 +    /* Delete all remaining zones. */
  1.1161 +    if (rt->gcInitialized) {
  1.1162 +        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.1163 +            for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
  1.1164 +                js_delete(comp.get());
  1.1165 +            js_delete(zone.get());
  1.1166 +        }
  1.1167 +    }
  1.1168 +
  1.1169 +    rt->zones.clear();
  1.1170 +
  1.1171 +    rt->gcSystemAvailableChunkListHead = nullptr;
  1.1172 +    rt->gcUserAvailableChunkListHead = nullptr;
  1.1173 +    if (rt->gcChunkSet.initialized()) {
  1.1174 +        for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
  1.1175 +            Chunk::release(rt, r.front());
  1.1176 +        rt->gcChunkSet.clear();
  1.1177 +    }
  1.1178 +
  1.1179 +    rt->gcChunkPool.expireAndFree(rt, true);
  1.1180 +
  1.1181 +    if (rt->gcRootsHash.initialized())
  1.1182 +        rt->gcRootsHash.clear();
  1.1183 +
  1.1184 +    rt->functionPersistentRooteds.clear();
  1.1185 +    rt->idPersistentRooteds.clear();
  1.1186 +    rt->objectPersistentRooteds.clear();
  1.1187 +    rt->scriptPersistentRooteds.clear();
  1.1188 +    rt->stringPersistentRooteds.clear();
  1.1189 +    rt->valuePersistentRooteds.clear();
  1.1190 +}
  1.1191 +
  1.1192 +template <typename T> struct BarrierOwner {};
  1.1193 +template <typename T> struct BarrierOwner<T *> { typedef T result; };
  1.1194 +template <> struct BarrierOwner<Value> { typedef HeapValue result; };
  1.1195 +
  1.1196 +template <typename T>
  1.1197 +static bool
  1.1198 +AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
  1.1199 +{
  1.1200 +    /*
  1.1201 +     * Sometimes Firefox will hold weak references to objects and then convert
  1.1202 +     * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
  1.1203 +     * or ModifyBusyCount in workers). We need a read barrier to cover these
  1.1204 +     * cases.
  1.1205 +     */
  1.1206 +    if (rt->gcIncrementalState != NO_INCREMENTAL)
  1.1207 +        BarrierOwner<T>::result::writeBarrierPre(*rp);
  1.1208 +
  1.1209 +    return rt->gcRootsHash.put((void *)rp, RootInfo(name, rootType));
  1.1210 +}
  1.1211 +
  1.1212 +template <typename T>
  1.1213 +static bool
  1.1214 +AddRoot(JSContext *cx, T *rp, const char *name, JSGCRootType rootType)
  1.1215 +{
  1.1216 +    bool ok = AddRoot(cx->runtime(), rp, name, rootType);
  1.1217 +    if (!ok)
  1.1218 +        JS_ReportOutOfMemory(cx);
  1.1219 +    return ok;
  1.1220 +}
  1.1221 +
  1.1222 +bool
  1.1223 +js::AddValueRoot(JSContext *cx, Value *vp, const char *name)
  1.1224 +{
  1.1225 +    return AddRoot(cx, vp, name, JS_GC_ROOT_VALUE_PTR);
  1.1226 +}
  1.1227 +
  1.1228 +extern bool
  1.1229 +js::AddValueRootRT(JSRuntime *rt, js::Value *vp, const char *name)
  1.1230 +{
  1.1231 +    return AddRoot(rt, vp, name, JS_GC_ROOT_VALUE_PTR);
  1.1232 +}
  1.1233 +
  1.1234 +extern bool
  1.1235 +js::AddStringRoot(JSContext *cx, JSString **rp, const char *name)
  1.1236 +{
  1.1237 +    return AddRoot(cx, rp, name, JS_GC_ROOT_STRING_PTR);
  1.1238 +}
  1.1239 +
  1.1240 +extern bool
  1.1241 +js::AddObjectRoot(JSContext *cx, JSObject **rp, const char *name)
  1.1242 +{
  1.1243 +    return AddRoot(cx, rp, name, JS_GC_ROOT_OBJECT_PTR);
  1.1244 +}
  1.1245 +
  1.1246 +extern bool
  1.1247 +js::AddObjectRoot(JSRuntime *rt, JSObject **rp, const char *name)
  1.1248 +{
  1.1249 +    return AddRoot(rt, rp, name, JS_GC_ROOT_OBJECT_PTR);
  1.1250 +}
  1.1251 +
  1.1252 +extern bool
  1.1253 +js::AddScriptRoot(JSContext *cx, JSScript **rp, const char *name)
  1.1254 +{
  1.1255 +    return AddRoot(cx, rp, name, JS_GC_ROOT_SCRIPT_PTR);
  1.1256 +}
  1.1257 +
  1.1258 +extern JS_FRIEND_API(bool)
  1.1259 +js::AddRawValueRoot(JSContext *cx, Value *vp, const char *name)
  1.1260 +{
  1.1261 +    return AddRoot(cx, vp, name, JS_GC_ROOT_VALUE_PTR);
  1.1262 +}
  1.1263 +
  1.1264 +extern JS_FRIEND_API(void)
  1.1265 +js::RemoveRawValueRoot(JSContext *cx, Value *vp)
  1.1266 +{
  1.1267 +    RemoveRoot(cx->runtime(), vp);
  1.1268 +}
  1.1269 +
  1.1270 +void
  1.1271 +js::RemoveRoot(JSRuntime *rt, void *rp)
  1.1272 +{
  1.1273 +    rt->gcRootsHash.remove(rp);
  1.1274 +    rt->gcPoke = true;
  1.1275 +}
  1.1276 +
  1.1277 +typedef RootedValueMap::Range RootRange;
  1.1278 +typedef RootedValueMap::Entry RootEntry;
  1.1279 +typedef RootedValueMap::Enum RootEnum;
  1.1280 +
  1.1281 +static size_t
  1.1282 +ComputeTriggerBytes(Zone *zone, size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
  1.1283 +{
  1.1284 +    size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, zone->runtimeFromMainThread()->gcAllocationThreshold);
  1.1285 +    double trigger = double(base) * zone->gcHeapGrowthFactor;
  1.1286 +    return size_t(Min(double(maxBytes), trigger));
  1.1287 +}
  1.1288 +
  1.1289 +void
  1.1290 +Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
  1.1291 +{
  1.1292 +    /*
  1.1293 +     * The heap growth factor depends on the heap size after a GC and the GC frequency.
  1.1294 +     * For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
  1.1295 +     * For high frequency GCs we let the heap grow depending on the heap size:
  1.1296 +     *   lastBytes < highFrequencyLowLimit: 300%
  1.1297 +     *   lastBytes > highFrequencyHighLimit: 150%
  1.1298 +     *   otherwise: linear interpolation between 150% and 300% based on lastBytes
  1.1299 +     */
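          +    /*
          +     * Worked example (with purely illustrative limits of 100MB and 500MB): a
          +     * 300MB heap lies halfway between the limits, so the growth factor is the
          +     * midpoint of 3.0 and 1.5, i.e. 2.25, and the next trigger is roughly
          +     * 300MB * 2.25.
          +     */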
  1.1300 +    JSRuntime *rt = runtimeFromMainThread();
  1.1301 +
  1.1302 +    if (!rt->gcDynamicHeapGrowth) {
  1.1303 +        gcHeapGrowthFactor = 3.0;
  1.1304 +    } else if (lastBytes < 1 * 1024 * 1024) {
  1.1305 +        gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
  1.1306 +    } else {
  1.1307 +        JS_ASSERT(rt->gcHighFrequencyHighLimitBytes > rt->gcHighFrequencyLowLimitBytes);
  1.1308 +        uint64_t now = PRMJ_Now();
  1.1309 +        if (rt->gcLastGCTime && rt->gcLastGCTime + rt->gcHighFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > now) {
  1.1310 +            if (lastBytes <= rt->gcHighFrequencyLowLimitBytes) {
  1.1311 +                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMax;
  1.1312 +            } else if (lastBytes >= rt->gcHighFrequencyHighLimitBytes) {
  1.1313 +                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMin;
  1.1314 +            } else {
  1.1315 +                double k = (rt->gcHighFrequencyHeapGrowthMin - rt->gcHighFrequencyHeapGrowthMax)
  1.1316 +                           / (double)(rt->gcHighFrequencyHighLimitBytes - rt->gcHighFrequencyLowLimitBytes);
  1.1317 +                gcHeapGrowthFactor = (k * (lastBytes - rt->gcHighFrequencyLowLimitBytes)
  1.1318 +                                     + rt->gcHighFrequencyHeapGrowthMax);
  1.1319 +                JS_ASSERT(gcHeapGrowthFactor <= rt->gcHighFrequencyHeapGrowthMax
  1.1320 +                          && gcHeapGrowthFactor >= rt->gcHighFrequencyHeapGrowthMin);
  1.1321 +            }
  1.1322 +            rt->gcHighFrequencyGC = true;
  1.1323 +        } else {
  1.1324 +            gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
  1.1325 +            rt->gcHighFrequencyGC = false;
  1.1326 +        }
  1.1327 +    }
  1.1328 +    gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gcMaxBytes, gckind);
  1.1329 +}
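As a rough illustration of the interpolation described in the comment above, here is a standalone sketch using made-up tuning values (the limits and growth factors below are hypothetical stand-ins, not the engine's defaults):

#include <cstdio>

// Hypothetical tuning values, chosen only for this example.
static const double growthMax = 3.0;                  // 300% below the low limit
static const double growthMin = 1.5;                  // 150% above the high limit
static const double lowLimit  = 100.0 * 1024 * 1024;  // stand-in for gcHighFrequencyLowLimitBytes
static const double highLimit = 500.0 * 1024 * 1024;  // stand-in for gcHighFrequencyHighLimitBytes

static double
ExampleHighFrequencyGrowth(double lastBytes)
{
    if (lastBytes <= lowLimit)
        return growthMax;
    if (lastBytes >= highLimit)
        return growthMin;
    // Linear interpolation between growthMax and growthMin, mirroring the
    // calculation in Zone::setGCLastBytes above.
    double k = (growthMin - growthMax) / (highLimit - lowLimit);
    return k * (lastBytes - lowLimit) + growthMax;
}

int main()
{
    // A 300 MiB heap lies halfway between the limits, so the factor is halfway
    // between 3.0 and 1.5, i.e. 2.25; the next trigger would then be roughly
    // 300 MiB * 2.25 = 675 MiB, capped at gcMaxBytes by ComputeTriggerBytes.
    std::printf("%.2f\n", ExampleHighFrequencyGrowth(300.0 * 1024 * 1024));
    return 0;
}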
  1.1330 +
  1.1331 +void
  1.1332 +Zone::reduceGCTriggerBytes(size_t amount)
  1.1333 +{
  1.1334 +    JS_ASSERT(amount > 0);
  1.1335 +    JS_ASSERT(gcTriggerBytes >= amount);
  1.1336 +    if (gcTriggerBytes - amount < runtimeFromAnyThread()->gcAllocationThreshold * gcHeapGrowthFactor)
  1.1337 +        return;
  1.1338 +    gcTriggerBytes -= amount;
  1.1339 +}
  1.1340 +
  1.1341 +Allocator::Allocator(Zone *zone)
  1.1342 +  : zone_(zone)
  1.1343 +{}
  1.1344 +
  1.1345 +inline void
  1.1346 +GCMarker::delayMarkingArena(ArenaHeader *aheader)
  1.1347 +{
  1.1348 +    if (aheader->hasDelayedMarking) {
  1.1349 +        /* Arena already scheduled to be marked later */
  1.1350 +        return;
  1.1351 +    }
  1.1352 +    aheader->setNextDelayedMarking(unmarkedArenaStackTop);
  1.1353 +    unmarkedArenaStackTop = aheader;
  1.1354 +    markLaterArenas++;
  1.1355 +}
  1.1356 +
  1.1357 +void
  1.1358 +GCMarker::delayMarkingChildren(const void *thing)
  1.1359 +{
  1.1360 +    const Cell *cell = reinterpret_cast<const Cell *>(thing);
  1.1361 +    cell->arenaHeader()->markOverflow = 1;
  1.1362 +    delayMarkingArena(cell->arenaHeader());
  1.1363 +}
  1.1364 +
  1.1365 +inline void
  1.1366 +ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
  1.1367 +{
  1.1368 +    for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
  1.1369 +        FreeSpan *headSpan = &freeLists[i];
  1.1370 +        if (!headSpan->isEmpty()) {
  1.1371 +            ArenaHeader *aheader = headSpan->arenaHeader();
  1.1372 +            aheader->allocatedDuringIncremental = true;
  1.1373 +            rt->gcMarker.delayMarkingArena(aheader);
  1.1374 +        }
  1.1375 +    }
  1.1376 +}
  1.1377 +
  1.1378 +static inline void
  1.1379 +PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
  1.1380 +{
  1.1381 +    arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
  1.1382 +    runtime->gcArenasAllocatedDuringSweep = arena;
  1.1383 +}
  1.1384 +
  1.1385 +inline void *
  1.1386 +ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
  1.1387 +{
  1.1388 +    /*
  1.1389 +     * Parallel JS Note:
  1.1390 +     *
  1.1391 +     * This function can be called from parallel threads, all of which
  1.1392 +     * are associated with the same compartment. In that case, each
  1.1393 +     * thread has its own distinct ArenaLists. Therefore, whenever we
  1.1394 +     * fall through to PickChunk() we must be sure that we are holding
  1.1395 +     * a lock.
  1.1396 +     */
  1.1397 +
  1.1398 +    Chunk *chunk = nullptr;
  1.1399 +
  1.1400 +    ArenaList *al = &arenaLists[thingKind];
  1.1401 +    AutoLockGC maybeLock;
  1.1402 +
  1.1403 +#ifdef JS_THREADSAFE
  1.1404 +    volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
  1.1405 +    if (*bfs != BFS_DONE) {
  1.1406 +        /*
  1.1407 +         * We cannot search the arena list for free things while background
  1.1408 +         * finalization runs, since it can modify the list head or cursor at
  1.1409 +         * any moment. So we always allocate a new arena in that case.
  1.1410 +         */
  1.1411 +        maybeLock.lock(zone->runtimeFromAnyThread());
  1.1412 +        if (*bfs == BFS_RUN) {
  1.1413 +            JS_ASSERT(!*al->cursor);
  1.1414 +            chunk = PickChunk(zone);
  1.1415 +            if (!chunk) {
  1.1416 +                /*
  1.1417 +                 * Let the caller wait for the background finalization to
  1.1418 +                 * finish and then restart the allocation attempt.
  1.1419 +                 */
  1.1420 +                return nullptr;
  1.1421 +            }
  1.1422 +        } else if (*bfs == BFS_JUST_FINISHED) {
  1.1423 +            /* See comments before BackgroundFinalizeState definition. */
  1.1424 +            *bfs = BFS_DONE;
  1.1425 +        } else {
  1.1426 +            JS_ASSERT(*bfs == BFS_DONE);
  1.1427 +        }
  1.1428 +    }
  1.1429 +#endif /* JS_THREADSAFE */
  1.1430 +
  1.1431 +    if (!chunk) {
  1.1432 +        if (ArenaHeader *aheader = *al->cursor) {
  1.1433 +            JS_ASSERT(aheader->hasFreeThings());
  1.1434 +
  1.1435 +            /*
  1.1436 +             * Normally, empty arenas are returned to the chunk
  1.1437 +             * and should not be present on the list. In parallel
  1.1438 +             * execution, however, we keep empty arenas in the arena
  1.1439 +             * list to avoid synchronizing on the chunk.
  1.1440 +             */
  1.1441 +            JS_ASSERT(!aheader->isEmpty() || InParallelSection());
  1.1442 +            al->cursor = &aheader->next;
  1.1443 +
  1.1444 +            /*
  1.1445 +             * Move the free span stored in the arena to the free list and
  1.1446 +             * allocate from it.
  1.1447 +             */
  1.1448 +            freeLists[thingKind] = aheader->getFirstFreeSpan();
  1.1449 +            aheader->setAsFullyUsed();
  1.1450 +            if (MOZ_UNLIKELY(zone->wasGCStarted())) {
  1.1451 +                if (zone->needsBarrier()) {
  1.1452 +                    aheader->allocatedDuringIncremental = true;
  1.1453 +                    zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
  1.1454 +                } else if (zone->isGCSweeping()) {
  1.1455 +                    PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
  1.1456 +                }
  1.1457 +            }
  1.1458 +            return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
  1.1459 +        }
  1.1460 +
  1.1461 +        /* Make sure we hold the GC lock before we call PickChunk. */
  1.1462 +        if (!maybeLock.locked())
  1.1463 +            maybeLock.lock(zone->runtimeFromAnyThread());
  1.1464 +        chunk = PickChunk(zone);
  1.1465 +        if (!chunk)
  1.1466 +            return nullptr;
  1.1467 +    }
  1.1468 +
  1.1469 +    /*
  1.1470 +     * While we still hold the GC lock, get an arena from some chunk, mark it
  1.1471 +     * as full (its single free span is moved to the free lists), and insert
  1.1472 +     * it into the list as a fully allocated arena.
  1.1473 +     *
  1.1474 +     * We add the arena before the head, not after the tail pointed to by the
  1.1475 +     * cursor, so that after the GC the most recently added arena will be used
  1.1476 +     * first for allocations, improving cache locality.
  1.1477 +     */
  1.1478 +    JS_ASSERT(!*al->cursor);
  1.1479 +    ArenaHeader *aheader = chunk->allocateArena(zone, thingKind);
  1.1480 +    if (!aheader)
  1.1481 +        return nullptr;
  1.1482 +
  1.1483 +    if (MOZ_UNLIKELY(zone->wasGCStarted())) {
  1.1484 +        if (zone->needsBarrier()) {
  1.1485 +            aheader->allocatedDuringIncremental = true;
  1.1486 +            zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
  1.1487 +        } else if (zone->isGCSweeping()) {
  1.1488 +            PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
  1.1489 +        }
  1.1490 +    }
  1.1491 +    aheader->next = al->head;
  1.1492 +    if (!al->head) {
  1.1493 +        JS_ASSERT(al->cursor == &al->head);
  1.1494 +        al->cursor = &aheader->next;
  1.1495 +    }
  1.1496 +    al->head = aheader;
  1.1497 +
  1.1498 +    /* See comments before allocateFromNewArena about this assert. */
  1.1499 +    JS_ASSERT(!aheader->hasFreeThings());
  1.1500 +    uintptr_t arenaAddr = aheader->arenaAddress();
  1.1501 +    return freeLists[thingKind].allocateFromNewArena(arenaAddr,
  1.1502 +                                                     Arena::firstThingOffset(thingKind),
  1.1503 +                                                     Arena::thingSize(thingKind));
  1.1504 +}
  1.1505 +
  1.1506 +void *
  1.1507 +ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind)
  1.1508 +{
  1.1509 +    return allocateFromArenaInline(zone, thingKind);
  1.1510 +}
  1.1511 +
  1.1512 +void
  1.1513 +ArenaLists::wipeDuringParallelExecution(JSRuntime *rt)
  1.1514 +{
  1.1515 +    JS_ASSERT(InParallelSection());
  1.1516 +
  1.1517 +    // First, check that all objects we have allocated are eligible
  1.1518 +    // for background finalization. The idea is that we will free
  1.1519 +    // (below) ALL background finalizable objects, because we know (by
  1.1520 +    // the rules of parallel execution) they are not reachable except
  1.1521 +    // by other thread-local objects. However, if any object were
  1.1522 +    // ineligible for background finalization, it might retain
  1.1523 +    // a reference to one of these background finalizable objects, and
  1.1524 +    // that would be bad.
  1.1525 +    for (unsigned i = 0; i < FINALIZE_LAST; i++) {
  1.1526 +        AllocKind thingKind = AllocKind(i);
  1.1527 +        if (!IsBackgroundFinalized(thingKind) && arenaLists[thingKind].head)
  1.1528 +            return;
  1.1529 +    }
  1.1530 +
  1.1531 +    // Finalize all background finalizable objects immediately and
  1.1532 +    // return the (now empty) arenas to the arena list.
  1.1533 +    FreeOp fop(rt, false);
  1.1534 +    for (unsigned i = 0; i < FINALIZE_OBJECT_LAST; i++) {
  1.1535 +        AllocKind thingKind = AllocKind(i);
  1.1536 +
  1.1537 +        if (!IsBackgroundFinalized(thingKind))
  1.1538 +            continue;
  1.1539 +
  1.1540 +        if (arenaLists[i].head) {
  1.1541 +            purge(thingKind);
  1.1542 +            forceFinalizeNow(&fop, thingKind);
  1.1543 +        }
  1.1544 +    }
  1.1545 +}
  1.1546 +
  1.1547 +void
  1.1548 +ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
  1.1549 +{
  1.1550 +    JS_ASSERT(!IsBackgroundFinalized(thingKind));
  1.1551 +    forceFinalizeNow(fop, thingKind);
  1.1552 +}
  1.1553 +
  1.1554 +void
  1.1555 +ArenaLists::forceFinalizeNow(FreeOp *fop, AllocKind thingKind)
  1.1556 +{
  1.1557 +    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
  1.1558 +
  1.1559 +    ArenaHeader *arenas = arenaLists[thingKind].head;
  1.1560 +    arenaLists[thingKind].clear();
  1.1561 +
  1.1562 +    SliceBudget budget;
  1.1563 +    FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget);
  1.1564 +    JS_ASSERT(!arenas);
  1.1565 +}
  1.1566 +
  1.1567 +void
  1.1568 +ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind)
  1.1569 +{
  1.1570 +    JS_ASSERT(!IsBackgroundFinalized(thingKind));
  1.1571 +    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
  1.1572 +    JS_ASSERT(!arenaListsToSweep[thingKind]);
  1.1573 +
  1.1574 +    arenaListsToSweep[thingKind] = arenaLists[thingKind].head;
  1.1575 +    arenaLists[thingKind].clear();
  1.1576 +}
  1.1577 +
  1.1578 +inline void
  1.1579 +ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
  1.1580 +{
  1.1581 +    JS_ASSERT(IsBackgroundFinalized(thingKind));
  1.1582 +
  1.1583 +#ifdef JS_THREADSAFE
  1.1584 +    JS_ASSERT(!fop->runtime()->gcHelperThread.sweeping());
  1.1585 +#endif
  1.1586 +
  1.1587 +    ArenaList *al = &arenaLists[thingKind];
  1.1588 +    if (!al->head) {
  1.1589 +        JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
  1.1590 +        JS_ASSERT(al->cursor == &al->head);
  1.1591 +        return;
  1.1592 +    }
  1.1593 +
  1.1594 +    /*
  1.1595 +     * The state can be BFS_DONE, or BFS_JUST_FINISHED if we have not allocated
  1.1596 +     * any GC things from the arena list since the previous background finalization.
  1.1597 +     */
  1.1598 +    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
  1.1599 +              backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);
  1.1600 +
  1.1601 +    arenaListsToSweep[thingKind] = al->head;
  1.1602 +    al->clear();
  1.1603 +    backgroundFinalizeState[thingKind] = BFS_RUN;
  1.1604 +}
  1.1605 +
  1.1606 +/*static*/ void
  1.1607 +ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgroundThread)
  1.1608 +{
  1.1609 +    JS_ASSERT(listHead);
  1.1610 +    AllocKind thingKind = listHead->getAllocKind();
  1.1611 +    Zone *zone = listHead->zone;
  1.1612 +
  1.1613 +    ArenaList finalized;
  1.1614 +    SliceBudget budget;
  1.1615 +    FinalizeArenas(fop, &listHead, finalized, thingKind, budget);
  1.1616 +    JS_ASSERT(!listHead);
  1.1617 +
  1.1618 +    /*
  1.1619 +     * After we finish the finalization, al->cursor must point to the end of
  1.1620 +     * the list's head segment: we emptied the list before the background
  1.1621 +     * finalization, and allocation adds new arenas before the cursor.
  1.1622 +     */
  1.1623 +    ArenaLists *lists = &zone->allocator.arenas;
  1.1624 +    ArenaList *al = &lists->arenaLists[thingKind];
  1.1625 +
  1.1626 +    AutoLockGC lock(fop->runtime());
  1.1627 +    JS_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN);
  1.1628 +    JS_ASSERT(!*al->cursor);
  1.1629 +
  1.1630 +    if (finalized.head) {
  1.1631 +        *al->cursor = finalized.head;
  1.1632 +        if (finalized.cursor != &finalized.head)
  1.1633 +            al->cursor = finalized.cursor;
  1.1634 +    }
  1.1635 +
  1.1636 +    /*
  1.1637 +     * We must set the state to BFS_JUST_FINISHED if we are running on the
  1.1638 +     * background thread and have touched the arena list, even if we added to
  1.1639 +     * the list only fully allocated arenas without any free things. This ensures
  1.1640 +     * that the allocating thread takes the GC lock and that all writes to the
  1.1641 +     * free list elements are propagated. As we always take the GC lock when
  1.1642 +     * allocating new arenas from the chunks, we can set the state to BFS_DONE if
  1.1643 +     * we have released all finalized arenas back to their chunks.
  1.1644 +     */
  1.1645 +    if (onBackgroundThread && finalized.head)
  1.1646 +        lists->backgroundFinalizeState[thingKind] = BFS_JUST_FINISHED;
  1.1647 +    else
  1.1648 +        lists->backgroundFinalizeState[thingKind] = BFS_DONE;
  1.1649 +
  1.1650 +    lists->arenaListsToSweep[thingKind] = nullptr;
  1.1651 +}
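The per-AllocKind backgroundFinalizeState transitions implied by queueForBackgroundSweep, backgroundFinalize, and allocateFromArenaInline can be summarized with a small standalone sketch; the enum mirrors the BFS_* values, but the helper itself is hypothetical and exists only for illustration:

// Simplified model of the background finalization state for one AllocKind.
enum class BfsModel {
    Done,         // BFS_DONE: no background finalization pending
    Run,          // BFS_RUN: queueForBackgroundSweep handed the list to the helper
    JustFinished  // BFS_JUST_FINISHED: the helper re-attached finalized arenas
};

// Hypothetical transition function for one sweep cycle.
inline BfsModel
NextState(BfsModel s, bool onBackgroundThread, bool reattachedArenas)
{
    switch (s) {
      case BfsModel::Done:
        // queueForBackgroundSweep moves the arena list aside and starts a run
        // (provided the list is non-empty).
        return BfsModel::Run;
      case BfsModel::Run:
        // backgroundFinalize: only a helper thread that re-attached arenas must
        // leave the JUST_FINISHED marker, forcing the allocating thread to
        // resynchronize under the GC lock before it searches the list.
        return (onBackgroundThread && reattachedArenas) ? BfsModel::JustFinished
                                                        : BfsModel::Done;
      case BfsModel::JustFinished:
        // allocateFromArenaInline observes this under the GC lock and collapses
        // it back to Done.
        return BfsModel::Done;
    }
    return BfsModel::Done;
}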
  1.1652 +
  1.1653 +void
  1.1654 +ArenaLists::queueObjectsForSweep(FreeOp *fop)
  1.1655 +{
  1.1656 +    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_OBJECT);
  1.1657 +
  1.1658 +    finalizeNow(fop, FINALIZE_OBJECT0);
  1.1659 +    finalizeNow(fop, FINALIZE_OBJECT2);
  1.1660 +    finalizeNow(fop, FINALIZE_OBJECT4);
  1.1661 +    finalizeNow(fop, FINALIZE_OBJECT8);
  1.1662 +    finalizeNow(fop, FINALIZE_OBJECT12);
  1.1663 +    finalizeNow(fop, FINALIZE_OBJECT16);
  1.1664 +
  1.1665 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND);
  1.1666 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND);
  1.1667 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND);
  1.1668 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
  1.1669 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
  1.1670 +    queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
  1.1671 +}
  1.1672 +
  1.1673 +void
  1.1674 +ArenaLists::queueStringsForSweep(FreeOp *fop)
  1.1675 +{
  1.1676 +    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_STRING);
  1.1677 +
  1.1678 +    queueForBackgroundSweep(fop, FINALIZE_FAT_INLINE_STRING);
  1.1679 +    queueForBackgroundSweep(fop, FINALIZE_STRING);
  1.1680 +
  1.1681 +    queueForForegroundSweep(fop, FINALIZE_EXTERNAL_STRING);
  1.1682 +}
  1.1683 +
  1.1684 +void
  1.1685 +ArenaLists::queueScriptsForSweep(FreeOp *fop)
  1.1686 +{
  1.1687 +    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
  1.1688 +    queueForForegroundSweep(fop, FINALIZE_SCRIPT);
  1.1689 +    queueForForegroundSweep(fop, FINALIZE_LAZY_SCRIPT);
  1.1690 +}
  1.1691 +
  1.1692 +void
  1.1693 +ArenaLists::queueJitCodeForSweep(FreeOp *fop)
  1.1694 +{
  1.1695 +    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_JITCODE);
  1.1696 +    queueForForegroundSweep(fop, FINALIZE_JITCODE);
  1.1697 +}
  1.1698 +
  1.1699 +void
  1.1700 +ArenaLists::queueShapesForSweep(FreeOp *fop)
  1.1701 +{
  1.1702 +    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SHAPE);
  1.1703 +
  1.1704 +    queueForBackgroundSweep(fop, FINALIZE_SHAPE);
  1.1705 +    queueForBackgroundSweep(fop, FINALIZE_BASE_SHAPE);
  1.1706 +    queueForBackgroundSweep(fop, FINALIZE_TYPE_OBJECT);
  1.1707 +}
  1.1708 +
  1.1709 +static void *
  1.1710 +RunLastDitchGC(JSContext *cx, JS::Zone *zone, AllocKind thingKind)
  1.1711 +{
  1.1712 +    /*
  1.1713 +     * In parallel sections, we do not attempt to refill the free list
  1.1714 +     * and hence do not encounter a last ditch GC.
  1.1715 +     */
  1.1716 +    JS_ASSERT(!InParallelSection());
  1.1717 +
  1.1718 +    PrepareZoneForGC(zone);
  1.1719 +
  1.1720 +    JSRuntime *rt = cx->runtime();
  1.1721 +
  1.1722 +    /* The last ditch GC preserves all atoms. */
  1.1723 +    AutoKeepAtoms keepAtoms(cx->perThreadData);
  1.1724 +    GC(rt, GC_NORMAL, JS::gcreason::LAST_DITCH);
  1.1725 +
  1.1726 +    /*
  1.1727 +     * The JSGC_END callback can legitimately allocate new GC
  1.1728 +     * things and populate the free list. If that happens, just
  1.1729 +     * return that list head.
  1.1730 +     */
  1.1731 +    size_t thingSize = Arena::thingSize(thingKind);
  1.1732 +    if (void *thing = zone->allocator.arenas.allocateFromFreeList(thingKind, thingSize))
  1.1733 +        return thing;
  1.1734 +
  1.1735 +    return nullptr;
  1.1736 +}
  1.1737 +
  1.1738 +template <AllowGC allowGC>
  1.1739 +/* static */ void *
  1.1740 +ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind)
  1.1741 +{
  1.1742 +    JS_ASSERT(cx->allocator()->arenas.freeLists[thingKind].isEmpty());
  1.1743 +    JS_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->runtime()->isHeapBusy());
  1.1744 +
  1.1745 +    Zone *zone = cx->allocator()->zone_;
  1.1746 +
  1.1747 +    bool runGC = cx->allowGC() && allowGC &&
  1.1748 +                 cx->asJSContext()->runtime()->gcIncrementalState != NO_INCREMENTAL &&
  1.1749 +                 zone->gcBytes > zone->gcTriggerBytes;
  1.1750 +
  1.1751 +#ifdef JS_THREADSAFE
  1.1752 +    JS_ASSERT_IF(cx->isJSContext() && allowGC,
  1.1753 +                 !cx->asJSContext()->runtime()->currentThreadHasExclusiveAccess());
  1.1754 +#endif
  1.1755 +
  1.1756 +    for (;;) {
  1.1757 +        if (MOZ_UNLIKELY(runGC)) {
  1.1758 +            if (void *thing = RunLastDitchGC(cx->asJSContext(), zone, thingKind))
  1.1759 +                return thing;
  1.1760 +        }
  1.1761 +
  1.1762 +        if (cx->isJSContext()) {
  1.1763 +            /*
  1.1764 +             * allocateFromArena may fail while the background finalization is
  1.1765 +             * still running. If we are on the main thread, we want to wait for it
  1.1766 +             * to finish and restart. However, checking for that is racy, as the
  1.1767 +             * background finalization could free some things after allocateFromArena
  1.1768 +             * decided to fail but may have already stopped by the time we check.
  1.1769 +             * To avoid this race we always try to allocate twice.
  1.1770 +             */
  1.1771 +            for (bool secondAttempt = false; ; secondAttempt = true) {
  1.1772 +                void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind);
  1.1773 +                if (MOZ_LIKELY(!!thing))
  1.1774 +                    return thing;
  1.1775 +                if (secondAttempt)
  1.1776 +                    break;
  1.1777 +
  1.1778 +                cx->asJSContext()->runtime()->gcHelperThread.waitBackgroundSweepEnd();
  1.1779 +            }
  1.1780 +        } else {
  1.1781 +#ifdef JS_THREADSAFE
  1.1782 +            /*
  1.1783 +             * If we're off the main thread, we try to allocate once and
  1.1784 +             * return whatever value we get. If we aren't in a ForkJoin
  1.1785 +             * session (i.e. we are in a worker thread running asynchronously
  1.1786 +             * with the main thread), we first need to ensure the main thread
  1.1787 +             * is not in a GC session.
  1.1788 +             */
  1.1789 +            mozilla::Maybe<AutoLockWorkerThreadState> lock;
  1.1790 +            JSRuntime *rt = zone->runtimeFromAnyThread();
  1.1791 +            if (rt->exclusiveThreadsPresent()) {
  1.1792 +                lock.construct();
  1.1793 +                while (rt->isHeapBusy())
  1.1794 +                    WorkerThreadState().wait(GlobalWorkerThreadState::PRODUCER);
  1.1795 +            }
  1.1796 +
  1.1797 +            void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind);
  1.1798 +            if (thing)
  1.1799 +                return thing;
  1.1800 +#else
  1.1801 +            MOZ_CRASH();
  1.1802 +#endif
  1.1803 +        }
  1.1804 +
  1.1805 +        if (!cx->allowGC() || !allowGC)
  1.1806 +            return nullptr;
  1.1807 +
  1.1808 +        /*
  1.1809 +         * We failed to allocate. Run the GC if we haven't done it already.
  1.1810 +         * Otherwise report OOM.
  1.1811 +         */
  1.1812 +        if (runGC)
  1.1813 +            break;
  1.1814 +        runGC = true;
  1.1815 +    }
  1.1816 +
  1.1817 +    JS_ASSERT(allowGC);
  1.1818 +    js_ReportOutOfMemory(cx);
  1.1819 +    return nullptr;
  1.1820 +}
  1.1821 +
  1.1822 +template void *
  1.1823 +ArenaLists::refillFreeList<NoGC>(ThreadSafeContext *cx, AllocKind thingKind);
  1.1824 +
  1.1825 +template void *
  1.1826 +ArenaLists::refillFreeList<CanGC>(ThreadSafeContext *cx, AllocKind thingKind);
  1.1827 +
  1.1828 +JSGCTraceKind
  1.1829 +js_GetGCThingTraceKind(void *thing)
  1.1830 +{
  1.1831 +    return GetGCThingTraceKind(thing);
  1.1832 +}
  1.1833 +
  1.1834 +/* static */ int64_t
  1.1835 +SliceBudget::TimeBudget(int64_t millis)
  1.1836 +{
  1.1837 +    return millis * PRMJ_USEC_PER_MSEC;
  1.1838 +}
  1.1839 +
  1.1840 +/* static */ int64_t
  1.1841 +SliceBudget::WorkBudget(int64_t work)
  1.1842 +{
  1.1843 +    /* So that work = 0 does not mean Unlimited, we subtract 1. */
  1.1844 +    return -work - 1;
  1.1845 +}
  1.1846 +
  1.1847 +SliceBudget::SliceBudget()
  1.1848 +  : deadline(INT64_MAX),
  1.1849 +    counter(INTPTR_MAX)
  1.1850 +{
  1.1851 +}
  1.1852 +
  1.1853 +SliceBudget::SliceBudget(int64_t budget)
  1.1854 +{
  1.1855 +    if (budget == Unlimited) {
  1.1856 +        deadline = INT64_MAX;
  1.1857 +        counter = INTPTR_MAX;
  1.1858 +    } else if (budget > 0) {
  1.1859 +        deadline = PRMJ_Now() + budget;
  1.1860 +        counter = CounterReset;
  1.1861 +    } else {
  1.1862 +        deadline = 0;
  1.1863 +        counter = -budget - 1;
  1.1864 +    }
  1.1865 +}
  1.1866 +
  1.1867 +bool
  1.1868 +SliceBudget::checkOverBudget()
  1.1869 +{
  1.1870 +    bool over = PRMJ_Now() > deadline;
  1.1871 +    if (!over)
  1.1872 +        counter = CounterReset;
  1.1873 +    return over;
  1.1874 +}
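A minimal usage sketch of the budget encoding above (relying only on the SliceBudget pieces shown here): time budgets are positive microsecond amounts, while work budgets are negated and offset by one so a work count of zero remains distinguishable from Unlimited.

// Sketch only; assumes the SliceBudget definitions above are in scope.
static void
ExampleBudgets()
{
    // A 10 ms incremental slice: the constructor sees a positive value and
    // sets deadline = PRMJ_Now() + 10 * PRMJ_USEC_PER_MSEC.
    SliceBudget timeLimited(SliceBudget::TimeBudget(10));

    // A budget of 1000 units of work: WorkBudget returns -1001, so the
    // constructor takes the non-positive branch and sets counter = 1000.
    SliceBudget workLimited(SliceBudget::WorkBudget(1000));

    // Default-constructed budget: effectively unlimited (INT64_MAX deadline).
    SliceBudget unlimited;

    (void) timeLimited;
    (void) workLimited;
    (void) unlimited;
}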
  1.1875 +
  1.1876 +void
  1.1877 +js::MarkCompartmentActive(InterpreterFrame *fp)
  1.1878 +{
  1.1879 +    fp->script()->compartment()->zone()->active = true;
  1.1880 +}
  1.1881 +
  1.1882 +static void
  1.1883 +RequestInterrupt(JSRuntime *rt, JS::gcreason::Reason reason)
  1.1884 +{
  1.1885 +    if (rt->gcIsNeeded)
  1.1886 +        return;
  1.1887 +
  1.1888 +    rt->gcIsNeeded = true;
  1.1889 +    rt->gcTriggerReason = reason;
  1.1890 +    rt->requestInterrupt(JSRuntime::RequestInterruptMainThread);
  1.1891 +}
  1.1892 +
  1.1893 +bool
  1.1894 +js::TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason)
  1.1895 +{
  1.1896 +    /* Wait till end of parallel section to trigger GC. */
  1.1897 +    if (InParallelSection()) {
  1.1898 +        ForkJoinContext::current()->requestGC(reason);
  1.1899 +        return true;
  1.1900 +    }
  1.1901 +
  1.1902 +    /* Don't trigger GCs when allocating under the interrupt callback lock. */
  1.1903 +    if (rt->currentThreadOwnsInterruptLock())
  1.1904 +        return false;
  1.1905 +
  1.1906 +    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
  1.1907 +
  1.1908 +    /* GC is already running. */
  1.1909 +    if (rt->isHeapCollecting())
  1.1910 +        return false;
  1.1911 +
  1.1912 +    JS::PrepareForFullGC(rt);
  1.1913 +    RequestInterrupt(rt, reason);
  1.1914 +    return true;
  1.1915 +}
  1.1916 +
  1.1917 +bool
  1.1918 +js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
  1.1919 +{
  1.1920 +    /*
  1.1921 +     * If parallel threads are running, wait until they
  1.1922 +     * have stopped before triggering the GC.
  1.1923 +     */
  1.1924 +    if (InParallelSection()) {
  1.1925 +        ForkJoinContext::current()->requestZoneGC(zone, reason);
  1.1926 +        return true;
  1.1927 +    }
  1.1928 +
  1.1929 +    /* Zones in use by a thread with an exclusive context can't be collected. */
  1.1930 +    if (zone->usedByExclusiveThread)
  1.1931 +        return false;
  1.1932 +
  1.1933 +    JSRuntime *rt = zone->runtimeFromMainThread();
  1.1934 +
  1.1935 +    /* Don't trigger GCs when allocating under the interrupt callback lock. */
  1.1936 +    if (rt->currentThreadOwnsInterruptLock())
  1.1937 +        return false;
  1.1938 +
  1.1939 +    /* GC is already running. */
  1.1940 +    if (rt->isHeapCollecting())
  1.1941 +        return false;
  1.1942 +
  1.1943 +    if (rt->gcZeal() == ZealAllocValue) {
  1.1944 +        TriggerGC(rt, reason);
  1.1945 +        return true;
  1.1946 +    }
  1.1947 +
  1.1948 +    if (rt->isAtomsZone(zone)) {
  1.1949 +        /* We can't do a zone GC of the atoms compartment. */
  1.1950 +        TriggerGC(rt, reason);
  1.1951 +        return true;
  1.1952 +    }
  1.1953 +
  1.1954 +    PrepareZoneForGC(zone);
  1.1955 +    RequestInterrupt(rt, reason);
  1.1956 +    return true;
  1.1957 +}
  1.1958 +
  1.1959 +void
  1.1960 +js::MaybeGC(JSContext *cx)
  1.1961 +{
  1.1962 +    JSRuntime *rt = cx->runtime();
  1.1963 +    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
  1.1964 +
  1.1965 +    if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) {
  1.1966 +        JS::PrepareForFullGC(rt);
  1.1967 +        GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
  1.1968 +        return;
  1.1969 +    }
  1.1970 +
  1.1971 +    if (rt->gcIsNeeded) {
  1.1972 +        GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
  1.1973 +        return;
  1.1974 +    }
  1.1975 +
  1.1976 +    double factor = rt->gcHighFrequencyGC ? 0.85 : 0.9;
  1.1977 +    Zone *zone = cx->zone();
  1.1978 +    if (zone->gcBytes > 1024 * 1024 &&
  1.1979 +        zone->gcBytes >= factor * zone->gcTriggerBytes &&
  1.1980 +        rt->gcIncrementalState == NO_INCREMENTAL &&
  1.1981 +        !rt->gcHelperThread.sweeping())
  1.1982 +    {
  1.1983 +        PrepareZoneForGC(zone);
  1.1984 +        GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
  1.1985 +        return;
  1.1986 +    }
  1.1987 +
  1.1988 +#ifndef JS_MORE_DETERMINISTIC
  1.1989 +    /*
  1.1990 +     * Access to the counters and, on 32 bit, setting gcNextFullGCTime below
  1.1991 +     * is not atomic and a race condition could trigger or suppress the GC. We
  1.1992 +     * tolerate this.
  1.1993 +     */
  1.1994 +    int64_t now = PRMJ_Now();
  1.1995 +    if (rt->gcNextFullGCTime && rt->gcNextFullGCTime <= now) {
  1.1996 +        if (rt->gcChunkAllocationSinceLastGC ||
  1.1997 +            rt->gcNumArenasFreeCommitted > rt->gcDecommitThreshold)
  1.1998 +        {
  1.1999 +            JS::PrepareForFullGC(rt);
  1.2000 +            GCSlice(rt, GC_SHRINK, JS::gcreason::MAYBEGC);
  1.2001 +        } else {
  1.2002 +            rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN;
  1.2003 +        }
  1.2004 +    }
  1.2005 +#endif
  1.2006 +}
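To make the eager-trigger heuristic in MaybeGC concrete, here is a small standalone sketch with made-up byte counts (the 0.85/0.9 factors come from the code above; everything else is hypothetical):

#include <cstdio>

// Mirrors only the byte-threshold part of the MaybeGC check above.
static bool
WouldStartSlice(bool highFrequencyGC, double gcBytes, double gcTriggerBytes)
{
    double factor = highFrequencyGC ? 0.85 : 0.9;
    // A zone qualifies once it is over 1 MiB and within `factor` of its
    // trigger threshold (assuming no GC or background sweep is already active).
    return gcBytes > 1024 * 1024 && gcBytes >= factor * gcTriggerBytes;
}

int main()
{
    // 90 MB used against a 100 MB trigger: a slice starts in either mode.
    std::printf("%d\n", WouldStartSlice(false, 90e6, 100e6));
    // 86 MB used: only the high-frequency (0.85) mode starts a slice.
    std::printf("%d\n", WouldStartSlice(true, 86e6, 100e6));
    std::printf("%d\n", WouldStartSlice(false, 86e6, 100e6));
    return 0;
}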
  1.2007 +
  1.2008 +static void
  1.2009 +DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
  1.2010 +{
  1.2011 +    Chunk *chunk = *availableListHeadp;
  1.2012 +    if (!chunk)
  1.2013 +        return;
  1.2014 +
  1.2015 +    /*
  1.2016 +     * Decommit is expensive so we avoid holding the GC lock while calling it.
  1.2017 +     *
  1.2018 +     * We decommit from the tail of the list to minimize interference with the
  1.2019 +     * main thread, which may start to allocate things at this point.
  1.2020 +     *
  1.2021 +     * An arena that is being decommitted outside the GC lock must not be
  1.2022 +     * available for allocation either via the free list or via the
  1.2023 +     * decommittedArenas bitmap. To ensure that, we fetch the arena from the
  1.2024 +     * free list before the decommit, pretending that it was allocated. If this
  1.2025 +     * arena is also the single free arena in the chunk, then we must remove the
  1.2026 +     * chunk from the available list before we release the lock, so that the
  1.2027 +     * allocating thread does not see chunks with no free arenas on that list.
  1.2028 +     *
  1.2029 +     * After we retake the lock, we mark the arena as free and decommitted if
  1.2030 +     * the decommit was successful. We must also add the chunk back to the
  1.2031 +     * available list if we removed it previously or when the main thread
  1.2032 +     * has allocated all remaining free arenas in the chunk.
  1.2033 +     *
  1.2034 +     * We also must make sure that the aheader is not accessed again after we
  1.2035 +     * decommit the arena.
  1.2036 +     */
  1.2037 +    JS_ASSERT(chunk->info.prevp == availableListHeadp);
  1.2038 +    while (Chunk *next = chunk->info.next) {
  1.2039 +        JS_ASSERT(next->info.prevp == &chunk->info.next);
  1.2040 +        chunk = next;
  1.2041 +    }
  1.2042 +
  1.2043 +    for (;;) {
  1.2044 +        while (chunk->info.numArenasFreeCommitted != 0) {
  1.2045 +            ArenaHeader *aheader = chunk->fetchNextFreeArena(rt);
  1.2046 +
  1.2047 +            Chunk **savedPrevp = chunk->info.prevp;
  1.2048 +            if (!chunk->hasAvailableArenas())
  1.2049 +                chunk->removeFromAvailableList();
  1.2050 +
  1.2051 +            size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress());
  1.2052 +            bool ok;
  1.2053 +            {
  1.2054 +                /*
  1.2055 +                 * If the main thread waits for the decommit to finish, skip
  1.2056 +                 * the potentially expensive unlock/lock pair on the contested
  1.2057 +                 * lock.
  1.2058 +                 */
  1.2059 +                Maybe<AutoUnlockGC> maybeUnlock;
  1.2060 +                if (!rt->isHeapBusy())
  1.2061 +                    maybeUnlock.construct(rt);
  1.2062 +                ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize);
  1.2063 +            }
  1.2064 +
  1.2065 +            if (ok) {
  1.2066 +                ++chunk->info.numArenasFree;
  1.2067 +                chunk->decommittedArenas.set(arenaIndex);
  1.2068 +            } else {
  1.2069 +                chunk->addArenaToFreeList(rt, aheader);
  1.2070 +            }
  1.2071 +            JS_ASSERT(chunk->hasAvailableArenas());
  1.2072 +            JS_ASSERT(!chunk->unused());
  1.2073 +            if (chunk->info.numArenasFree == 1) {
  1.2074 +                /*
  1.2075 +                 * Put the chunk back on the available list, either at the
  1.2076 +                 * point where it was before (to preserve the available list
  1.2077 +                 * that we enumerate) or, when the allocating thread has fully
  1.2078 +                 * used all the previous chunks, at the beginning of the
  1.2079 +                 * available list.
  1.2080 +                 */
  1.2081 +                Chunk **insertPoint = savedPrevp;
  1.2082 +                if (savedPrevp != availableListHeadp) {
  1.2083 +                    Chunk *prev = Chunk::fromPointerToNext(savedPrevp);
  1.2084 +                    if (!prev->hasAvailableArenas())
  1.2085 +                        insertPoint = availableListHeadp;
  1.2086 +                }
  1.2087 +                chunk->insertToAvailableList(insertPoint);
  1.2088 +            } else {
  1.2089 +                JS_ASSERT(chunk->info.prevp);
  1.2090 +            }
  1.2091 +
  1.2092 +            if (rt->gcChunkAllocationSinceLastGC || !ok) {
  1.2093 +                /*
  1.2094 +                 * The allocator thread has started to get new chunks. We should
  1.2095 +                 * stop to avoid decommitting arenas in freshly allocated chunks.
  1.2096 +                 */
  1.2097 +                return;
  1.2098 +            }
  1.2099 +        }
  1.2100 +
  1.2101 +        /*
  1.2102 +         * chunk->info.prevp becomes null when the allocator thread has consumed
  1.2103 +         * all chunks from the available list.
  1.2104 +         */
  1.2105 +        JS_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk);
  1.2106 +        if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp)
  1.2107 +            break;
  1.2108 +
  1.2109 +        /*
  1.2110 +         * prevp exists and is not the list head. It must point to the next
  1.2111 +         * field of the previous chunk.
  1.2112 +         */
  1.2113 +        chunk = chunk->getPrevious();
  1.2114 +    }
  1.2115 +}
  1.2116 +
  1.2117 +static void
  1.2118 +DecommitArenas(JSRuntime *rt)
  1.2119 +{
  1.2120 +    DecommitArenasFromAvailableList(rt, &rt->gcSystemAvailableChunkListHead);
  1.2121 +    DecommitArenasFromAvailableList(rt, &rt->gcUserAvailableChunkListHead);
  1.2122 +}
  1.2123 +
  1.2124 +/* Must be called with the GC lock taken. */
  1.2125 +static void
  1.2126 +ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
  1.2127 +{
  1.2128 +    if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
  1.2129 +        AutoUnlockGC unlock(rt);
  1.2130 +        FreeChunkList(rt, toFree);
  1.2131 +    }
  1.2132 +
  1.2133 +    if (shouldShrink)
  1.2134 +        DecommitArenas(rt);
  1.2135 +}
  1.2136 +
  1.2137 +static void
  1.2138 +SweepBackgroundThings(JSRuntime* rt, bool onBackgroundThread)
  1.2139 +{
  1.2140 +    /*
  1.2141 +     * We must finalize in the correct order, see comments in
  1.2142 +     * finalizeObjects.
  1.2143 +     */
  1.2144 +    FreeOp fop(rt, false);
  1.2145 +    for (int phase = 0 ; phase < BackgroundPhaseCount ; ++phase) {
  1.2146 +        for (Zone *zone = rt->gcSweepingZones; zone; zone = zone->gcNextGraphNode) {
  1.2147 +            for (int index = 0 ; index < BackgroundPhaseLength[phase] ; ++index) {
  1.2148 +                AllocKind kind = BackgroundPhases[phase][index];
  1.2149 +                ArenaHeader *arenas = zone->allocator.arenas.arenaListsToSweep[kind];
  1.2150 +                if (arenas)
  1.2151 +                    ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread);
  1.2152 +            }
  1.2153 +        }
  1.2154 +    }
  1.2155 +
  1.2156 +    rt->gcSweepingZones = nullptr;
  1.2157 +}
  1.2158 +
  1.2159 +#ifdef JS_THREADSAFE
  1.2160 +static void
  1.2161 +AssertBackgroundSweepingFinished(JSRuntime *rt)
  1.2162 +{
  1.2163 +    JS_ASSERT(!rt->gcSweepingZones);
  1.2164 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.2165 +        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) {
  1.2166 +            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
  1.2167 +            JS_ASSERT(zone->allocator.arenas.doneBackgroundFinalize(AllocKind(i)));
  1.2168 +        }
  1.2169 +    }
  1.2170 +}
  1.2171 +
  1.2172 +unsigned
  1.2173 +js::GetCPUCount()
  1.2174 +{
  1.2175 +    static unsigned ncpus = 0;
  1.2176 +    if (ncpus == 0) {
  1.2177 +# ifdef XP_WIN
  1.2178 +        SYSTEM_INFO sysinfo;
  1.2179 +        GetSystemInfo(&sysinfo);
  1.2180 +        ncpus = unsigned(sysinfo.dwNumberOfProcessors);
  1.2181 +# else
  1.2182 +        long n = sysconf(_SC_NPROCESSORS_ONLN);
  1.2183 +        ncpus = (n > 0) ? unsigned(n) : 1;
  1.2184 +# endif
  1.2185 +    }
  1.2186 +    return ncpus;
  1.2187 +}
  1.2188 +#endif /* JS_THREADSAFE */
  1.2189 +
  1.2190 +bool
  1.2191 +GCHelperThread::init()
  1.2192 +{
  1.2193 +    if (!rt->useHelperThreads()) {
  1.2194 +        backgroundAllocation = false;
  1.2195 +        return true;
  1.2196 +    }
  1.2197 +
  1.2198 +#ifdef JS_THREADSAFE
  1.2199 +    if (!(wakeup = PR_NewCondVar(rt->gcLock)))
  1.2200 +        return false;
  1.2201 +    if (!(done = PR_NewCondVar(rt->gcLock)))
  1.2202 +        return false;
  1.2203 +
  1.2204 +    thread = PR_CreateThread(PR_USER_THREAD, threadMain, this, PR_PRIORITY_NORMAL,
  1.2205 +                             PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
  1.2206 +    if (!thread)
  1.2207 +        return false;
  1.2208 +
  1.2209 +    backgroundAllocation = (GetCPUCount() >= 2);
  1.2210 +#endif /* JS_THREADSAFE */
  1.2211 +    return true;
  1.2212 +}
  1.2213 +
  1.2214 +void
  1.2215 +GCHelperThread::finish()
  1.2216 +{
  1.2217 +    if (!rt->useHelperThreads() || !rt->gcLock) {
  1.2218 +        JS_ASSERT(state == IDLE);
  1.2219 +        return;
  1.2220 +    }
  1.2221 +
  1.2222 +#ifdef JS_THREADSAFE
  1.2223 +    PRThread *join = nullptr;
  1.2224 +    {
  1.2225 +        AutoLockGC lock(rt);
  1.2226 +        if (thread && state != SHUTDOWN) {
  1.2227 +            /*
  1.2228 +             * We cannot be in the ALLOCATING or CANCEL_ALLOCATION states as
  1.2229 +             * the allocations should have been stopped during the last GC.
  1.2230 +             */
  1.2231 +            JS_ASSERT(state == IDLE || state == SWEEPING);
  1.2232 +            if (state == IDLE)
  1.2233 +                PR_NotifyCondVar(wakeup);
  1.2234 +            state = SHUTDOWN;
  1.2235 +            join = thread;
  1.2236 +        }
  1.2237 +    }
  1.2238 +    if (join) {
  1.2239 +        /* PR_DestroyThread is not necessary. */
  1.2240 +        PR_JoinThread(join);
  1.2241 +    }
  1.2242 +    if (wakeup)
  1.2243 +        PR_DestroyCondVar(wakeup);
  1.2244 +    if (done)
  1.2245 +        PR_DestroyCondVar(done);
  1.2246 +#endif /* JS_THREADSAFE */
  1.2247 +}
  1.2248 +
  1.2249 +#ifdef JS_THREADSAFE
  1.2250 +#ifdef MOZ_NUWA_PROCESS
  1.2251 +extern "C" {
  1.2252 +MFBT_API bool IsNuwaProcess();
  1.2253 +MFBT_API void NuwaMarkCurrentThread(void (*recreate)(void *), void *arg);
  1.2254 +}
  1.2255 +#endif
  1.2256 +
  1.2257 +/* static */
  1.2258 +void
  1.2259 +GCHelperThread::threadMain(void *arg)
  1.2260 +{
  1.2261 +    PR_SetCurrentThreadName("JS GC Helper");
  1.2262 +
  1.2263 +#ifdef MOZ_NUWA_PROCESS
  1.2264 +    if (IsNuwaProcess && IsNuwaProcess()) {
  1.2265 +        JS_ASSERT(NuwaMarkCurrentThread != nullptr);
  1.2266 +        NuwaMarkCurrentThread(nullptr, nullptr);
  1.2267 +    }
  1.2268 +#endif
  1.2269 +
  1.2270 +    static_cast<GCHelperThread *>(arg)->threadLoop();
  1.2271 +}
  1.2272 +
  1.2273 +void
  1.2274 +GCHelperThread::wait(PRCondVar *which)
  1.2275 +{
  1.2276 +    rt->gcLockOwner = nullptr;
  1.2277 +    PR_WaitCondVar(which, PR_INTERVAL_NO_TIMEOUT);
  1.2278 +#ifdef DEBUG
  1.2279 +    rt->gcLockOwner = PR_GetCurrentThread();
  1.2280 +#endif
  1.2281 +}
  1.2282 +
  1.2283 +void
  1.2284 +GCHelperThread::threadLoop()
  1.2285 +{
  1.2286 +    AutoLockGC lock(rt);
  1.2287 +
  1.2288 +    TraceLogger *logger = TraceLoggerForCurrentThread();
  1.2289 +
  1.2290 +    /*
  1.2291 +     * Even on the first iteration the state can be SHUTDOWN or SWEEPING if
  1.2292 +     * the stop request, or the GC and its corresponding startBackgroundSweep
  1.2293 +     * call, happens before this thread has a chance to run.
  1.2294 +     */
  1.2295 +    for (;;) {
  1.2296 +        switch (state) {
  1.2297 +          case SHUTDOWN:
  1.2298 +            return;
  1.2299 +          case IDLE:
  1.2300 +            wait(wakeup);
  1.2301 +            break;
  1.2302 +          case SWEEPING: {
  1.2303 +            AutoTraceLog logSweeping(logger, TraceLogger::GCSweeping);
  1.2304 +            doSweep();
  1.2305 +            if (state == SWEEPING)
  1.2306 +                state = IDLE;
  1.2307 +            PR_NotifyAllCondVar(done);
  1.2308 +            break;
  1.2309 +          }
  1.2310 +          case ALLOCATING: {
  1.2311 +            AutoTraceLog logAllocating(logger, TraceLogger::GCAllocation);
  1.2312 +            do {
  1.2313 +                Chunk *chunk;
  1.2314 +                {
  1.2315 +                    AutoUnlockGC unlock(rt);
  1.2316 +                    chunk = Chunk::allocate(rt);
  1.2317 +                }
  1.2318 +
  1.2319 +                /* OOM stops the background allocation. */
  1.2320 +                if (!chunk)
  1.2321 +                    break;
  1.2322 +                JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
  1.2323 +                rt->gcChunkPool.put(chunk);
  1.2324 +            } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt));
  1.2325 +            if (state == ALLOCATING)
  1.2326 +                state = IDLE;
  1.2327 +            break;
  1.2328 +          }
  1.2329 +          case CANCEL_ALLOCATION:
  1.2330 +            state = IDLE;
  1.2331 +            PR_NotifyAllCondVar(done);
  1.2332 +            break;
  1.2333 +        }
  1.2334 +    }
  1.2335 +}
  1.2336 +#endif /* JS_THREADSAFE */
  1.2337 +
  1.2338 +void
  1.2339 +GCHelperThread::startBackgroundSweep(bool shouldShrink)
  1.2340 +{
  1.2341 +    JS_ASSERT(rt->useHelperThreads());
  1.2342 +
  1.2343 +#ifdef JS_THREADSAFE
  1.2344 +    AutoLockGC lock(rt);
  1.2345 +    JS_ASSERT(state == IDLE);
  1.2346 +    JS_ASSERT(!sweepFlag);
  1.2347 +    sweepFlag = true;
  1.2348 +    shrinkFlag = shouldShrink;
  1.2349 +    state = SWEEPING;
  1.2350 +    PR_NotifyCondVar(wakeup);
  1.2351 +#endif /* JS_THREADSAFE */
  1.2352 +}
  1.2353 +
  1.2354 +/* Must be called with the GC lock taken. */
  1.2355 +void
  1.2356 +GCHelperThread::startBackgroundShrink()
  1.2357 +{
  1.2358 +    JS_ASSERT(rt->useHelperThreads());
  1.2359 +
  1.2360 +#ifdef JS_THREADSAFE
  1.2361 +    switch (state) {
  1.2362 +      case IDLE:
  1.2363 +        JS_ASSERT(!sweepFlag);
  1.2364 +        shrinkFlag = true;
  1.2365 +        state = SWEEPING;
  1.2366 +        PR_NotifyCondVar(wakeup);
  1.2367 +        break;
  1.2368 +      case SWEEPING:
  1.2369 +        shrinkFlag = true;
  1.2370 +        break;
  1.2371 +      case ALLOCATING:
  1.2372 +      case CANCEL_ALLOCATION:
  1.2373 +        /*
  1.2374 +         * If we have started background allocation there is nothing to
  1.2375 +         * shrink.
  1.2376 +         */
  1.2377 +        break;
  1.2378 +      case SHUTDOWN:
  1.2379 +        MOZ_ASSUME_UNREACHABLE("No shrink on shutdown");
  1.2380 +    }
  1.2381 +#endif /* JS_THREADSAFE */
  1.2382 +}
  1.2383 +
  1.2384 +void
  1.2385 +GCHelperThread::waitBackgroundSweepEnd()
  1.2386 +{
  1.2387 +    if (!rt->useHelperThreads()) {
  1.2388 +        JS_ASSERT(state == IDLE);
  1.2389 +        return;
  1.2390 +    }
  1.2391 +
  1.2392 +#ifdef JS_THREADSAFE
  1.2393 +    AutoLockGC lock(rt);
  1.2394 +    while (state == SWEEPING)
  1.2395 +        wait(done);
  1.2396 +    if (rt->gcIncrementalState == NO_INCREMENTAL)
  1.2397 +        AssertBackgroundSweepingFinished(rt);
  1.2398 +#endif /* JS_THREADSAFE */
  1.2399 +}
  1.2400 +
  1.2401 +void
  1.2402 +GCHelperThread::waitBackgroundSweepOrAllocEnd()
  1.2403 +{
  1.2404 +    if (!rt->useHelperThreads()) {
  1.2405 +        JS_ASSERT(state == IDLE);
  1.2406 +        return;
  1.2407 +    }
  1.2408 +
  1.2409 +#ifdef JS_THREADSAFE
  1.2410 +    AutoLockGC lock(rt);
  1.2411 +    if (state == ALLOCATING)
  1.2412 +        state = CANCEL_ALLOCATION;
  1.2413 +    while (state == SWEEPING || state == CANCEL_ALLOCATION)
  1.2414 +        wait(done);
  1.2415 +    if (rt->gcIncrementalState == NO_INCREMENTAL)
  1.2416 +        AssertBackgroundSweepingFinished(rt);
  1.2417 +#endif /* JS_THREADSAFE */
  1.2418 +}
  1.2419 +
  1.2420 +/* Must be called with the GC lock taken. */
  1.2421 +inline void
  1.2422 +GCHelperThread::startBackgroundAllocationIfIdle()
  1.2423 +{
  1.2424 +    JS_ASSERT(rt->useHelperThreads());
  1.2425 +
  1.2426 +#ifdef JS_THREADSAFE
  1.2427 +    if (state == IDLE) {
  1.2428 +        state = ALLOCATING;
  1.2429 +        PR_NotifyCondVar(wakeup);
  1.2430 +    }
  1.2431 +#endif /* JS_THREADSAFE */
  1.2432 +}
  1.2433 +
  1.2434 +void
  1.2435 +GCHelperThread::replenishAndFreeLater(void *ptr)
  1.2436 +{
  1.2437 +    JS_ASSERT(freeCursor == freeCursorEnd);
  1.2438 +    do {
  1.2439 +        if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH))
  1.2440 +            break;
  1.2441 +        freeCursor = (void **) js_malloc(FREE_ARRAY_SIZE);
  1.2442 +        if (!freeCursor) {
  1.2443 +            freeCursorEnd = nullptr;
  1.2444 +            break;
  1.2445 +        }
  1.2446 +        freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
  1.2447 +        *freeCursor++ = ptr;
  1.2448 +        return;
  1.2449 +    } while (false);
  1.2450 +    js_free(ptr);
  1.2451 +}
  1.2452 +
  1.2453 +#ifdef JS_THREADSAFE
  1.2454 +/* Must be called with the GC lock taken. */
  1.2455 +void
  1.2456 +GCHelperThread::doSweep()
  1.2457 +{
  1.2458 +    if (sweepFlag) {
  1.2459 +        sweepFlag = false;
  1.2460 +        AutoUnlockGC unlock(rt);
  1.2461 +
  1.2462 +        SweepBackgroundThings(rt, true);
  1.2463 +
  1.2464 +        if (freeCursor) {
  1.2465 +            void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
  1.2466 +            freeElementsAndArray(array, freeCursor);
  1.2467 +            freeCursor = freeCursorEnd = nullptr;
  1.2468 +        } else {
  1.2469 +            JS_ASSERT(!freeCursorEnd);
  1.2470 +        }
  1.2471 +        for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
  1.2472 +            void **array = *iter;
  1.2473 +            freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
  1.2474 +        }
  1.2475 +        freeVector.resize(0);
  1.2476 +
  1.2477 +        rt->freeLifoAlloc.freeAll();
  1.2478 +    }
  1.2479 +
  1.2480 +    bool shrinking = shrinkFlag;
  1.2481 +    ExpireChunksAndArenas(rt, shrinking);
  1.2482 +
  1.2483 +    /*
  1.2484 +     * The main thread may have called ShrinkGCBuffers while
  1.2485 +     * ExpireChunksAndArenas(rt, false) was running, so we recheck the flag
  1.2486 +     * afterwards.
  1.2487 +     */
  1.2488 +    if (!shrinking && shrinkFlag) {
  1.2489 +        shrinkFlag = false;
  1.2490 +        ExpireChunksAndArenas(rt, true);
  1.2491 +    }
  1.2492 +}
  1.2493 +#endif /* JS_THREADSAFE */
  1.2494 +
  1.2495 +bool
  1.2496 +GCHelperThread::onBackgroundThread()
  1.2497 +{
  1.2498 +#ifdef JS_THREADSAFE
  1.2499 +    return PR_GetCurrentThread() == getThread();
  1.2500 +#else
  1.2501 +    return false;
  1.2502 +#endif
  1.2503 +}
  1.2504 +
  1.2505 +static bool
  1.2506 +ReleaseObservedTypes(JSRuntime *rt)
  1.2507 +{
  1.2508 +    bool releaseTypes = rt->gcZeal() != 0;
  1.2509 +
  1.2510 +#ifndef JS_MORE_DETERMINISTIC
  1.2511 +    int64_t now = PRMJ_Now();
  1.2512 +    if (now >= rt->gcJitReleaseTime)
  1.2513 +        releaseTypes = true;
  1.2514 +    if (releaseTypes)
  1.2515 +        rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
  1.2516 +#endif
  1.2517 +
  1.2518 +    return releaseTypes;
  1.2519 +}
  1.2520 +
  1.2521 +/*
  1.2522 + * It's simpler if we preserve the invariant that every zone has at least one
  1.2523 + * compartment. If we know we're deleting the entire zone, then
  1.2524 + * SweepCompartments is allowed to delete all compartments. In this case,
  1.2525 + * |keepAtleastOne| is false. If some objects remain in the zone so that it
  1.2526 + * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
  1.2527 + * SweepCompartments from deleting every compartment. Instead, it preserves an
  1.2528 + * arbitrary compartment in the zone.
  1.2529 + */
  1.2530 +static void
  1.2531 +SweepCompartments(FreeOp *fop, Zone *zone, bool keepAtleastOne, bool lastGC)
  1.2532 +{
  1.2533 +    JSRuntime *rt = zone->runtimeFromMainThread();
  1.2534 +    JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
  1.2535 +
  1.2536 +    JSCompartment **read = zone->compartments.begin();
  1.2537 +    JSCompartment **end = zone->compartments.end();
  1.2538 +    JSCompartment **write = read;
  1.2539 +    bool foundOne = false;
  1.2540 +    while (read < end) {
  1.2541 +        JSCompartment *comp = *read++;
  1.2542 +        JS_ASSERT(!rt->isAtomsCompartment(comp));
  1.2543 +
  1.2544 +        /*
  1.2545 +         * Don't delete the last compartment if all the ones before it were
  1.2546 +         * deleted and keepAtleastOne is true.
  1.2547 +         */
  1.2548 +        bool dontDelete = read == end && !foundOne && keepAtleastOne;
  1.2549 +        if ((!comp->marked && !dontDelete) || lastGC) {
  1.2550 +            if (callback)
  1.2551 +                callback(fop, comp);
  1.2552 +            if (comp->principals)
  1.2553 +                JS_DropPrincipals(rt, comp->principals);
  1.2554 +            js_delete(comp);
  1.2555 +        } else {
  1.2556 +            *write++ = comp;
  1.2557 +            foundOne = true;
  1.2558 +        }
  1.2559 +    }
  1.2560 +    zone->compartments.resize(write - zone->compartments.begin());
  1.2561 +    JS_ASSERT_IF(keepAtleastOne, !zone->compartments.empty());
  1.2562 +}
  1.2563 +
  1.2564 +static void
  1.2565 +SweepZones(FreeOp *fop, bool lastGC)
  1.2566 +{
  1.2567 +    JSRuntime *rt = fop->runtime();
  1.2568 +    JSZoneCallback callback = rt->destroyZoneCallback;
  1.2569 +
  1.2570 +    /* Skip the atomsCompartment zone. */
  1.2571 +    Zone **read = rt->zones.begin() + 1;
  1.2572 +    Zone **end = rt->zones.end();
  1.2573 +    Zone **write = read;
  1.2574 +    JS_ASSERT(rt->zones.length() >= 1);
  1.2575 +    JS_ASSERT(rt->isAtomsZone(rt->zones[0]));
  1.2576 +
  1.2577 +    while (read < end) {
  1.2578 +        Zone *zone = *read++;
  1.2579 +
  1.2580 +        if (zone->wasGCStarted()) {
  1.2581 +            if ((zone->allocator.arenas.arenaListsAreEmpty() && !zone->hasMarkedCompartments()) ||
  1.2582 +                lastGC)
  1.2583 +            {
  1.2584 +                zone->allocator.arenas.checkEmptyFreeLists();
  1.2585 +                if (callback)
  1.2586 +                    callback(zone);
  1.2587 +                SweepCompartments(fop, zone, false, lastGC);
  1.2588 +                JS_ASSERT(zone->compartments.empty());
  1.2589 +                fop->delete_(zone);
  1.2590 +                continue;
  1.2591 +            }
  1.2592 +            SweepCompartments(fop, zone, true, lastGC);
  1.2593 +        }
  1.2594 +        *write++ = zone;
  1.2595 +    }
  1.2596 +    rt->zones.resize(write - rt->zones.begin());
  1.2597 +}
  1.2598 +
  1.2599 +static void
  1.2600 +PurgeRuntime(JSRuntime *rt)
  1.2601 +{
  1.2602 +    for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
  1.2603 +        comp->purge();
  1.2604 +
  1.2605 +    rt->freeLifoAlloc.transferUnusedFrom(&rt->tempLifoAlloc);
  1.2606 +    rt->interpreterStack().purge(rt);
  1.2607 +
  1.2608 +    rt->gsnCache.purge();
  1.2609 +    rt->scopeCoordinateNameCache.purge();
  1.2610 +    rt->newObjectCache.purge();
  1.2611 +    rt->nativeIterCache.purge();
  1.2612 +    rt->sourceDataCache.purge();
  1.2613 +    rt->evalCache.clear();
  1.2614 +
  1.2615 +    if (!rt->hasActiveCompilations())
  1.2616 +        rt->parseMapPool().purgeAll();
  1.2617 +}
  1.2618 +
  1.2619 +static bool
  1.2620 +ShouldPreserveJITCode(JSCompartment *comp, int64_t currentTime)
  1.2621 +{
  1.2622 +    JSRuntime *rt = comp->runtimeFromMainThread();
  1.2623 +    if (rt->gcShouldCleanUpEverything)
  1.2624 +        return false;
  1.2625 +
  1.2626 +    if (rt->alwaysPreserveCode)
  1.2627 +        return true;
  1.2628 +    if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime)
  1.2629 +        return true;
  1.2630 +
  1.2631 +    return false;
  1.2632 +}
  1.2633 +
  1.2634 +#ifdef DEBUG
  1.2635 +class CompartmentCheckTracer : public JSTracer
  1.2636 +{
  1.2637 +  public:
  1.2638 +    CompartmentCheckTracer(JSRuntime *rt, JSTraceCallback callback)
  1.2639 +      : JSTracer(rt, callback)
  1.2640 +    {}
  1.2641 +
  1.2642 +    Cell *src;
  1.2643 +    JSGCTraceKind srcKind;
  1.2644 +    Zone *zone;
  1.2645 +    JSCompartment *compartment;
  1.2646 +};
  1.2647 +
  1.2648 +static bool
  1.2649 +InCrossCompartmentMap(JSObject *src, Cell *dst, JSGCTraceKind dstKind)
  1.2650 +{
  1.2651 +    JSCompartment *srccomp = src->compartment();
  1.2652 +
  1.2653 +    if (dstKind == JSTRACE_OBJECT) {
  1.2654 +        Value key = ObjectValue(*static_cast<JSObject *>(dst));
  1.2655 +        if (WrapperMap::Ptr p = srccomp->lookupWrapper(key)) {
  1.2656 +            if (*p->value().unsafeGet() == ObjectValue(*src))
  1.2657 +                return true;
  1.2658 +        }
  1.2659 +    }
  1.2660 +
  1.2661 +    /*
  1.2662 +     * If the cross-compartment edge is caused by the debugger, then we don't
  1.2663 +     * know the right hashtable key, so we have to iterate.
  1.2664 +     */
  1.2665 +    for (JSCompartment::WrapperEnum e(srccomp); !e.empty(); e.popFront()) {
  1.2666 +        if (e.front().key().wrapped == dst && ToMarkable(e.front().value()) == src)
  1.2667 +            return true;
  1.2668 +    }
  1.2669 +
  1.2670 +    return false;
  1.2671 +}
  1.2672 +
  1.2673 +static void
  1.2674 +CheckCompartment(CompartmentCheckTracer *trc, JSCompartment *thingCompartment,
  1.2675 +                 Cell *thing, JSGCTraceKind kind)
  1.2676 +{
  1.2677 +    JS_ASSERT(thingCompartment == trc->compartment ||
  1.2678 +              trc->runtime()->isAtomsCompartment(thingCompartment) ||
  1.2679 +              (trc->srcKind == JSTRACE_OBJECT &&
  1.2680 +               InCrossCompartmentMap((JSObject *)trc->src, thing, kind)));
  1.2681 +}
  1.2682 +
  1.2683 +static JSCompartment *
  1.2684 +CompartmentOfCell(Cell *thing, JSGCTraceKind kind)
  1.2685 +{
  1.2686 +    if (kind == JSTRACE_OBJECT)
  1.2687 +        return static_cast<JSObject *>(thing)->compartment();
  1.2688 +    else if (kind == JSTRACE_SHAPE)
  1.2689 +        return static_cast<Shape *>(thing)->compartment();
  1.2690 +    else if (kind == JSTRACE_BASE_SHAPE)
  1.2691 +        return static_cast<BaseShape *>(thing)->compartment();
  1.2692 +    else if (kind == JSTRACE_SCRIPT)
  1.2693 +        return static_cast<JSScript *>(thing)->compartment();
  1.2694 +    else
  1.2695 +        return nullptr;
  1.2696 +}
  1.2697 +
  1.2698 +static void
  1.2699 +CheckCompartmentCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind)
  1.2700 +{
  1.2701 +    CompartmentCheckTracer *trc = static_cast<CompartmentCheckTracer *>(trcArg);
  1.2702 +    Cell *thing = (Cell *)*thingp;
  1.2703 +
  1.2704 +    JSCompartment *comp = CompartmentOfCell(thing, kind);
  1.2705 +    if (comp && trc->compartment) {
  1.2706 +        CheckCompartment(trc, comp, thing, kind);
  1.2707 +    } else {
  1.2708 +        JS_ASSERT(thing->tenuredZone() == trc->zone ||
  1.2709 +                  trc->runtime()->isAtomsZone(thing->tenuredZone()));
  1.2710 +    }
  1.2711 +}
  1.2712 +
  1.2713 +static void
  1.2714 +CheckForCompartmentMismatches(JSRuntime *rt)
  1.2715 +{
  1.2716 +    if (rt->gcDisableStrictProxyCheckingCount)
  1.2717 +        return;
  1.2718 +
  1.2719 +    CompartmentCheckTracer trc(rt, CheckCompartmentCallback);
  1.2720 +    for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
  1.2721 +        trc.zone = zone;
  1.2722 +        for (size_t thingKind = 0; thingKind < FINALIZE_LAST; thingKind++) {
  1.2723 +            for (CellIterUnderGC i(zone, AllocKind(thingKind)); !i.done(); i.next()) {
  1.2724 +                trc.src = i.getCell();
  1.2725 +                trc.srcKind = MapAllocToTraceKind(AllocKind(thingKind));
  1.2726 +                trc.compartment = CompartmentOfCell(trc.src, trc.srcKind);
  1.2727 +                JS_TraceChildren(&trc, trc.src, trc.srcKind);
  1.2728 +            }
  1.2729 +        }
  1.2730 +    }
  1.2731 +}
  1.2732 +#endif
  1.2733 +
  1.2734 +static bool
  1.2735 +BeginMarkPhase(JSRuntime *rt)
  1.2736 +{
  1.2737 +    int64_t currentTime = PRMJ_Now();
  1.2738 +
  1.2739 +#ifdef DEBUG
  1.2740 +    if (rt->gcFullCompartmentChecks)
  1.2741 +        CheckForCompartmentMismatches(rt);
  1.2742 +#endif
  1.2743 +
  1.2744 +    rt->gcIsFull = true;
  1.2745 +    bool any = false;
  1.2746 +
  1.2747 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.2748 +        /* Assert that zone state is as we expect */
  1.2749 +        JS_ASSERT(!zone->isCollecting());
  1.2750 +        JS_ASSERT(!zone->compartments.empty());
  1.2751 +        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
  1.2752 +            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
  1.2753 +
  1.2754 +        /* Set up which zones will be collected. */
  1.2755 +        if (zone->isGCScheduled()) {
  1.2756 +            if (!rt->isAtomsZone(zone)) {
  1.2757 +                any = true;
  1.2758 +                zone->setGCState(Zone::Mark);
  1.2759 +            }
  1.2760 +        } else {
  1.2761 +            rt->gcIsFull = false;
  1.2762 +        }
  1.2763 +
  1.2764 +        zone->scheduledForDestruction = false;
  1.2765 +        zone->maybeAlive = false;
  1.2766 +        zone->setPreservingCode(false);
  1.2767 +    }
  1.2768 +
  1.2769 +    for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
  1.2770 +        JS_ASSERT(c->gcLiveArrayBuffers.empty());
  1.2771 +        c->marked = false;
  1.2772 +        if (ShouldPreserveJITCode(c, currentTime))
  1.2773 +            c->zone()->setPreservingCode(true);
  1.2774 +    }
  1.2775 +
  1.2776 +    /*
  1.2777 +     * Atoms are not in the cross-compartment map. So if there are any
  1.2778 +     * zones that are not being collected, we are not allowed to collect
  1.2779 +     * atoms. Otherwise, the non-collected zones could contain pointers
  1.2780 +     * to atoms that we would miss.
  1.2781 +     *
  1.2782 +     * keepAtoms() will only change on the main thread, which we are currently
  1.2783 +     * on. If the value of keepAtoms() changes between GC slices, then we'll
  1.2784 +     * cancel the incremental GC. See IsIncrementalGCSafe.
  1.2785 +     */
  1.2786 +    if (rt->gcIsFull && !rt->keepAtoms()) {
  1.2787 +        Zone *atomsZone = rt->atomsCompartment()->zone();
  1.2788 +        if (atomsZone->isGCScheduled()) {
  1.2789 +            JS_ASSERT(!atomsZone->isCollecting());
  1.2790 +            atomsZone->setGCState(Zone::Mark);
  1.2791 +            any = true;
  1.2792 +        }
  1.2793 +    }
  1.2794 +
  1.2795 +    /* Check that at least one zone is scheduled for collection. */
  1.2796 +    if (!any)
  1.2797 +        return false;
  1.2798 +
  1.2799 +    /*
  1.2800 +     * At the end of each incremental slice, we call prepareForIncrementalGC,
  1.2801 +     * which marks objects in all arenas that we're currently allocating
  1.2802 +     * into. This can cause leaks if unreachable objects are in these
  1.2803 +     * arenas. This purge call ensures that we only mark arenas that have had
  1.2804 +     * allocations after the incremental GC started.
  1.2805 +     */
  1.2806 +    if (rt->gcIsIncremental) {
  1.2807 +        for (GCZonesIter zone(rt); !zone.done(); zone.next())
  1.2808 +            zone->allocator.arenas.purge();
  1.2809 +    }
  1.2810 +
  1.2811 +    rt->gcMarker.start();
  1.2812 +    JS_ASSERT(!rt->gcMarker.callback);
  1.2813 +    JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker));
  1.2814 +
   1.2815 +    /* For a non-incremental GC, jit code is discarded during sweeping instead. */
  1.2816 +    if (rt->gcIsIncremental) {
  1.2817 +        for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.2818 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK_DISCARD_CODE);
  1.2819 +            zone->discardJitCode(rt->defaultFreeOp());
  1.2820 +        }
  1.2821 +    }
  1.2822 +
  1.2823 +    GCMarker *gcmarker = &rt->gcMarker;
  1.2824 +
  1.2825 +    rt->gcStartNumber = rt->gcNumber;
  1.2826 +
  1.2827 +    /*
  1.2828 +     * We must purge the runtime at the beginning of an incremental GC. The
  1.2829 +     * danger if we purge later is that the snapshot invariant of incremental
  1.2830 +     * GC will be broken, as follows. If some object is reachable only through
  1.2831 +     * some cache (say the dtoaCache) then it will not be part of the snapshot.
  1.2832 +     * If we purge after root marking, then the mutator could obtain a pointer
  1.2833 +     * to the object and start using it. This object might never be marked, so
  1.2834 +     * a GC hazard would exist.
  1.2835 +     */
  1.2836 +    {
  1.2837 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_PURGE);
  1.2838 +        PurgeRuntime(rt);
  1.2839 +    }
  1.2840 +
  1.2841 +    /*
  1.2842 +     * Mark phase.
  1.2843 +     */
  1.2844 +    gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK);
  1.2845 +    gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_ROOTS);
  1.2846 +
  1.2847 +    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.2848 +        /* Unmark everything in the zones being collected. */
  1.2849 +        zone->allocator.arenas.unmarkAll();
  1.2850 +    }
  1.2851 +
  1.2852 +    for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
  1.2853 +        /* Reset weak map list for the compartments being collected. */
  1.2854 +        WeakMapBase::resetCompartmentWeakMapList(c);
  1.2855 +    }
  1.2856 +
  1.2857 +    if (rt->gcIsFull)
  1.2858 +        UnmarkScriptData(rt);
  1.2859 +
  1.2860 +    MarkRuntime(gcmarker);
  1.2861 +    if (rt->gcIsIncremental)
  1.2862 +        BufferGrayRoots(gcmarker);
  1.2863 +
  1.2864 +    /*
  1.2865 +     * This code ensures that if a zone is "dead", then it will be
  1.2866 +     * collected in this GC. A zone is considered dead if its maybeAlive
  1.2867 +     * flag is false. The maybeAlive flag is set if:
  1.2868 +     *   (1) the zone has incoming cross-compartment edges, or
  1.2869 +     *   (2) an object in the zone was marked during root marking, either
  1.2870 +     *       as a black root or a gray root.
   1.2871 +     * If maybeAlive is false, then we set the scheduledForDestruction flag.
  1.2872 +     * At any time later in the GC, if we try to mark an object whose
  1.2873 +     * zone is scheduled for destruction, we will assert.
  1.2874 +     * NOTE: Due to bug 811587, we only assert if gcManipulatingDeadCompartments
  1.2875 +     * is true (e.g., if we're doing a brain transplant).
  1.2876 +     *
  1.2877 +     * The purpose of this check is to ensure that a zone that we would
  1.2878 +     * normally destroy is not resurrected by a read barrier or an
  1.2879 +     * allocation. This might happen during a function like JS_TransplantObject,
  1.2880 +     * which iterates over all compartments, live or dead, and operates on their
  1.2881 +     * objects. See bug 803376 for details on this problem. To avoid the
  1.2882 +     * problem, we are very careful to avoid allocation and read barriers during
  1.2883 +     * JS_TransplantObject and the like. The code here ensures that we don't
  1.2884 +     * regress.
  1.2885 +     *
  1.2886 +     * Note that there are certain cases where allocations or read barriers in
   1.2887 +     * a dead zone are difficult to avoid. We detect such cases (via the
  1.2888 +     * gcObjectsMarkedInDeadCompartment counter) and redo any ongoing GCs after
  1.2889 +     * the JS_TransplantObject function has finished. This ensures that the dead
  1.2890 +     * zones will be cleaned up. See AutoMarkInDeadZone and
  1.2891 +     * AutoMaybeTouchDeadZones for details.
  1.2892 +     */
  1.2893 +
  1.2894 +    /* Set the maybeAlive flag based on cross-compartment edges. */
  1.2895 +    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
  1.2896 +        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
  1.2897 +            Cell *dst = e.front().key().wrapped;
  1.2898 +            dst->tenuredZone()->maybeAlive = true;
  1.2899 +        }
  1.2900 +    }
  1.2901 +
  1.2902 +    /*
  1.2903 +     * For black roots, code in gc/Marking.cpp will already have set maybeAlive
  1.2904 +     * during MarkRuntime.
  1.2905 +     */
  1.2906 +
  1.2907 +    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.2908 +        if (!zone->maybeAlive && !rt->isAtomsZone(zone))
  1.2909 +            zone->scheduledForDestruction = true;
  1.2910 +    }
  1.2911 +    rt->gcFoundBlackGrayEdges = false;
  1.2912 +
  1.2913 +    return true;
  1.2914 +}
  1.2915 +
  1.2916 +template <class CompartmentIterT>
  1.2917 +static void
  1.2918 +MarkWeakReferences(JSRuntime *rt, gcstats::Phase phase)
  1.2919 +{
  1.2920 +    GCMarker *gcmarker = &rt->gcMarker;
  1.2921 +    JS_ASSERT(gcmarker->isDrained());
  1.2922 +
  1.2923 +    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
  1.2924 +    gcstats::AutoPhase ap1(rt->gcStats, phase);
  1.2925 +
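          +    /*
          +     * Iterate to a fixed point: marking a weak map entry or a debugger edge
          +     * can make more things reachable, which in turn can make further weak
          +     * entries live, so keep re-scanning until nothing new is marked.
          +     */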
  1.2926 +    for (;;) {
  1.2927 +        bool markedAny = false;
  1.2928 +        for (CompartmentIterT c(rt); !c.done(); c.next()) {
  1.2929 +            markedAny |= WatchpointMap::markCompartmentIteratively(c, gcmarker);
  1.2930 +            markedAny |= WeakMapBase::markCompartmentIteratively(c, gcmarker);
  1.2931 +        }
  1.2932 +        markedAny |= Debugger::markAllIteratively(gcmarker);
  1.2933 +
  1.2934 +        if (!markedAny)
  1.2935 +            break;
  1.2936 +
  1.2937 +        SliceBudget budget;
  1.2938 +        gcmarker->drainMarkStack(budget);
  1.2939 +    }
  1.2940 +    JS_ASSERT(gcmarker->isDrained());
  1.2941 +}
  1.2942 +
  1.2943 +static void
  1.2944 +MarkWeakReferencesInCurrentGroup(JSRuntime *rt, gcstats::Phase phase)
  1.2945 +{
  1.2946 +    MarkWeakReferences<GCCompartmentGroupIter>(rt, phase);
  1.2947 +}
  1.2948 +
  1.2949 +template <class ZoneIterT, class CompartmentIterT>
  1.2950 +static void
  1.2951 +MarkGrayReferences(JSRuntime *rt)
  1.2952 +{
  1.2953 +    GCMarker *gcmarker = &rt->gcMarker;
  1.2954 +
  1.2955 +    {
  1.2956 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
  1.2957 +        gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_SWEEP_MARK_GRAY);
  1.2958 +        gcmarker->setMarkColorGray();
  1.2959 +        if (gcmarker->hasBufferedGrayRoots()) {
  1.2960 +            for (ZoneIterT zone(rt); !zone.done(); zone.next())
  1.2961 +                gcmarker->markBufferedGrayRoots(zone);
  1.2962 +        } else {
  1.2963 +            JS_ASSERT(!rt->gcIsIncremental);
  1.2964 +            if (JSTraceDataOp op = rt->gcGrayRootTracer.op)
  1.2965 +                (*op)(gcmarker, rt->gcGrayRootTracer.data);
  1.2966 +        }
  1.2967 +        SliceBudget budget;
  1.2968 +        gcmarker->drainMarkStack(budget);
  1.2969 +    }
  1.2970 +
  1.2971 +    MarkWeakReferences<CompartmentIterT>(rt, gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
  1.2972 +
  1.2973 +    JS_ASSERT(gcmarker->isDrained());
  1.2974 +
  1.2975 +    gcmarker->setMarkColorBlack();
  1.2976 +}
  1.2977 +
  1.2978 +static void
  1.2979 +MarkGrayReferencesInCurrentGroup(JSRuntime *rt)
  1.2980 +{
  1.2981 +    MarkGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>(rt);
  1.2982 +}
  1.2983 +
  1.2984 +#ifdef DEBUG
  1.2985 +
  1.2986 +static void
  1.2987 +MarkAllWeakReferences(JSRuntime *rt, gcstats::Phase phase)
  1.2988 +{
  1.2989 +    MarkWeakReferences<GCCompartmentsIter>(rt, phase);
  1.2990 +}
  1.2991 +
  1.2992 +static void
  1.2993 +MarkAllGrayReferences(JSRuntime *rt)
  1.2994 +{
  1.2995 +    MarkGrayReferences<GCZonesIter, GCCompartmentsIter>(rt);
  1.2996 +}
  1.2997 +
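          +/*
          + * The marking validator (debug builds only) re-runs marking for the collected
          + * zones non-incrementally, records the resulting mark bitmaps, and later
          + * checks that incremental marking was conservative with respect to them:
          + * everything the non-incremental mark found black must also be black, and
          + * nothing it left non-gray may end up gray.
          + */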
  1.2998 +class js::gc::MarkingValidator
  1.2999 +{
  1.3000 +  public:
  1.3001 +    MarkingValidator(JSRuntime *rt);
  1.3002 +    ~MarkingValidator();
  1.3003 +    void nonIncrementalMark();
  1.3004 +    void validate();
  1.3005 +
  1.3006 +  private:
  1.3007 +    JSRuntime *runtime;
  1.3008 +    bool initialized;
  1.3009 +
  1.3010 +    typedef HashMap<Chunk *, ChunkBitmap *, GCChunkHasher, SystemAllocPolicy> BitmapMap;
  1.3011 +    BitmapMap map;
  1.3012 +};
  1.3013 +
  1.3014 +js::gc::MarkingValidator::MarkingValidator(JSRuntime *rt)
  1.3015 +  : runtime(rt),
  1.3016 +    initialized(false)
  1.3017 +{}
  1.3018 +
  1.3019 +js::gc::MarkingValidator::~MarkingValidator()
  1.3020 +{
  1.3021 +    if (!map.initialized())
  1.3022 +        return;
  1.3023 +
  1.3024 +    for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront())
  1.3025 +        js_delete(r.front().value());
  1.3026 +}
  1.3027 +
  1.3028 +void
  1.3029 +js::gc::MarkingValidator::nonIncrementalMark()
  1.3030 +{
  1.3031 +    /*
  1.3032 +     * Perform a non-incremental mark for all collecting zones and record
  1.3033 +     * the results for later comparison.
  1.3034 +     *
  1.3035 +     * Currently this does not validate gray marking.
  1.3036 +     */
  1.3037 +
  1.3038 +    if (!map.init())
  1.3039 +        return;
  1.3040 +
  1.3041 +    GCMarker *gcmarker = &runtime->gcMarker;
  1.3042 +
  1.3043 +    /* Save existing mark bits. */
  1.3044 +    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
  1.3045 +        ChunkBitmap *bitmap = &r.front()->bitmap;
   1.3046 +        ChunkBitmap *entry = js_new<ChunkBitmap>();
  1.3047 +        if (!entry)
  1.3048 +            return;
  1.3049 +
  1.3050 +        memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
  1.3051 +        if (!map.putNew(r.front(), entry))
  1.3052 +            return;
  1.3053 +    }
  1.3054 +
  1.3055 +    /*
  1.3056 +     * Temporarily clear the lists of live weakmaps and array buffers for the
  1.3057 +     * compartments we are collecting.
  1.3058 +     */
  1.3059 +
  1.3060 +    WeakMapVector weakmaps;
  1.3061 +    ArrayBufferVector arrayBuffers;
  1.3062 +    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
  1.3063 +        if (!WeakMapBase::saveCompartmentWeakMapList(c, weakmaps) ||
  1.3064 +            !ArrayBufferObject::saveArrayBufferList(c, arrayBuffers))
  1.3065 +        {
  1.3066 +            return;
  1.3067 +        }
  1.3068 +    }
  1.3069 +
  1.3070 +    /*
  1.3071 +     * After this point, the function should run to completion, so we shouldn't
  1.3072 +     * do anything fallible.
  1.3073 +     */
  1.3074 +    initialized = true;
  1.3075 +
  1.3076 +    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
  1.3077 +        WeakMapBase::resetCompartmentWeakMapList(c);
  1.3078 +        ArrayBufferObject::resetArrayBufferList(c);
  1.3079 +    }
  1.3080 +
  1.3081 +    /* Re-do all the marking, but non-incrementally. */
  1.3082 +    js::gc::State state = runtime->gcIncrementalState;
  1.3083 +    runtime->gcIncrementalState = MARK_ROOTS;
  1.3084 +
  1.3085 +    JS_ASSERT(gcmarker->isDrained());
  1.3086 +    gcmarker->reset();
  1.3087 +
  1.3088 +    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront())
  1.3089 +        r.front()->bitmap.clear();
  1.3090 +
  1.3091 +    {
  1.3092 +        gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK);
  1.3093 +        gcstats::AutoPhase ap2(runtime->gcStats, gcstats::PHASE_MARK_ROOTS);
  1.3094 +        MarkRuntime(gcmarker, true);
  1.3095 +    }
  1.3096 +
  1.3097 +    {
  1.3098 +        gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK);
  1.3099 +        SliceBudget budget;
  1.3100 +        runtime->gcIncrementalState = MARK;
  1.3101 +        runtime->gcMarker.drainMarkStack(budget);
  1.3102 +    }
  1.3103 +
  1.3104 +    runtime->gcIncrementalState = SWEEP;
  1.3105 +    {
  1.3106 +        gcstats::AutoPhase ap(runtime->gcStats, gcstats::PHASE_SWEEP);
  1.3107 +        MarkAllWeakReferences(runtime, gcstats::PHASE_SWEEP_MARK_WEAK);
  1.3108 +
  1.3109 +        /* Update zone state for gray marking. */
  1.3110 +        for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
  1.3111 +            JS_ASSERT(zone->isGCMarkingBlack());
  1.3112 +            zone->setGCState(Zone::MarkGray);
  1.3113 +        }
  1.3114 +
  1.3115 +        MarkAllGrayReferences(runtime);
  1.3116 +
  1.3117 +        /* Restore zone state. */
  1.3118 +        for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
  1.3119 +            JS_ASSERT(zone->isGCMarkingGray());
  1.3120 +            zone->setGCState(Zone::Mark);
  1.3121 +        }
  1.3122 +    }
  1.3123 +
  1.3124 +    /* Take a copy of the non-incremental mark state and restore the original. */
  1.3125 +    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
  1.3126 +        Chunk *chunk = r.front();
  1.3127 +        ChunkBitmap *bitmap = &chunk->bitmap;
  1.3128 +        ChunkBitmap *entry = map.lookup(chunk)->value();
  1.3129 +        Swap(*entry, *bitmap);
  1.3130 +    }
  1.3131 +
  1.3132 +    for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
  1.3133 +        WeakMapBase::resetCompartmentWeakMapList(c);
  1.3134 +        ArrayBufferObject::resetArrayBufferList(c);
  1.3135 +    }
  1.3136 +    WeakMapBase::restoreCompartmentWeakMapLists(weakmaps);
  1.3137 +    ArrayBufferObject::restoreArrayBufferLists(arrayBuffers);
  1.3138 +
  1.3139 +    runtime->gcIncrementalState = state;
  1.3140 +}
  1.3141 +
  1.3142 +void
  1.3143 +js::gc::MarkingValidator::validate()
  1.3144 +{
  1.3145 +    /*
   1.3146 +     * Validate the incremental marking for the collected zones by comparing
   1.3147 +     * the mark bits to those previously recorded for a non-incremental mark.
  1.3148 +     */
  1.3149 +
  1.3150 +    if (!initialized)
  1.3151 +        return;
  1.3152 +
  1.3153 +    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
  1.3154 +        Chunk *chunk = r.front();
  1.3155 +        BitmapMap::Ptr ptr = map.lookup(chunk);
  1.3156 +        if (!ptr)
  1.3157 +            continue;  /* Allocated after we did the non-incremental mark. */
  1.3158 +
  1.3159 +        ChunkBitmap *bitmap = ptr->value();
  1.3160 +        ChunkBitmap *incBitmap = &chunk->bitmap;
  1.3161 +
  1.3162 +        for (size_t i = 0; i < ArenasPerChunk; i++) {
  1.3163 +            if (chunk->decommittedArenas.get(i))
  1.3164 +                continue;
  1.3165 +            Arena *arena = &chunk->arenas[i];
  1.3166 +            if (!arena->aheader.allocated())
  1.3167 +                continue;
  1.3168 +            if (!arena->aheader.zone->isGCSweeping())
  1.3169 +                continue;
  1.3170 +            if (arena->aheader.allocatedDuringIncremental)
  1.3171 +                continue;
  1.3172 +
  1.3173 +            AllocKind kind = arena->aheader.getAllocKind();
  1.3174 +            uintptr_t thing = arena->thingsStart(kind);
  1.3175 +            uintptr_t end = arena->thingsEnd();
  1.3176 +            while (thing < end) {
  1.3177 +                Cell *cell = (Cell *)thing;
  1.3178 +
  1.3179 +                /*
  1.3180 +                 * If a non-incremental GC wouldn't have collected a cell, then
  1.3181 +                 * an incremental GC won't collect it.
  1.3182 +                 */
  1.3183 +                JS_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap->isMarked(cell, BLACK));
  1.3184 +
  1.3185 +                /*
  1.3186 +                 * If the cycle collector isn't allowed to collect an object
  1.3187 +                 * after a non-incremental GC has run, then it isn't allowed to
   1.3188 +                 * collect it after an incremental GC.
  1.3189 +                 */
  1.3190 +                JS_ASSERT_IF(!bitmap->isMarked(cell, GRAY), !incBitmap->isMarked(cell, GRAY));
  1.3191 +
  1.3192 +                thing += Arena::thingSize(kind);
  1.3193 +            }
  1.3194 +        }
  1.3195 +    }
  1.3196 +}
  1.3197 +
  1.3198 +#endif
  1.3199 +
  1.3200 +static void
  1.3201 +ComputeNonIncrementalMarkingForValidation(JSRuntime *rt)
  1.3202 +{
  1.3203 +#ifdef DEBUG
  1.3204 +    JS_ASSERT(!rt->gcMarkingValidator);
  1.3205 +    if (rt->gcIsIncremental && rt->gcValidate)
  1.3206 +        rt->gcMarkingValidator = js_new<MarkingValidator>(rt);
  1.3207 +    if (rt->gcMarkingValidator)
  1.3208 +        rt->gcMarkingValidator->nonIncrementalMark();
  1.3209 +#endif
  1.3210 +}
  1.3211 +
  1.3212 +static void
  1.3213 +ValidateIncrementalMarking(JSRuntime *rt)
  1.3214 +{
  1.3215 +#ifdef DEBUG
  1.3216 +    if (rt->gcMarkingValidator)
  1.3217 +        rt->gcMarkingValidator->validate();
  1.3218 +#endif
  1.3219 +}
  1.3220 +
  1.3221 +static void
  1.3222 +FinishMarkingValidation(JSRuntime *rt)
  1.3223 +{
  1.3224 +#ifdef DEBUG
  1.3225 +    js_delete(rt->gcMarkingValidator);
  1.3226 +    rt->gcMarkingValidator = nullptr;
  1.3227 +#endif
  1.3228 +}
  1.3229 +
  1.3230 +static void
  1.3231 +AssertNeedsBarrierFlagsConsistent(JSRuntime *rt)
  1.3232 +{
  1.3233 +#ifdef DEBUG
  1.3234 +    bool anyNeedsBarrier = false;
  1.3235 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
  1.3236 +        anyNeedsBarrier |= zone->needsBarrier();
  1.3237 +    JS_ASSERT(rt->needsBarrier() == anyNeedsBarrier);
  1.3238 +#endif
  1.3239 +}
  1.3240 +
  1.3241 +static void
  1.3242 +DropStringWrappers(JSRuntime *rt)
  1.3243 +{
  1.3244 +    /*
  1.3245 +     * String "wrappers" are dropped on GC because their presence would require
  1.3246 +     * us to sweep the wrappers in all compartments every time we sweep a
  1.3247 +     * compartment group.
  1.3248 +     */
  1.3249 +    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
  1.3250 +        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
  1.3251 +            if (e.front().key().kind == CrossCompartmentKey::StringWrapper)
  1.3252 +                e.removeFront();
  1.3253 +        }
  1.3254 +    }
  1.3255 +}
  1.3256 +
  1.3257 +/*
  1.3258 + * Group zones that must be swept at the same time.
  1.3259 + *
  1.3260 + * If compartment A has an edge to an unmarked object in compartment B, then we
  1.3261 + * must not sweep A in a later slice than we sweep B. That's because a write
   1.3262 + * barrier in A could lead to the unmarked object in B becoming
  1.3263 + * marked. However, if we had already swept that object, we would be in trouble.
  1.3264 + *
  1.3265 + * If we consider these dependencies as a graph, then all the compartments in
  1.3266 + * any strongly-connected component of this graph must be swept in the same
  1.3267 + * slice.
  1.3268 + *
  1.3269 + * Tarjan's algorithm is used to calculate the components.
  1.3270 + */
  1.3271 +
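          +/*
          + * For example (a sketch, not real code): if zone A's compartment holds a
          + * wrapper to an unmarked object in zone B, findOutgoingEdges adds an edge
          + * from A to B, meaning A must not be swept in a later slice than B. If B
          + * likewise has an edge back to A, the two zones form a strongly-connected
          + * component and are placed in the same sweep group.
          + */
          +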
  1.3272 +void
  1.3273 +JSCompartment::findOutgoingEdges(ComponentFinder<JS::Zone> &finder)
  1.3274 +{
  1.3275 +    for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
  1.3276 +        CrossCompartmentKey::Kind kind = e.front().key().kind;
  1.3277 +        JS_ASSERT(kind != CrossCompartmentKey::StringWrapper);
  1.3278 +        Cell *other = e.front().key().wrapped;
  1.3279 +        if (kind == CrossCompartmentKey::ObjectWrapper) {
  1.3280 +            /*
   1.3281 +             * Add an edge to the wrapped object's compartment if the wrapped
   1.3282 +             * object is not marked black, to indicate that the wrapper
   1.3283 +             * compartment must not be swept after the wrapped compartment.
  1.3284 +             */
  1.3285 +            if (!other->isMarked(BLACK) || other->isMarked(GRAY)) {
  1.3286 +                JS::Zone *w = other->tenuredZone();
  1.3287 +                if (w->isGCMarking())
  1.3288 +                    finder.addEdgeTo(w);
  1.3289 +            }
  1.3290 +        } else {
  1.3291 +            JS_ASSERT(kind == CrossCompartmentKey::DebuggerScript ||
  1.3292 +                      kind == CrossCompartmentKey::DebuggerSource ||
  1.3293 +                      kind == CrossCompartmentKey::DebuggerObject ||
  1.3294 +                      kind == CrossCompartmentKey::DebuggerEnvironment);
  1.3295 +            /*
   1.3296 +             * Add an edge for debugger object wrappers, to ensure (in conjunction
   1.3297 +             * with the call to Debugger::findCompartmentEdges below) that debugger
  1.3298 +             * and debuggee objects are always swept in the same group.
  1.3299 +             */
  1.3300 +            JS::Zone *w = other->tenuredZone();
  1.3301 +            if (w->isGCMarking())
  1.3302 +                finder.addEdgeTo(w);
  1.3303 +        }
  1.3304 +    }
  1.3305 +
  1.3306 +    Debugger::findCompartmentEdges(zone(), finder);
  1.3307 +}
  1.3308 +
  1.3309 +void
  1.3310 +Zone::findOutgoingEdges(ComponentFinder<JS::Zone> &finder)
  1.3311 +{
  1.3312 +    /*
  1.3313 +     * Any compartment may have a pointer to an atom in the atoms
  1.3314 +     * compartment, and these aren't in the cross compartment map.
  1.3315 +     */
  1.3316 +    JSRuntime *rt = runtimeFromMainThread();
  1.3317 +    if (rt->atomsCompartment()->zone()->isGCMarking())
  1.3318 +        finder.addEdgeTo(rt->atomsCompartment()->zone());
  1.3319 +
  1.3320 +    for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
  1.3321 +        comp->findOutgoingEdges(finder);
  1.3322 +}
  1.3323 +
  1.3324 +static void
  1.3325 +FindZoneGroups(JSRuntime *rt)
  1.3326 +{
  1.3327 +    ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
  1.3328 +    if (!rt->gcIsIncremental)
  1.3329 +        finder.useOneComponent();
  1.3330 +
  1.3331 +    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.3332 +        JS_ASSERT(zone->isGCMarking());
  1.3333 +        finder.addNode(zone);
  1.3334 +    }
  1.3335 +    rt->gcZoneGroups = finder.getResultsList();
  1.3336 +    rt->gcCurrentZoneGroup = rt->gcZoneGroups;
  1.3337 +    rt->gcZoneGroupIndex = 0;
  1.3338 +    JS_ASSERT_IF(!rt->gcIsIncremental, !rt->gcCurrentZoneGroup->nextGroup());
  1.3339 +}
  1.3340 +
  1.3341 +static void
  1.3342 +ResetGrayList(JSCompartment* comp);
  1.3343 +
  1.3344 +static void
  1.3345 +GetNextZoneGroup(JSRuntime *rt)
  1.3346 +{
  1.3347 +    rt->gcCurrentZoneGroup = rt->gcCurrentZoneGroup->nextGroup();
  1.3348 +    ++rt->gcZoneGroupIndex;
  1.3349 +    if (!rt->gcCurrentZoneGroup) {
  1.3350 +        rt->gcAbortSweepAfterCurrentGroup = false;
  1.3351 +        return;
  1.3352 +    }
  1.3353 +
  1.3354 +    if (!rt->gcIsIncremental)
  1.3355 +        ComponentFinder<Zone>::mergeGroups(rt->gcCurrentZoneGroup);
  1.3356 +
  1.3357 +    if (rt->gcAbortSweepAfterCurrentGroup) {
  1.3358 +        JS_ASSERT(!rt->gcIsIncremental);
  1.3359 +        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3360 +            JS_ASSERT(!zone->gcNextGraphComponent);
  1.3361 +            JS_ASSERT(zone->isGCMarking());
  1.3362 +            zone->setNeedsBarrier(false, Zone::UpdateIon);
  1.3363 +            zone->setGCState(Zone::NoGC);
  1.3364 +            zone->gcGrayRoots.clearAndFree();
  1.3365 +        }
  1.3366 +        rt->setNeedsBarrier(false);
  1.3367 +        AssertNeedsBarrierFlagsConsistent(rt);
  1.3368 +
  1.3369 +        for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next()) {
  1.3370 +            ArrayBufferObject::resetArrayBufferList(comp);
  1.3371 +            ResetGrayList(comp);
  1.3372 +        }
  1.3373 +
  1.3374 +        rt->gcAbortSweepAfterCurrentGroup = false;
  1.3375 +        rt->gcCurrentZoneGroup = nullptr;
  1.3376 +    }
  1.3377 +}
  1.3378 +
  1.3379 +/*
  1.3380 + * Gray marking:
  1.3381 + *
  1.3382 + * At the end of collection, anything reachable from a gray root that has not
  1.3383 + * otherwise been marked black must be marked gray.
  1.3384 + *
  1.3385 + * This means that when marking things gray we must not allow marking to leave
  1.3386 + * the current compartment group, as that could result in things being marked
   1.3387 + * gray when they might subsequently be marked black.  To achieve this, when we
  1.3388 + * find a cross compartment pointer we don't mark the referent but add it to a
  1.3389 + * singly-linked list of incoming gray pointers that is stored with each
  1.3390 + * compartment.
  1.3391 + *
  1.3392 + * The list head is stored in JSCompartment::gcIncomingGrayPointers and contains
  1.3393 + * cross compartment wrapper objects. The next pointer is stored in the second
  1.3394 + * extra slot of the cross compartment wrapper.
  1.3395 + *
  1.3396 + * The list is created during gray marking when one of the
  1.3397 + * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
   1.3398 + * current compartment group.  This calls DelayCrossCompartmentGrayMarking to
  1.3399 + * push the referring object onto the list.
  1.3400 + *
  1.3401 + * The list is traversed and then unlinked in
  1.3402 + * MarkIncomingCrossCompartmentPointers.
  1.3403 + */
  1.3404 +
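          +/*
          + * Illustrative sketch of the list structure (not real code):
          + *
          + *   comp->gcIncomingGrayPointers -> wrapperA -> wrapperB -> nullptr
          + *
          + * where each arrow after the head is the value stored in the wrapper's
          + * grayLinkSlot and the referent of every wrapper on the list lives in comp.
          + */
          +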
  1.3405 +static bool
  1.3406 +IsGrayListObject(JSObject *obj)
  1.3407 +{
  1.3408 +    JS_ASSERT(obj);
  1.3409 +    return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
  1.3410 +}
  1.3411 +
  1.3412 +/* static */ unsigned
  1.3413 +ProxyObject::grayLinkSlot(JSObject *obj)
  1.3414 +{
  1.3415 +    JS_ASSERT(IsGrayListObject(obj));
  1.3416 +    return ProxyObject::EXTRA_SLOT + 1;
  1.3417 +}
  1.3418 +
  1.3419 +#ifdef DEBUG
  1.3420 +static void
  1.3421 +AssertNotOnGrayList(JSObject *obj)
  1.3422 +{
  1.3423 +    JS_ASSERT_IF(IsGrayListObject(obj),
  1.3424 +                 obj->getReservedSlot(ProxyObject::grayLinkSlot(obj)).isUndefined());
  1.3425 +}
  1.3426 +#endif
  1.3427 +
  1.3428 +static JSObject *
  1.3429 +CrossCompartmentPointerReferent(JSObject *obj)
  1.3430 +{
  1.3431 +    JS_ASSERT(IsGrayListObject(obj));
  1.3432 +    return &obj->as<ProxyObject>().private_().toObject();
  1.3433 +}
  1.3434 +
  1.3435 +static JSObject *
  1.3436 +NextIncomingCrossCompartmentPointer(JSObject *prev, bool unlink)
  1.3437 +{
  1.3438 +    unsigned slot = ProxyObject::grayLinkSlot(prev);
  1.3439 +    JSObject *next = prev->getReservedSlot(slot).toObjectOrNull();
  1.3440 +    JS_ASSERT_IF(next, IsGrayListObject(next));
  1.3441 +
  1.3442 +    if (unlink)
  1.3443 +        prev->setSlot(slot, UndefinedValue());
  1.3444 +
  1.3445 +    return next;
  1.3446 +}
  1.3447 +
  1.3448 +void
  1.3449 +js::DelayCrossCompartmentGrayMarking(JSObject *src)
  1.3450 +{
  1.3451 +    JS_ASSERT(IsGrayListObject(src));
  1.3452 +
  1.3453 +    /* Called from MarkCrossCompartmentXXX functions. */
  1.3454 +    unsigned slot = ProxyObject::grayLinkSlot(src);
  1.3455 +    JSObject *dest = CrossCompartmentPointerReferent(src);
  1.3456 +    JSCompartment *comp = dest->compartment();
  1.3457 +
  1.3458 +    if (src->getReservedSlot(slot).isUndefined()) {
  1.3459 +        src->setCrossCompartmentSlot(slot, ObjectOrNullValue(comp->gcIncomingGrayPointers));
  1.3460 +        comp->gcIncomingGrayPointers = src;
  1.3461 +    } else {
  1.3462 +        JS_ASSERT(src->getReservedSlot(slot).isObjectOrNull());
  1.3463 +    }
  1.3464 +
  1.3465 +#ifdef DEBUG
  1.3466 +    /*
  1.3467 +     * Assert that the object is in our list, also walking the list to check its
  1.3468 +     * integrity.
  1.3469 +     */
  1.3470 +    JSObject *obj = comp->gcIncomingGrayPointers;
  1.3471 +    bool found = false;
  1.3472 +    while (obj) {
  1.3473 +        if (obj == src)
  1.3474 +            found = true;
  1.3475 +        obj = NextIncomingCrossCompartmentPointer(obj, false);
  1.3476 +    }
  1.3477 +    JS_ASSERT(found);
  1.3478 +#endif
  1.3479 +}
  1.3480 +
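          +/*
          + * Mark the referents of the incoming cross-compartment pointers recorded for
          + * each compartment in the current group, using the given mark color. When
          + * marking gray, the lists are also unlinked as they are traversed.
          + */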
  1.3481 +static void
  1.3482 +MarkIncomingCrossCompartmentPointers(JSRuntime *rt, const uint32_t color)
  1.3483 +{
  1.3484 +    JS_ASSERT(color == BLACK || color == GRAY);
  1.3485 +
  1.3486 +    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
  1.3487 +    static const gcstats::Phase statsPhases[] = {
  1.3488 +        gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
  1.3489 +        gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
  1.3490 +    };
  1.3491 +    gcstats::AutoPhase ap1(rt->gcStats, statsPhases[color]);
  1.3492 +
  1.3493 +    bool unlinkList = color == GRAY;
  1.3494 +
  1.3495 +    for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
  1.3496 +        JS_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
  1.3497 +        JS_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
  1.3498 +        JS_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers));
  1.3499 +
  1.3500 +        for (JSObject *src = c->gcIncomingGrayPointers;
  1.3501 +             src;
  1.3502 +             src = NextIncomingCrossCompartmentPointer(src, unlinkList))
  1.3503 +        {
  1.3504 +            JSObject *dst = CrossCompartmentPointerReferent(src);
  1.3505 +            JS_ASSERT(dst->compartment() == c);
  1.3506 +
  1.3507 +            if (color == GRAY) {
  1.3508 +                if (IsObjectMarked(&src) && src->isMarked(GRAY))
  1.3509 +                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
  1.3510 +                                           "cross-compartment gray pointer");
  1.3511 +            } else {
  1.3512 +                if (IsObjectMarked(&src) && !src->isMarked(GRAY))
  1.3513 +                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
  1.3514 +                                           "cross-compartment black pointer");
  1.3515 +            }
  1.3516 +        }
  1.3517 +
  1.3518 +        if (unlinkList)
  1.3519 +            c->gcIncomingGrayPointers = nullptr;
  1.3520 +    }
  1.3521 +
  1.3522 +    SliceBudget budget;
  1.3523 +    rt->gcMarker.drainMarkStack(budget);
  1.3524 +}
  1.3525 +
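          +/*
          + * Unlink |wrapper| from the incoming gray pointer list of its referent's
          + * compartment. Returns whether the wrapper was actually on a list.
          + */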
  1.3526 +static bool
  1.3527 +RemoveFromGrayList(JSObject *wrapper)
  1.3528 +{
  1.3529 +    if (!IsGrayListObject(wrapper))
  1.3530 +        return false;
  1.3531 +
  1.3532 +    unsigned slot = ProxyObject::grayLinkSlot(wrapper);
  1.3533 +    if (wrapper->getReservedSlot(slot).isUndefined())
  1.3534 +        return false;  /* Not on our list. */
  1.3535 +
  1.3536 +    JSObject *tail = wrapper->getReservedSlot(slot).toObjectOrNull();
  1.3537 +    wrapper->setReservedSlot(slot, UndefinedValue());
  1.3538 +
  1.3539 +    JSCompartment *comp = CrossCompartmentPointerReferent(wrapper)->compartment();
  1.3540 +    JSObject *obj = comp->gcIncomingGrayPointers;
  1.3541 +    if (obj == wrapper) {
  1.3542 +        comp->gcIncomingGrayPointers = tail;
  1.3543 +        return true;
  1.3544 +    }
  1.3545 +
  1.3546 +    while (obj) {
  1.3547 +        unsigned slot = ProxyObject::grayLinkSlot(obj);
  1.3548 +        JSObject *next = obj->getReservedSlot(slot).toObjectOrNull();
  1.3549 +        if (next == wrapper) {
  1.3550 +            obj->setCrossCompartmentSlot(slot, ObjectOrNullValue(tail));
  1.3551 +            return true;
  1.3552 +        }
  1.3553 +        obj = next;
  1.3554 +    }
  1.3555 +
  1.3556 +    MOZ_ASSUME_UNREACHABLE("object not found in gray link list");
  1.3557 +}
  1.3558 +
  1.3559 +static void
  1.3560 +ResetGrayList(JSCompartment *comp)
  1.3561 +{
  1.3562 +    JSObject *src = comp->gcIncomingGrayPointers;
  1.3563 +    while (src)
  1.3564 +        src = NextIncomingCrossCompartmentPointer(src, true);
  1.3565 +    comp->gcIncomingGrayPointers = nullptr;
  1.3566 +}
  1.3567 +
  1.3568 +void
  1.3569 +js::NotifyGCNukeWrapper(JSObject *obj)
  1.3570 +{
  1.3571 +    /*
   1.3572 +     * References to the target of the wrapper are being removed; we no longer
   1.3573 +     * have to remember to mark it.
  1.3574 +     */
  1.3575 +    RemoveFromGrayList(obj);
  1.3576 +}
  1.3577 +
  1.3578 +enum {
  1.3579 +    JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0,
  1.3580 +    JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1
  1.3581 +};
  1.3582 +
  1.3583 +unsigned
  1.3584 +js::NotifyGCPreSwap(JSObject *a, JSObject *b)
  1.3585 +{
  1.3586 +    /*
   1.3587 +     * Two objects in the same compartment are about to have their contents
   1.3588 +     * swapped.  If either of them is in our gray pointer list, then we remove
  1.3589 +     * them from the lists, returning a bitset indicating what happened.
  1.3590 +     */
  1.3591 +    return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) |
  1.3592 +           (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0);
  1.3593 +}
  1.3594 +
  1.3595 +void
  1.3596 +js::NotifyGCPostSwap(JSObject *a, JSObject *b, unsigned removedFlags)
  1.3597 +{
  1.3598 +    /*
  1.3599 +     * Two objects in the same compartment have had their contents swapped.  If
   1.3600 +     * either of them was in our gray pointer list, we re-add them here.
  1.3601 +     */
  1.3602 +    if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED)
  1.3603 +        DelayCrossCompartmentGrayMarking(b);
  1.3604 +    if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
  1.3605 +        DelayCrossCompartmentGrayMarking(a);
  1.3606 +}
  1.3607 +
  1.3608 +static void
  1.3609 +EndMarkingZoneGroup(JSRuntime *rt)
  1.3610 +{
  1.3611 +    /*
  1.3612 +     * Mark any incoming black pointers from previously swept compartments
  1.3613 +     * whose referents are not marked. This can occur when gray cells become
  1.3614 +     * black by the action of UnmarkGray.
  1.3615 +     */
  1.3616 +    MarkIncomingCrossCompartmentPointers(rt, BLACK);
  1.3617 +
  1.3618 +    MarkWeakReferencesInCurrentGroup(rt, gcstats::PHASE_SWEEP_MARK_WEAK);
  1.3619 +
  1.3620 +    /*
   1.3621 +     * Change the state of the current group to MarkGray to restrict marking to this
  1.3622 +     * group.  Note that there may be pointers to the atoms compartment, and
  1.3623 +     * these will be marked through, as they are not marked with
  1.3624 +     * MarkCrossCompartmentXXX.
  1.3625 +     */
  1.3626 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3627 +        JS_ASSERT(zone->isGCMarkingBlack());
  1.3628 +        zone->setGCState(Zone::MarkGray);
  1.3629 +    }
  1.3630 +
  1.3631 +    /* Mark incoming gray pointers from previously swept compartments. */
  1.3632 +    rt->gcMarker.setMarkColorGray();
  1.3633 +    MarkIncomingCrossCompartmentPointers(rt, GRAY);
  1.3634 +    rt->gcMarker.setMarkColorBlack();
  1.3635 +
  1.3636 +    /* Mark gray roots and mark transitively inside the current compartment group. */
  1.3637 +    MarkGrayReferencesInCurrentGroup(rt);
  1.3638 +
  1.3639 +    /* Restore marking state. */
  1.3640 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3641 +        JS_ASSERT(zone->isGCMarkingGray());
  1.3642 +        zone->setGCState(Zone::Mark);
  1.3643 +    }
  1.3644 +
  1.3645 +    JS_ASSERT(rt->gcMarker.isDrained());
  1.3646 +}
  1.3647 +
  1.3648 +static void
  1.3649 +BeginSweepingZoneGroup(JSRuntime *rt)
  1.3650 +{
  1.3651 +    /*
  1.3652 +     * Begin sweeping the group of zones in gcCurrentZoneGroup,
  1.3653 +     * performing actions that must be done before yielding to caller.
  1.3654 +     */
  1.3655 +
  1.3656 +    bool sweepingAtoms = false;
  1.3657 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3658 +        /* Set the GC state to sweeping. */
  1.3659 +        JS_ASSERT(zone->isGCMarking());
  1.3660 +        zone->setGCState(Zone::Sweep);
  1.3661 +
  1.3662 +        /* Purge the ArenaLists before sweeping. */
  1.3663 +        zone->allocator.arenas.purge();
  1.3664 +
  1.3665 +        if (rt->isAtomsZone(zone))
  1.3666 +            sweepingAtoms = true;
  1.3667 +
  1.3668 +        if (rt->sweepZoneCallback)
  1.3669 +            rt->sweepZoneCallback(zone);
  1.3670 +    }
  1.3671 +
  1.3672 +    ValidateIncrementalMarking(rt);
  1.3673 +
  1.3674 +    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
  1.3675 +
  1.3676 +    {
  1.3677 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
  1.3678 +        if (rt->gcFinalizeCallback)
  1.3679 +            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gcIsFull /* unused */);
  1.3680 +    }
  1.3681 +
  1.3682 +    if (sweepingAtoms) {
  1.3683 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_ATOMS);
  1.3684 +        rt->sweepAtoms();
  1.3685 +    }
  1.3686 +
  1.3687 +    /* Prune out dead views from ArrayBuffer's view lists. */
  1.3688 +    for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
  1.3689 +        ArrayBufferObject::sweep(c);
  1.3690 +
  1.3691 +    /* Collect watch points associated with unreachable objects. */
  1.3692 +    WatchpointMap::sweepAll(rt);
  1.3693 +
  1.3694 +    /* Detach unreachable debuggers and global objects from each other. */
  1.3695 +    Debugger::sweepAll(&fop);
  1.3696 +
  1.3697 +    {
  1.3698 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_COMPARTMENTS);
  1.3699 +
  1.3700 +        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3701 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_DISCARD_CODE);
  1.3702 +            zone->discardJitCode(&fop);
  1.3703 +        }
  1.3704 +
  1.3705 +        bool releaseTypes = ReleaseObservedTypes(rt);
  1.3706 +        for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
  1.3707 +            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3708 +            c->sweep(&fop, releaseTypes && !c->zone()->isPreservingCode());
  1.3709 +        }
  1.3710 +
  1.3711 +        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3712 +            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3713 +
  1.3714 +            // If there is an OOM while sweeping types, the type information
  1.3715 +            // will be deoptimized so that it is still correct (i.e.
  1.3716 +            // overapproximates the possible types in the zone), but the
  1.3717 +            // constraints might not have been triggered on the deoptimization
  1.3718 +            // or even copied over completely. In this case, destroy all JIT
  1.3719 +            // code and new script addendums in the zone, the only things whose
  1.3720 +            // correctness depends on the type constraints.
  1.3721 +            bool oom = false;
  1.3722 +            zone->sweep(&fop, releaseTypes && !zone->isPreservingCode(), &oom);
  1.3723 +
  1.3724 +            if (oom) {
  1.3725 +                zone->setPreservingCode(false);
  1.3726 +                zone->discardJitCode(&fop);
  1.3727 +                zone->types.clearAllNewScriptAddendumsOnOOM();
  1.3728 +            }
  1.3729 +        }
  1.3730 +    }
  1.3731 +
  1.3732 +    /*
  1.3733 +     * Queue all GC things in all zones for sweeping, either in the
  1.3734 +     * foreground or on the background thread.
  1.3735 +     *
  1.3736 +     * Note that order is important here for the background case.
  1.3737 +     *
  1.3738 +     * Objects are finalized immediately but this may change in the future.
  1.3739 +     */
  1.3740 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3741 +        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3742 +        zone->allocator.arenas.queueObjectsForSweep(&fop);
  1.3743 +    }
  1.3744 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3745 +        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3746 +        zone->allocator.arenas.queueStringsForSweep(&fop);
  1.3747 +    }
  1.3748 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3749 +        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3750 +        zone->allocator.arenas.queueScriptsForSweep(&fop);
  1.3751 +    }
  1.3752 +#ifdef JS_ION
  1.3753 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3754 +        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3755 +        zone->allocator.arenas.queueJitCodeForSweep(&fop);
  1.3756 +    }
  1.3757 +#endif
  1.3758 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3759 +        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
  1.3760 +        zone->allocator.arenas.queueShapesForSweep(&fop);
  1.3761 +        zone->allocator.arenas.gcShapeArenasToSweep =
  1.3762 +            zone->allocator.arenas.arenaListsToSweep[FINALIZE_SHAPE];
  1.3763 +    }
  1.3764 +
  1.3765 +    rt->gcSweepPhase = 0;
  1.3766 +    rt->gcSweepZone = rt->gcCurrentZoneGroup;
  1.3767 +    rt->gcSweepKindIndex = 0;
  1.3768 +
  1.3769 +    {
  1.3770 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
  1.3771 +        if (rt->gcFinalizeCallback)
  1.3772 +            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gcIsFull /* unused */);
  1.3773 +    }
  1.3774 +}
  1.3775 +
  1.3776 +static void
  1.3777 +EndSweepingZoneGroup(JSRuntime *rt)
  1.3778 +{
  1.3779 +    /* Update the GC state for zones we have swept and unlink the list. */
  1.3780 +    for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
  1.3781 +        JS_ASSERT(zone->isGCSweeping());
  1.3782 +        zone->setGCState(Zone::Finished);
  1.3783 +    }
  1.3784 +
  1.3785 +    /* Reset the list of arenas marked as being allocated during sweep phase. */
  1.3786 +    while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
  1.3787 +        rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
  1.3788 +        arena->unsetAllocDuringSweep();
  1.3789 +    }
  1.3790 +}
  1.3791 +
  1.3792 +static void
  1.3793 +BeginSweepPhase(JSRuntime *rt, bool lastGC)
  1.3794 +{
  1.3795 +    /*
  1.3796 +     * Sweep phase.
  1.3797 +     *
  1.3798 +     * Finalize as we sweep, outside of rt->gcLock but with rt->isHeapBusy()
  1.3799 +     * true so that any attempt to allocate a GC-thing from a finalizer will
  1.3800 +     * fail, rather than nest badly and leave the unmarked newborn to be swept.
  1.3801 +     */
  1.3802 +
  1.3803 +    JS_ASSERT(!rt->gcAbortSweepAfterCurrentGroup);
  1.3804 +
  1.3805 +    ComputeNonIncrementalMarkingForValidation(rt);
  1.3806 +
  1.3807 +    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
  1.3808 +
  1.3809 +#ifdef JS_THREADSAFE
  1.3810 +    rt->gcSweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
  1.3811 +#endif
  1.3812 +
  1.3813 +#ifdef DEBUG
  1.3814 +    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
  1.3815 +        JS_ASSERT(!c->gcIncomingGrayPointers);
  1.3816 +        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
  1.3817 +            if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
  1.3818 +                AssertNotOnGrayList(&e.front().value().get().toObject());
  1.3819 +        }
  1.3820 +    }
  1.3821 +#endif
  1.3822 +
  1.3823 +    DropStringWrappers(rt);
  1.3824 +    FindZoneGroups(rt);
  1.3825 +    EndMarkingZoneGroup(rt);
  1.3826 +    BeginSweepingZoneGroup(rt);
  1.3827 +}
  1.3828 +
  1.3829 +bool
  1.3830 +ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
  1.3831 +{
  1.3832 +    if (!arenaListsToSweep[thingKind])
  1.3833 +        return true;
  1.3834 +
  1.3835 +    ArenaList &dest = arenaLists[thingKind];
  1.3836 +    return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
  1.3837 +}
  1.3838 +
  1.3839 +static bool
  1.3840 +DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget, gcstats::Phase phase)
  1.3841 +{
  1.3842 +    /* Run a marking slice and return whether the stack is now empty. */
  1.3843 +    gcstats::AutoPhase ap(rt->gcStats, phase);
  1.3844 +    return rt->gcMarker.drainMarkStack(sliceBudget);
  1.3845 +}
  1.3846 +
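          +/*
          + * Run one slice of the sweep phase: finish any outstanding marking, then
          + * foreground-finalize queued things and sweep dead shapes for the current
          + * zone group, moving on to the next group when done. Returns true once all
          + * groups have been swept, or false if the slice budget expired and we must
          + * yield to the mutator.
          + */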
  1.3847 +static bool
  1.3848 +SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
  1.3849 +{
  1.3850 +    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
  1.3851 +    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
  1.3852 +
  1.3853 +    bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_SWEEP_MARK);
  1.3854 +    if (!finished)
  1.3855 +        return false;
  1.3856 +
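          +    /*
          +     * Sweeping proceeds as nested loops over finalize phases, zones and
          +     * alloc kinds. The current position is saved in rt->gcSweepPhase,
          +     * rt->gcSweepZone and rt->gcSweepKindIndex so that a slice can yield
          +     * and a later slice can resume exactly where this one left off.
          +     */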
  1.3857 +    for (;;) {
  1.3858 +        /* Finalize foreground finalized things. */
  1.3859 +        for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
  1.3860 +            gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);
  1.3861 +
  1.3862 +            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
  1.3863 +                Zone *zone = rt->gcSweepZone;
  1.3864 +
  1.3865 +                while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
  1.3866 +                    AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
  1.3867 +
  1.3868 +                    if (!zone->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget))
  1.3869 +                        return false;  /* Yield to the mutator. */
  1.3870 +
  1.3871 +                    ++rt->gcSweepKindIndex;
  1.3872 +                }
  1.3873 +                rt->gcSweepKindIndex = 0;
  1.3874 +            }
  1.3875 +            rt->gcSweepZone = rt->gcCurrentZoneGroup;
  1.3876 +        }
  1.3877 +
  1.3878 +        /* Remove dead shapes from the shape tree, but don't finalize them yet. */
  1.3879 +        {
  1.3880 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
  1.3881 +
  1.3882 +            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
  1.3883 +                Zone *zone = rt->gcSweepZone;
  1.3884 +                while (ArenaHeader *arena = zone->allocator.arenas.gcShapeArenasToSweep) {
  1.3885 +                    for (CellIterUnderGC i(arena); !i.done(); i.next()) {
  1.3886 +                        Shape *shape = i.get<Shape>();
  1.3887 +                        if (!shape->isMarked())
  1.3888 +                            shape->sweep();
  1.3889 +                    }
  1.3890 +
  1.3891 +                    zone->allocator.arenas.gcShapeArenasToSweep = arena->next;
  1.3892 +                    sliceBudget.step(Arena::thingsPerArena(Arena::thingSize(FINALIZE_SHAPE)));
  1.3893 +                    if (sliceBudget.isOverBudget())
  1.3894 +                        return false;  /* Yield to the mutator. */
  1.3895 +                }
  1.3896 +            }
  1.3897 +        }
  1.3898 +
  1.3899 +        EndSweepingZoneGroup(rt);
  1.3900 +        GetNextZoneGroup(rt);
  1.3901 +        if (!rt->gcCurrentZoneGroup)
  1.3902 +            return true;  /* We're finished. */
  1.3903 +        EndMarkingZoneGroup(rt);
  1.3904 +        BeginSweepingZoneGroup(rt);
  1.3905 +    }
  1.3906 +}
  1.3907 +
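          +/*
          + * Finish the sweep phase on the main thread: stop the marker, sweep
          + * runtime-wide data such as script data, queue zones for background
          + * sweeping (or sweep them here if there is no background thread) and reset
          + * per-zone GC state.
          + */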
  1.3908 +static void
  1.3909 +EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
  1.3910 +{
  1.3911 +    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
  1.3912 +    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
  1.3913 +
  1.3914 +    JS_ASSERT_IF(lastGC, !rt->gcSweepOnBackgroundThread);
  1.3915 +
  1.3916 +    JS_ASSERT(rt->gcMarker.isDrained());
  1.3917 +    rt->gcMarker.stop();
  1.3918 +
  1.3919 +    /*
  1.3920 +     * Recalculate whether GC was full or not as this may have changed due to
  1.3921 +     * newly created zones.  Can only change from full to not full.
  1.3922 +     */
  1.3923 +    if (rt->gcIsFull) {
  1.3924 +        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.3925 +            if (!zone->isCollecting()) {
  1.3926 +                rt->gcIsFull = false;
  1.3927 +                break;
  1.3928 +            }
  1.3929 +        }
  1.3930 +    }
  1.3931 +
  1.3932 +    /*
  1.3933 +     * If we found any black->gray edges during marking, we completely clear the
   1.3934 +     * mark bits of all uncollected zones (or, if a reset has occurred, of zones
   1.3935 +     * that will no longer be collected). This is safe, although it may
  1.3936 +     * prevent the cycle collector from collecting some dead objects.
  1.3937 +     */
  1.3938 +    if (rt->gcFoundBlackGrayEdges) {
  1.3939 +        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.3940 +            if (!zone->isCollecting())
  1.3941 +                zone->allocator.arenas.unmarkAll();
  1.3942 +        }
  1.3943 +    }
  1.3944 +
  1.3945 +    {
  1.3946 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
  1.3947 +
  1.3948 +        /*
  1.3949 +         * Sweep script filenames after sweeping functions in the generic loop
  1.3950 +         * above. In this way when a scripted function's finalizer destroys the
  1.3951 +         * script and calls rt->destroyScriptHook, the hook can still access the
  1.3952 +         * script's filename. See bug 323267.
  1.3953 +         */
  1.3954 +        if (rt->gcIsFull)
  1.3955 +            SweepScriptData(rt);
  1.3956 +
  1.3957 +        /* Clear out any small pools that we're hanging on to. */
  1.3958 +        if (JSC::ExecutableAllocator *execAlloc = rt->maybeExecAlloc())
  1.3959 +            execAlloc->purge();
  1.3960 +
  1.3961 +        /*
   1.3962 +         * This removes dead compartments from the runtime, so we do it last to make
   1.3963 +         * sure we don't miss sweeping any compartments.
  1.3964 +         */
  1.3965 +        if (!lastGC)
  1.3966 +            SweepZones(&fop, lastGC);
  1.3967 +
  1.3968 +        if (!rt->gcSweepOnBackgroundThread) {
  1.3969 +            /*
   1.3970 +             * Destroy arenas after we have finished sweeping, so finalizers can
  1.3971 +             * safely use IsAboutToBeFinalized(). This is done on the
  1.3972 +             * GCHelperThread if possible. We acquire the lock only because
  1.3973 +             * Expire needs to unlock it for other callers.
  1.3974 +             */
  1.3975 +            AutoLockGC lock(rt);
  1.3976 +            ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
  1.3977 +        }
  1.3978 +    }
  1.3979 +
  1.3980 +    {
  1.3981 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
  1.3982 +
  1.3983 +        if (rt->gcFinalizeCallback)
  1.3984 +            rt->gcFinalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gcIsFull);
  1.3985 +
  1.3986 +        /* If we finished a full GC, then the gray bits are correct. */
  1.3987 +        if (rt->gcIsFull)
  1.3988 +            rt->gcGrayBitsValid = true;
  1.3989 +    }
  1.3990 +
  1.3991 +    /* Set up list of zones for sweeping of background things. */
  1.3992 +    JS_ASSERT(!rt->gcSweepingZones);
  1.3993 +    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.3994 +        zone->gcNextGraphNode = rt->gcSweepingZones;
  1.3995 +        rt->gcSweepingZones = zone;
  1.3996 +    }
  1.3997 +
  1.3998 +    /* If not sweeping on background thread then we must do it here. */
  1.3999 +    if (!rt->gcSweepOnBackgroundThread) {
  1.4000 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
  1.4001 +
  1.4002 +        SweepBackgroundThings(rt, false);
  1.4003 +
  1.4004 +        rt->freeLifoAlloc.freeAll();
  1.4005 +
  1.4006 +        /* Ensure the compartments get swept if it's the last GC. */
  1.4007 +        if (lastGC)
  1.4008 +            SweepZones(&fop, lastGC);
  1.4009 +    }
  1.4010 +
  1.4011 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.4012 +        zone->setGCLastBytes(zone->gcBytes, gckind);
  1.4013 +        if (zone->isCollecting()) {
  1.4014 +            JS_ASSERT(zone->isGCFinished());
  1.4015 +            zone->setGCState(Zone::NoGC);
  1.4016 +        }
  1.4017 +
  1.4018 +#ifdef DEBUG
  1.4019 +        JS_ASSERT(!zone->isCollecting());
  1.4020 +        JS_ASSERT(!zone->wasGCStarted());
  1.4021 +
  1.4022 +        for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
  1.4023 +            JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) ||
  1.4024 +                         !rt->gcSweepOnBackgroundThread,
  1.4025 +                         !zone->allocator.arenas.arenaListsToSweep[i]);
  1.4026 +        }
  1.4027 +#endif
  1.4028 +    }
  1.4029 +
  1.4030 +#ifdef DEBUG
  1.4031 +    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
  1.4032 +        JS_ASSERT(!c->gcIncomingGrayPointers);
  1.4033 +        JS_ASSERT(c->gcLiveArrayBuffers.empty());
  1.4034 +
  1.4035 +        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
  1.4036 +            if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
  1.4037 +                AssertNotOnGrayList(&e.front().value().get().toObject());
  1.4038 +        }
  1.4039 +    }
  1.4040 +#endif
  1.4041 +
  1.4042 +    FinishMarkingValidation(rt);
  1.4043 +
  1.4044 +    rt->gcLastGCTime = PRMJ_Now();
  1.4045 +}
  1.4046 +
  1.4047 +namespace {
  1.4048 +
  1.4049 +/* ...while this class is to be used only for garbage collection. */
  1.4050 +class AutoGCSession
  1.4051 +{
  1.4052 +    JSRuntime *runtime;
  1.4053 +    AutoTraceSession session;
  1.4054 +    bool canceled;
  1.4055 +
  1.4056 +  public:
  1.4057 +    explicit AutoGCSession(JSRuntime *rt);
  1.4058 +    ~AutoGCSession();
  1.4059 +
  1.4060 +    void cancel() { canceled = true; }
  1.4061 +};
  1.4062 +
  1.4063 +} /* anonymous namespace */
  1.4064 +
  1.4065 +/* Start a new heap session. */
  1.4066 +AutoTraceSession::AutoTraceSession(JSRuntime *rt, js::HeapState heapState)
  1.4067 +  : lock(rt),
  1.4068 +    runtime(rt),
  1.4069 +    prevState(rt->heapState)
  1.4070 +{
  1.4071 +    JS_ASSERT(!rt->noGCOrAllocationCheck);
  1.4072 +    JS_ASSERT(!rt->isHeapBusy());
  1.4073 +    JS_ASSERT(heapState != Idle);
  1.4074 +#ifdef JSGC_GENERATIONAL
  1.4075 +    JS_ASSERT_IF(heapState == MajorCollecting, rt->gcNursery.isEmpty());
  1.4076 +#endif
  1.4077 +
  1.4078 +    // Threads with an exclusive context can hit refillFreeList while holding
  1.4079 +    // the exclusive access lock. To avoid deadlocking when we try to acquire
  1.4080 +    // this lock during GC and the other thread is waiting, make sure we hold
  1.4081 +    // the exclusive access lock during GC sessions.
  1.4082 +    JS_ASSERT(rt->currentThreadHasExclusiveAccess());
  1.4083 +
  1.4084 +    if (rt->exclusiveThreadsPresent()) {
  1.4085 +        // Lock the worker thread state when changing the heap state in the
  1.4086 +        // presence of exclusive threads, to avoid racing with refillFreeList.
  1.4087 +#ifdef JS_THREADSAFE
  1.4088 +        AutoLockWorkerThreadState lock;
  1.4089 +        rt->heapState = heapState;
  1.4090 +#else
  1.4091 +        MOZ_CRASH();
  1.4092 +#endif
  1.4093 +    } else {
  1.4094 +        rt->heapState = heapState;
  1.4095 +    }
  1.4096 +}
  1.4097 +
  1.4098 +AutoTraceSession::~AutoTraceSession()
  1.4099 +{
  1.4100 +    JS_ASSERT(runtime->isHeapBusy());
  1.4101 +
  1.4102 +    if (runtime->exclusiveThreadsPresent()) {
  1.4103 +#ifdef JS_THREADSAFE
  1.4104 +        AutoLockWorkerThreadState lock;
  1.4105 +        runtime->heapState = prevState;
  1.4106 +
  1.4107 +        // Notify any worker threads waiting for the trace session to end.
  1.4108 +        WorkerThreadState().notifyAll(GlobalWorkerThreadState::PRODUCER);
  1.4109 +#else
  1.4110 +        MOZ_CRASH();
  1.4111 +#endif
  1.4112 +    } else {
  1.4113 +        runtime->heapState = prevState;
  1.4114 +    }
  1.4115 +}
  1.4116 +
  1.4117 +AutoGCSession::AutoGCSession(JSRuntime *rt)
  1.4118 +  : runtime(rt),
  1.4119 +    session(rt, MajorCollecting),
  1.4120 +    canceled(false)
  1.4121 +{
  1.4122 +    runtime->gcIsNeeded = false;
  1.4123 +    runtime->gcInterFrameGC = true;
  1.4124 +
  1.4125 +    runtime->gcNumber++;
  1.4126 +
  1.4127 +    // It's ok if threads other than the main thread have suppressGC set, as
  1.4128 +    // they are operating on zones which will not be collected from here.
  1.4129 +    JS_ASSERT(!runtime->mainThread.suppressGC);
  1.4130 +}
  1.4131 +
  1.4132 +AutoGCSession::~AutoGCSession()
  1.4133 +{
  1.4134 +    if (canceled)
  1.4135 +        return;
  1.4136 +
  1.4137 +#ifndef JS_MORE_DETERMINISTIC
  1.4138 +    runtime->gcNextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
  1.4139 +#endif
  1.4140 +
  1.4141 +    runtime->gcChunkAllocationSinceLastGC = false;
  1.4142 +
  1.4143 +#ifdef JS_GC_ZEAL
  1.4144 +    /* Keeping these around after a GC is dangerous. */
  1.4145 +    runtime->gcSelectedForMarking.clearAndFree();
  1.4146 +#endif
  1.4147 +
   1.4148 +    /* Clear gcMallocBytes and unschedule GC for all zones. */
  1.4149 +    for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
  1.4150 +        zone->resetGCMallocBytes();
  1.4151 +        zone->unscheduleGC();
  1.4152 +    }
  1.4153 +
  1.4154 +    runtime->resetGCMallocBytes();
  1.4155 +}
  1.4156 +
  1.4157 +AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime *rt, ZoneSelector selector)
  1.4158 +  : runtime(rt),
  1.4159 +    selector(selector)
  1.4160 +{
  1.4161 +    for (ZonesIter zone(rt, selector); !zone.done(); zone.next())
  1.4162 +        zone->allocator.arenas.copyFreeListsToArenas();
  1.4163 +}
  1.4164 +
  1.4165 +AutoCopyFreeListToArenas::~AutoCopyFreeListToArenas()
  1.4166 +{
  1.4167 +    for (ZonesIter zone(runtime, selector); !zone.done(); zone.next())
  1.4168 +        zone->allocator.arenas.clearFreeListsInArenas();
  1.4169 +}
  1.4170 +
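          +// Like AutoCopyFreeListToArenas, but specialized for use inside a GC: it
          +// requires the current thread to hold exclusive access and always covers
          +// every zone, including the atoms zone.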
  1.4171 +class AutoCopyFreeListToArenasForGC
  1.4172 +{
  1.4173 +    JSRuntime *runtime;
  1.4174 +
  1.4175 +  public:
  1.4176 +    AutoCopyFreeListToArenasForGC(JSRuntime *rt) : runtime(rt) {
  1.4177 +        JS_ASSERT(rt->currentThreadHasExclusiveAccess());
  1.4178 +        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
  1.4179 +            zone->allocator.arenas.copyFreeListsToArenas();
  1.4180 +    }
  1.4181 +    ~AutoCopyFreeListToArenasForGC() {
  1.4182 +        for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next())
  1.4183 +            zone->allocator.arenas.clearFreeListsInArenas();
  1.4184 +    }
  1.4185 +};
  1.4186 +
  1.4187 +static void
  1.4188 +IncrementalCollectSlice(JSRuntime *rt,
  1.4189 +                        int64_t budget,
  1.4190 +                        JS::gcreason::Reason gcReason,
  1.4191 +                        JSGCInvocationKind gcKind);
  1.4192 +
  1.4193 +static void
  1.4194 +ResetIncrementalGC(JSRuntime *rt, const char *reason)
  1.4195 +{
  1.4196 +    switch (rt->gcIncrementalState) {
  1.4197 +      case NO_INCREMENTAL:
  1.4198 +        return;
  1.4199 +
  1.4200 +      case MARK: {
  1.4201 +        /* Cancel any ongoing marking. */
  1.4202 +        AutoCopyFreeListToArenasForGC copy(rt);
  1.4203 +
  1.4204 +        rt->gcMarker.reset();
  1.4205 +        rt->gcMarker.stop();
  1.4206 +
  1.4207 +        for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
  1.4208 +            ArrayBufferObject::resetArrayBufferList(c);
  1.4209 +            ResetGrayList(c);
  1.4210 +        }
  1.4211 +
  1.4212 +        for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.4213 +            JS_ASSERT(zone->isGCMarking());
  1.4214 +            zone->setNeedsBarrier(false, Zone::UpdateIon);
  1.4215 +            zone->setGCState(Zone::NoGC);
  1.4216 +        }
  1.4217 +        rt->setNeedsBarrier(false);
  1.4218 +        AssertNeedsBarrierFlagsConsistent(rt);
  1.4219 +
  1.4220 +        rt->gcIncrementalState = NO_INCREMENTAL;
  1.4221 +
  1.4222 +        JS_ASSERT(!rt->gcStrictCompartmentChecking);
  1.4223 +
  1.4224 +        break;
  1.4225 +      }
  1.4226 +
  1.4227 +      case SWEEP:
  1.4228 +        rt->gcMarker.reset();
  1.4229 +
  1.4230 +        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
  1.4231 +            zone->scheduledForDestruction = false;
  1.4232 +
  1.4233 +        /* Finish sweeping the current zone group, then abort. */
  1.4234 +        rt->gcAbortSweepAfterCurrentGroup = true;
  1.4235 +        IncrementalCollectSlice(rt, SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);
  1.4236 +
  1.4237 +        {
  1.4238 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
  1.4239 +            rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
  1.4240 +        }
  1.4241 +        break;
  1.4242 +
  1.4243 +      default:
  1.4244 +        MOZ_ASSUME_UNREACHABLE("Invalid incremental GC state");
  1.4245 +    }
  1.4246 +
  1.4247 +    rt->gcStats.reset(reason);
  1.4248 +
  1.4249 +#ifdef DEBUG
  1.4250 +    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
  1.4251 +        JS_ASSERT(c->gcLiveArrayBuffers.empty());
  1.4252 +
  1.4253 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.4254 +        JS_ASSERT(!zone->needsBarrier());
  1.4255 +        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
  1.4256 +            JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
  1.4257 +    }
  1.4258 +#endif
  1.4259 +}
  1.4260 +
  1.4261 +namespace {
  1.4262 +
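          +/*
          + * Per-slice guard: the constructor turns write barriers off for zones that
          + * are being marked (barriers are unnecessary while the collector itself is
          + * running), and the destructor turns them back on for any zone still in the
          + * marking state before the mutator resumes.
          + */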
  1.4263 +class AutoGCSlice {
  1.4264 +  public:
  1.4265 +    AutoGCSlice(JSRuntime *rt);
  1.4266 +    ~AutoGCSlice();
  1.4267 +
  1.4268 +  private:
  1.4269 +    JSRuntime *runtime;
  1.4270 +};
  1.4271 +
  1.4272 +} /* anonymous namespace */
  1.4273 +
  1.4274 +AutoGCSlice::AutoGCSlice(JSRuntime *rt)
  1.4275 +  : runtime(rt)
  1.4276 +{
  1.4277 +    /*
   1.4278 +     * During incremental GC, the zone's active flag determines whether there
   1.4279 +     * are stack frames active for any of its scripts. Normally this flag is
   1.4280 +     * set at the beginning of the mark phase; during an incremental GC we
   1.4281 +     * also set it at the start of every slice.
  1.4282 +     */
  1.4283 +    for (ActivationIterator iter(rt); !iter.done(); ++iter)
  1.4284 +        iter->compartment()->zone()->active = true;
  1.4285 +
  1.4286 +    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
  1.4287 +        /*
  1.4288 +         * Clear needsBarrier early so we don't do any write barriers during
  1.4289 +         * GC. We don't need to update the Ion barriers (which is expensive)
  1.4290 +         * because Ion code doesn't run during GC. If need be, we'll update the
  1.4291 +         * Ion barriers in ~AutoGCSlice.
  1.4292 +         */
  1.4293 +        if (zone->isGCMarking()) {
  1.4294 +            JS_ASSERT(zone->needsBarrier());
  1.4295 +            zone->setNeedsBarrier(false, Zone::DontUpdateIon);
  1.4296 +        } else {
  1.4297 +            JS_ASSERT(!zone->needsBarrier());
  1.4298 +        }
  1.4299 +    }
  1.4300 +    rt->setNeedsBarrier(false);
  1.4301 +    AssertNeedsBarrierFlagsConsistent(rt);
  1.4302 +}
  1.4303 +
  1.4304 +AutoGCSlice::~AutoGCSlice()
  1.4305 +{
  1.4306 +    /* We can't use GCZonesIter if this is the end of the last slice. */
  1.4307 +    bool haveBarriers = false;
  1.4308 +    for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
  1.4309 +        if (zone->isGCMarking()) {
  1.4310 +            zone->setNeedsBarrier(true, Zone::UpdateIon);
  1.4311 +            zone->allocator.arenas.prepareForIncrementalGC(runtime);
  1.4312 +            haveBarriers = true;
  1.4313 +        } else {
  1.4314 +            zone->setNeedsBarrier(false, Zone::UpdateIon);
  1.4315 +        }
  1.4316 +    }
  1.4317 +    runtime->setNeedsBarrier(haveBarriers);
  1.4318 +    AssertNeedsBarrierFlagsConsistent(runtime);
  1.4319 +}
  1.4320 +
  1.4321 +static void
  1.4322 +PushZealSelectedObjects(JSRuntime *rt)
  1.4323 +{
  1.4324 +#ifdef JS_GC_ZEAL
   1.4325 +    /* Push selected objects onto the mark stack; the list itself is cleared in ~AutoGCSession. */
  1.4326 +    for (JSObject **obj = rt->gcSelectedForMarking.begin();
  1.4327 +         obj != rt->gcSelectedForMarking.end(); obj++)
  1.4328 +    {
  1.4329 +        MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
  1.4330 +    }
  1.4331 +#endif
  1.4332 +}
  1.4333 +
  1.4334 +static void
  1.4335 +IncrementalCollectSlice(JSRuntime *rt,
  1.4336 +                        int64_t budget,
  1.4337 +                        JS::gcreason::Reason reason,
  1.4338 +                        JSGCInvocationKind gckind)
  1.4339 +{
  1.4340 +    JS_ASSERT(rt->currentThreadHasExclusiveAccess());
  1.4341 +
  1.4342 +    AutoCopyFreeListToArenasForGC copy(rt);
  1.4343 +    AutoGCSlice slice(rt);
  1.4344 +
  1.4345 +    bool lastGC = (reason == JS::gcreason::DESTROY_RUNTIME);
  1.4346 +
  1.4347 +    gc::State initialState = rt->gcIncrementalState;
  1.4348 +
  1.4349 +    int zeal = 0;
  1.4350 +#ifdef JS_GC_ZEAL
  1.4351 +    if (reason == JS::gcreason::DEBUG_GC && budget != SliceBudget::Unlimited) {
  1.4352 +        /*
  1.4353 +         * Do the incremental collection type specified by zeal mode if the
  1.4354 +         * collection was triggered by RunDebugGC() and incremental GC has not
  1.4355 +         * been cancelled by ResetIncrementalGC.
  1.4356 +         */
  1.4357 +        zeal = rt->gcZeal();
  1.4358 +    }
  1.4359 +#endif
  1.4360 +
  1.4361 +    JS_ASSERT_IF(rt->gcIncrementalState != NO_INCREMENTAL, rt->gcIsIncremental);
  1.4362 +    rt->gcIsIncremental = budget != SliceBudget::Unlimited;
  1.4363 +
  1.4364 +    if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
  1.4365 +        /*
   1.4366 +         * Yields between slices occur at predetermined points in these modes;
  1.4367 +         * the budget is not used.
  1.4368 +         */
  1.4369 +        budget = SliceBudget::Unlimited;
  1.4370 +    }
  1.4371 +
  1.4372 +    SliceBudget sliceBudget(budget);
  1.4373 +
  1.4374 +    if (rt->gcIncrementalState == NO_INCREMENTAL) {
  1.4375 +        rt->gcIncrementalState = MARK_ROOTS;
  1.4376 +        rt->gcLastMarkSlice = false;
  1.4377 +    }
  1.4378 +
  1.4379 +    if (rt->gcIncrementalState == MARK)
  1.4380 +        AutoGCRooter::traceAllWrappers(&rt->gcMarker);
  1.4381 +
  1.4382 +    switch (rt->gcIncrementalState) {
  1.4383 +
  1.4384 +      case MARK_ROOTS:
  1.4385 +        if (!BeginMarkPhase(rt)) {
  1.4386 +            rt->gcIncrementalState = NO_INCREMENTAL;
  1.4387 +            return;
  1.4388 +        }
  1.4389 +
  1.4390 +        if (!lastGC)
  1.4391 +            PushZealSelectedObjects(rt);
  1.4392 +
  1.4393 +        rt->gcIncrementalState = MARK;
  1.4394 +
  1.4395 +        if (rt->gcIsIncremental && zeal == ZealIncrementalRootsThenFinish)
  1.4396 +            break;
  1.4397 +
  1.4398 +        /* fall through */
  1.4399 +
  1.4400 +      case MARK: {
  1.4401 +        /* If we needed delayed marking for gray roots, then collect until done. */
  1.4402 +        if (!rt->gcMarker.hasBufferedGrayRoots()) {
  1.4403 +            sliceBudget.reset();
  1.4404 +            rt->gcIsIncremental = false;
  1.4405 +        }
  1.4406 +
  1.4407 +        bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_MARK);
  1.4408 +        if (!finished)
  1.4409 +            break;
  1.4410 +
  1.4411 +        JS_ASSERT(rt->gcMarker.isDrained());
  1.4412 +
  1.4413 +        if (!rt->gcLastMarkSlice && rt->gcIsIncremental &&
  1.4414 +            ((initialState == MARK && zeal != ZealIncrementalRootsThenFinish) ||
  1.4415 +             zeal == ZealIncrementalMarkAllThenFinish))
  1.4416 +        {
  1.4417 +            /*
  1.4418 +             * Yield with the aim of starting the sweep in the next
  1.4419 +             * slice.  We will need to mark anything new on the stack
  1.4420 +             * when we resume, so we stay in MARK state.
  1.4421 +             */
  1.4422 +            rt->gcLastMarkSlice = true;
  1.4423 +            break;
  1.4424 +        }
  1.4425 +
  1.4426 +        rt->gcIncrementalState = SWEEP;
  1.4427 +
  1.4428 +        /*
  1.4429 +         * This runs to completion, but we don't continue if the budget is
   1.4430 +         * now exhausted.
  1.4431 +         */
  1.4432 +        BeginSweepPhase(rt, lastGC);
  1.4433 +        if (sliceBudget.isOverBudget())
  1.4434 +            break;
  1.4435 +
  1.4436 +        /*
  1.4437 +         * Always yield here when running in incremental multi-slice zeal
   1.4438 +         * mode, so RunDebugGC can reset the slice budget.
  1.4439 +         */
  1.4440 +        if (rt->gcIsIncremental && zeal == ZealIncrementalMultipleSlices)
  1.4441 +            break;
  1.4442 +
  1.4443 +        /* fall through */
  1.4444 +      }
  1.4445 +
  1.4446 +      case SWEEP: {
  1.4447 +        bool finished = SweepPhase(rt, sliceBudget);
  1.4448 +        if (!finished)
  1.4449 +            break;
  1.4450 +
  1.4451 +        EndSweepPhase(rt, gckind, lastGC);
  1.4452 +
  1.4453 +        if (rt->gcSweepOnBackgroundThread)
  1.4454 +            rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
  1.4455 +
  1.4456 +        rt->gcIncrementalState = NO_INCREMENTAL;
  1.4457 +        break;
  1.4458 +      }
  1.4459 +
  1.4460 +      default:
  1.4461 +        JS_ASSERT(false);
  1.4462 +    }
  1.4463 +}
  1.4464 +
  1.4465 +IncrementalSafety
  1.4466 +gc::IsIncrementalGCSafe(JSRuntime *rt)
  1.4467 +{
  1.4468 +    JS_ASSERT(!rt->mainThread.suppressGC);
  1.4469 +
  1.4470 +    if (rt->keepAtoms())
  1.4471 +        return IncrementalSafety::Unsafe("keepAtoms set");
  1.4472 +
  1.4473 +    if (!rt->gcIncrementalEnabled)
  1.4474 +        return IncrementalSafety::Unsafe("incremental permanently disabled");
  1.4475 +
  1.4476 +    return IncrementalSafety::Safe();
  1.4477 +}
  1.4478 +
  1.4479 +static void
  1.4480 +BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
  1.4481 +{
  1.4482 +    IncrementalSafety safe = IsIncrementalGCSafe(rt);
  1.4483 +    if (!safe) {
  1.4484 +        ResetIncrementalGC(rt, safe.reason());
  1.4485 +        *budget = SliceBudget::Unlimited;
  1.4486 +        rt->gcStats.nonincremental(safe.reason());
  1.4487 +        return;
  1.4488 +    }
  1.4489 +
  1.4490 +    if (rt->gcMode() != JSGC_MODE_INCREMENTAL) {
  1.4491 +        ResetIncrementalGC(rt, "GC mode change");
  1.4492 +        *budget = SliceBudget::Unlimited;
  1.4493 +        rt->gcStats.nonincremental("GC mode");
  1.4494 +        return;
  1.4495 +    }
  1.4496 +
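          +    /*
          +     * Malloc-byte and allocation triggers (checked runtime-wide here and
          +     * per-zone below) do not reset an in-progress GC; they only force the
          +     * remaining work to run with an unlimited budget, i.e. non-incrementally.
          +     */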
  1.4497 +    if (rt->isTooMuchMalloc()) {
  1.4498 +        *budget = SliceBudget::Unlimited;
  1.4499 +        rt->gcStats.nonincremental("malloc bytes trigger");
  1.4500 +    }
  1.4501 +
  1.4502 +    bool reset = false;
  1.4503 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.4504 +        if (zone->gcBytes >= zone->gcTriggerBytes) {
  1.4505 +            *budget = SliceBudget::Unlimited;
  1.4506 +            rt->gcStats.nonincremental("allocation trigger");
  1.4507 +        }
  1.4508 +
  1.4509 +        if (rt->gcIncrementalState != NO_INCREMENTAL &&
  1.4510 +            zone->isGCScheduled() != zone->wasGCStarted())
  1.4511 +        {
  1.4512 +            reset = true;
  1.4513 +        }
  1.4514 +
  1.4515 +        if (zone->isTooMuchMalloc()) {
  1.4516 +            *budget = SliceBudget::Unlimited;
  1.4517 +            rt->gcStats.nonincremental("malloc bytes trigger");
  1.4518 +        }
  1.4519 +    }
  1.4520 +
  1.4521 +    if (reset)
  1.4522 +        ResetIncrementalGC(rt, "zone change");
  1.4523 +}
  1.4524 +
  1.4525 +/*
  1.4526 + * Run one GC "cycle" (either a slice of incremental GC or an entire
   1.4527 + * non-incremental GC). We disable inlining to ensure that the bottom of the
  1.4528 + * stack with possible GC roots recorded in MarkRuntime excludes any pointers we
  1.4529 + * use during the marking implementation.
  1.4530 + *
  1.4531 + * Returns true if we "reset" an existing incremental GC, which would force us
  1.4532 + * to run another cycle.
  1.4533 + */
  1.4534 +static MOZ_NEVER_INLINE bool
  1.4535 +GCCycle(JSRuntime *rt, bool incremental, int64_t budget,
  1.4536 +        JSGCInvocationKind gckind, JS::gcreason::Reason reason)
  1.4537 +{
  1.4538 +    AutoGCSession gcsession(rt);
  1.4539 +
  1.4540 +    /*
   1.4541 +     * As we are about to purge caches and clear the mark bits, we must wait for
  1.4542 +     * any background finalization to finish. We must also wait for the
  1.4543 +     * background allocation to finish so we can avoid taking the GC lock
  1.4544 +     * when manipulating the chunks during the GC.
  1.4545 +     */
  1.4546 +    {
  1.4547 +        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
  1.4548 +        rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
  1.4549 +    }
  1.4550 +
  1.4551 +    State prevState = rt->gcIncrementalState;
  1.4552 +
  1.4553 +    if (!incremental) {
  1.4554 +        /* If non-incremental GC was requested, reset incremental GC. */
  1.4555 +        ResetIncrementalGC(rt, "requested");
  1.4556 +        rt->gcStats.nonincremental("requested");
  1.4557 +        budget = SliceBudget::Unlimited;
  1.4558 +    } else {
  1.4559 +        BudgetIncrementalGC(rt, &budget);
  1.4560 +    }
  1.4561 +
  1.4562 +    /* The GC was reset, so we need a do-over. */
  1.4563 +    if (prevState != NO_INCREMENTAL && rt->gcIncrementalState == NO_INCREMENTAL) {
  1.4564 +        gcsession.cancel();
  1.4565 +        return true;
  1.4566 +    }
  1.4567 +
  1.4568 +    IncrementalCollectSlice(rt, budget, reason, gckind);
  1.4569 +    return false;
  1.4570 +}
  1.4571 +
  1.4572 +#ifdef JS_GC_ZEAL
  1.4573 +static bool
  1.4574 +IsDeterministicGCReason(JS::gcreason::Reason reason)
  1.4575 +{
  1.4576 +    if (reason > JS::gcreason::DEBUG_GC &&
  1.4577 +        reason != JS::gcreason::CC_FORCED && reason != JS::gcreason::SHUTDOWN_CC)
  1.4578 +    {
  1.4579 +        return false;
  1.4580 +    }
  1.4581 +
  1.4582 +    if (reason == JS::gcreason::MAYBEGC)
  1.4583 +        return false;
  1.4584 +
  1.4585 +    return true;
  1.4586 +}
  1.4587 +#endif
  1.4588 +
  1.4589 +static bool
  1.4590 +ShouldCleanUpEverything(JSRuntime *rt, JS::gcreason::Reason reason, JSGCInvocationKind gckind)
  1.4591 +{
  1.4592 +    // During shutdown, we must clean everything up, for the sake of leak
  1.4593 +    // detection. When a runtime has no contexts, or we're doing a GC before a
  1.4594 +    // shutdown CC, those are strong indications that we're shutting down.
  1.4595 +    return reason == JS::gcreason::DESTROY_RUNTIME ||
  1.4596 +           reason == JS::gcreason::SHUTDOWN_CC ||
  1.4597 +           gckind == GC_SHRINK;
  1.4598 +}
  1.4599 +
  1.4600 +namespace {
  1.4601 +
  1.4602 +#ifdef JSGC_GENERATIONAL
  1.4603 +class AutoDisableStoreBuffer
  1.4604 +{
  1.4605 +    JSRuntime *runtime;
  1.4606 +    bool prior;
  1.4607 +
  1.4608 +  public:
  1.4609 +    AutoDisableStoreBuffer(JSRuntime *rt) : runtime(rt) {
  1.4610 +        prior = rt->gcStoreBuffer.isEnabled();
  1.4611 +        rt->gcStoreBuffer.disable();
  1.4612 +    }
  1.4613 +    ~AutoDisableStoreBuffer() {
  1.4614 +        if (prior)
  1.4615 +            runtime->gcStoreBuffer.enable();
  1.4616 +    }
  1.4617 +};
  1.4618 +#else
  1.4619 +struct AutoDisableStoreBuffer
  1.4620 +{
  1.4621 +    AutoDisableStoreBuffer(JSRuntime *) {}
  1.4622 +};
  1.4623 +#endif
  1.4624 +
  1.4625 +} /* anonymous namespace */
  1.4626 +
  1.4627 +static void
  1.4628 +Collect(JSRuntime *rt, bool incremental, int64_t budget,
  1.4629 +        JSGCInvocationKind gckind, JS::gcreason::Reason reason)
  1.4630 +{
  1.4631 +    /* GC shouldn't be running in parallel execution mode */
  1.4632 +    JS_ASSERT(!InParallelSection());
  1.4633 +
  1.4634 +    JS_AbortIfWrongThread(rt);
  1.4635 +
  1.4636 +    /* If we attempt to invoke the GC while we are running in the GC, assert. */
  1.4637 +    JS_ASSERT(!rt->isHeapBusy());
  1.4638 +
  1.4639 +    if (rt->mainThread.suppressGC)
  1.4640 +        return;
  1.4641 +
  1.4642 +    TraceLogger *logger = TraceLoggerForMainThread(rt);
  1.4643 +    AutoTraceLog logGC(logger, TraceLogger::GC);
  1.4644 +
  1.4645 +#ifdef JS_GC_ZEAL
  1.4646 +    if (rt->gcDeterministicOnly && !IsDeterministicGCReason(reason))
  1.4647 +        return;
  1.4648 +#endif
  1.4649 +
  1.4650 +    JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL);
  1.4651 +
  1.4652 +    AutoStopVerifyingBarriers av(rt, reason == JS::gcreason::SHUTDOWN_CC ||
  1.4653 +                                     reason == JS::gcreason::DESTROY_RUNTIME);
  1.4654 +
  1.4655 +    RecordNativeStackTopForGC(rt);
  1.4656 +
  1.4657 +    int zoneCount = 0;
  1.4658 +    int compartmentCount = 0;
  1.4659 +    int collectedCount = 0;
  1.4660 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.4661 +        if (rt->gcMode() == JSGC_MODE_GLOBAL)
  1.4662 +            zone->scheduleGC();
  1.4663 +
  1.4664 +        /* This is a heuristic to avoid resets. */
  1.4665 +        if (rt->gcIncrementalState != NO_INCREMENTAL && zone->needsBarrier())
  1.4666 +            zone->scheduleGC();
  1.4667 +
  1.4668 +        zoneCount++;
  1.4669 +        if (zone->isGCScheduled())
  1.4670 +            collectedCount++;
  1.4671 +    }
  1.4672 +
  1.4673 +    for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next())
  1.4674 +        compartmentCount++;
  1.4675 +
  1.4676 +    rt->gcShouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind);
  1.4677 +
  1.4678 +    bool repeat = false;
  1.4679 +    do {
  1.4680 +        MinorGC(rt, reason);
  1.4681 +
  1.4682 +        /*
  1.4683 +         * Marking can trigger many incidental post barriers, some of them for
  1.4684 +         * objects which are not going to be live after the GC.
  1.4685 +         */
  1.4686 +        AutoDisableStoreBuffer adsb(rt);
  1.4687 +
  1.4688 +        gcstats::AutoGCSlice agc(rt->gcStats, collectedCount, zoneCount, compartmentCount, reason);
  1.4689 +
  1.4690 +        /*
  1.4691 +         * Let the API user decide to defer a GC if it wants to (unless this
  1.4692 +         * is the last context). Invoke the callback regardless.
  1.4693 +         */
  1.4694 +        if (rt->gcIncrementalState == NO_INCREMENTAL) {
  1.4695 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_BEGIN);
  1.4696 +            if (JSGCCallback callback = rt->gcCallback)
  1.4697 +                callback(rt, JSGC_BEGIN, rt->gcCallbackData);
  1.4698 +        }
  1.4699 +
  1.4700 +        rt->gcPoke = false;
  1.4701 +        bool wasReset = GCCycle(rt, incremental, budget, gckind, reason);
  1.4702 +
  1.4703 +        if (rt->gcIncrementalState == NO_INCREMENTAL) {
  1.4704 +            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_END);
  1.4705 +            if (JSGCCallback callback = rt->gcCallback)
  1.4706 +                callback(rt, JSGC_END, rt->gcCallbackData);
  1.4707 +        }
  1.4708 +
  1.4709 +        /* Need to re-schedule all zones for GC. */
  1.4710 +        if (rt->gcPoke && rt->gcShouldCleanUpEverything)
  1.4711 +            JS::PrepareForFullGC(rt);
  1.4712 +
  1.4713 +        /*
  1.4714 +         * If we reset an existing GC, we need to start a new one. Also, we
  1.4715 +         * repeat GCs that happen during shutdown (the gcShouldCleanUpEverything
  1.4716 +         * case) until we can be sure that no additional garbage is created
  1.4717 +         * (which typically happens if roots are dropped during finalizers).
  1.4718 +         */
  1.4719 +        repeat = (rt->gcPoke && rt->gcShouldCleanUpEverything) || wasReset;
  1.4720 +    } while (repeat);
  1.4721 +
  1.4722 +    if (rt->gcIncrementalState == NO_INCREMENTAL) {
  1.4723 +#ifdef JS_THREADSAFE
  1.4724 +        EnqueuePendingParseTasksAfterGC(rt);
  1.4725 +#endif
  1.4726 +    }
  1.4727 +}
  1.4728 +
  1.4729 +void
  1.4730 +js::GC(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
  1.4731 +{
  1.4732 +    Collect(rt, false, SliceBudget::Unlimited, gckind, reason);
  1.4733 +}
  1.4734 +
  1.4735 +void
  1.4736 +js::GCSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
  1.4737 +{
  1.4738 +    int64_t sliceBudget;
  1.4739 +    if (millis)
  1.4740 +        sliceBudget = SliceBudget::TimeBudget(millis);
  1.4741 +    else if (rt->gcHighFrequencyGC && rt->gcDynamicMarkSlice)
  1.4742 +        sliceBudget = rt->gcSliceBudget * IGC_MARK_SLICE_MULTIPLIER;
  1.4743 +    else
  1.4744 +        sliceBudget = rt->gcSliceBudget;
  1.4745 +
  1.4746 +    Collect(rt, true, sliceBudget, gckind, reason);
  1.4747 +}
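          +
          +/*
          + * Illustrative sketch (not engine code): a caller that wants to drive an
          + * incremental collection to completion one slice at a time could do so with
          + * the helpers in this file, where |sliceMillis| is a hypothetical per-slice
          + * time budget:
          + *
          + *   JS::PrepareForFullGC(rt);
          + *   js::GCSlice(rt, GC_NORMAL, JS::gcreason::API, sliceMillis);
          + *   while (JS::IsIncrementalGCInProgress(rt)) {
          + *       JS::PrepareForIncrementalGC(rt);
          + *       js::GCSlice(rt, GC_NORMAL, JS::gcreason::API, sliceMillis);
          + *   }
          + */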
  1.4748 +
  1.4749 +void
  1.4750 +js::GCFinalSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
  1.4751 +{
  1.4752 +    Collect(rt, true, SliceBudget::Unlimited, gckind, reason);
  1.4753 +}
  1.4754 +
  1.4755 +static bool
  1.4756 +ZonesSelected(JSRuntime *rt)
  1.4757 +{
  1.4758 +    for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
  1.4759 +        if (zone->isGCScheduled())
  1.4760 +            return true;
  1.4761 +    }
  1.4762 +    return false;
  1.4763 +}
  1.4764 +
  1.4765 +void
  1.4766 +js::GCDebugSlice(JSRuntime *rt, bool limit, int64_t objCount)
  1.4767 +{
  1.4768 +    int64_t budget = limit ? SliceBudget::WorkBudget(objCount) : SliceBudget::Unlimited;
  1.4769 +    if (!ZonesSelected(rt)) {
  1.4770 +        if (JS::IsIncrementalGCInProgress(rt))
  1.4771 +            JS::PrepareForIncrementalGC(rt);
  1.4772 +        else
  1.4773 +            JS::PrepareForFullGC(rt);
  1.4774 +    }
  1.4775 +    Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
  1.4776 +}
  1.4777 +
  1.4778 +/* Schedule a full GC unless a zone will already be collected. */
  1.4779 +void
  1.4780 +js::PrepareForDebugGC(JSRuntime *rt)
  1.4781 +{
  1.4782 +    if (!ZonesSelected(rt))
  1.4783 +        JS::PrepareForFullGC(rt);
  1.4784 +}
  1.4785 +
  1.4786 +JS_FRIEND_API(void)
  1.4787 +JS::ShrinkGCBuffers(JSRuntime *rt)
  1.4788 +{
  1.4789 +    AutoLockGC lock(rt);
  1.4790 +    JS_ASSERT(!rt->isHeapBusy());
  1.4791 +
  1.4792 +    if (!rt->useHelperThreads())
  1.4793 +        ExpireChunksAndArenas(rt, true);
  1.4794 +    else
  1.4795 +        rt->gcHelperThread.startBackgroundShrink();
  1.4796 +}
  1.4797 +
  1.4798 +void
  1.4799 +js::MinorGC(JSRuntime *rt, JS::gcreason::Reason reason)
  1.4800 +{
  1.4801 +#ifdef JSGC_GENERATIONAL
  1.4802 +    TraceLogger *logger = TraceLoggerForMainThread(rt);
  1.4803 +    AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
  1.4804 +    rt->gcNursery.collect(rt, reason, nullptr);
  1.4805 +    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty());
  1.4806 +#endif
  1.4807 +}
  1.4808 +
  1.4809 +void
  1.4810 +js::MinorGC(JSContext *cx, JS::gcreason::Reason reason)
  1.4811 +{
   1.4812 +    // Alternative to the runtime-taking form above; this one also allows
   1.4813 +    // marking type objects as needing pretenuring.
  1.4814 +#ifdef JSGC_GENERATIONAL
  1.4815 +    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
  1.4816 +    AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
  1.4817 +
  1.4818 +    Nursery::TypeObjectList pretenureTypes;
  1.4819 +    JSRuntime *rt = cx->runtime();
  1.4820 +    rt->gcNursery.collect(cx->runtime(), reason, &pretenureTypes);
  1.4821 +    for (size_t i = 0; i < pretenureTypes.length(); i++) {
  1.4822 +        if (pretenureTypes[i]->canPreTenure())
  1.4823 +            pretenureTypes[i]->setShouldPreTenure(cx);
  1.4824 +    }
  1.4825 +    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty());
  1.4826 +#endif
  1.4827 +}
  1.4828 +
  1.4829 +void
  1.4830 +js::gc::GCIfNeeded(JSContext *cx)
  1.4831 +{
  1.4832 +    JSRuntime *rt = cx->runtime();
  1.4833 +
  1.4834 +#ifdef JSGC_GENERATIONAL
  1.4835 +    /*
   1.4836 +     * In case of store buffer overflow, perform a minor GC first so that the
  1.4837 +     * correct reason is seen in the logs.
  1.4838 +     */
  1.4839 +    if (rt->gcStoreBuffer.isAboutToOverflow())
  1.4840 +        MinorGC(cx, JS::gcreason::FULL_STORE_BUFFER);
  1.4841 +#endif
  1.4842 +
  1.4843 +    if (rt->gcIsNeeded)
  1.4844 +        GCSlice(rt, GC_NORMAL, rt->gcTriggerReason);
  1.4845 +}
  1.4846 +
  1.4847 +void
  1.4848 +js::gc::FinishBackgroundFinalize(JSRuntime *rt)
  1.4849 +{
  1.4850 +    rt->gcHelperThread.waitBackgroundSweepEnd();
  1.4851 +}
  1.4852 +
  1.4853 +AutoFinishGC::AutoFinishGC(JSRuntime *rt)
  1.4854 +{
  1.4855 +    if (JS::IsIncrementalGCInProgress(rt)) {
  1.4856 +        JS::PrepareForIncrementalGC(rt);
  1.4857 +        JS::FinishIncrementalGC(rt, JS::gcreason::API);
  1.4858 +    }
  1.4859 +
  1.4860 +    gc::FinishBackgroundFinalize(rt);
  1.4861 +}
  1.4862 +
  1.4863 +AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime *rt, ZoneSelector selector)
  1.4864 +  : finish(rt),
  1.4865 +    session(rt),
  1.4866 +    copy(rt, selector)
  1.4867 +{
  1.4868 +    RecordNativeStackTopForGC(rt);
  1.4869 +}
  1.4870 +
  1.4871 +JSCompartment *
  1.4872 +js::NewCompartment(JSContext *cx, Zone *zone, JSPrincipals *principals,
  1.4873 +                   const JS::CompartmentOptions &options)
  1.4874 +{
  1.4875 +    JSRuntime *rt = cx->runtime();
  1.4876 +    JS_AbortIfWrongThread(rt);
  1.4877 +
  1.4878 +    ScopedJSDeletePtr<Zone> zoneHolder;
  1.4879 +    if (!zone) {
  1.4880 +        zone = cx->new_<Zone>(rt);
  1.4881 +        if (!zone)
  1.4882 +            return nullptr;
  1.4883 +
  1.4884 +        zoneHolder.reset(zone);
  1.4885 +
  1.4886 +        zone->setGCLastBytes(8192, GC_NORMAL);
  1.4887 +
  1.4888 +        const JSPrincipals *trusted = rt->trustedPrincipals();
  1.4889 +        zone->isSystem = principals && principals == trusted;
  1.4890 +    }
  1.4891 +
  1.4892 +    ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options));
  1.4893 +    if (!compartment || !compartment->init(cx))
  1.4894 +        return nullptr;
  1.4895 +
  1.4896 +    // Set up the principals.
  1.4897 +    JS_SetCompartmentPrincipals(compartment, principals);
  1.4898 +
  1.4899 +    AutoLockGC lock(rt);
  1.4900 +
  1.4901 +    if (!zone->compartments.append(compartment.get())) {
  1.4902 +        js_ReportOutOfMemory(cx);
  1.4903 +        return nullptr;
  1.4904 +    }
  1.4905 +
  1.4906 +    if (zoneHolder && !rt->zones.append(zone)) {
  1.4907 +        js_ReportOutOfMemory(cx);
  1.4908 +        return nullptr;
  1.4909 +    }
  1.4910 +
  1.4911 +    zoneHolder.forget();
  1.4912 +    return compartment.forget();
  1.4913 +}
  1.4914 +
  1.4915 +void
  1.4916 +gc::MergeCompartments(JSCompartment *source, JSCompartment *target)
  1.4917 +{
   1.4918 +    // The source compartment must be specifically flagged as mergeable.  This
  1.4919 +    // also implies that the compartment is not visible to the debugger.
  1.4920 +    JS_ASSERT(source->options_.mergeable());
  1.4921 +
  1.4922 +    JSRuntime *rt = source->runtimeFromMainThread();
  1.4923 +
  1.4924 +    AutoPrepareForTracing prepare(rt, SkipAtoms);
  1.4925 +
   1.4926 +    // Clean up tables and other state in the source compartment that will be
  1.4927 +    // meaningless after merging into the target compartment.
  1.4928 +
  1.4929 +    source->clearTables();
  1.4930 +
  1.4931 +    // Fixup compartment pointers in source to refer to target.
  1.4932 +
  1.4933 +    for (CellIter iter(source->zone(), FINALIZE_SCRIPT); !iter.done(); iter.next()) {
  1.4934 +        JSScript *script = iter.get<JSScript>();
  1.4935 +        JS_ASSERT(script->compartment() == source);
  1.4936 +        script->compartment_ = target;
  1.4937 +    }
  1.4938 +
  1.4939 +    for (CellIter iter(source->zone(), FINALIZE_BASE_SHAPE); !iter.done(); iter.next()) {
  1.4940 +        BaseShape *base = iter.get<BaseShape>();
  1.4941 +        JS_ASSERT(base->compartment() == source);
  1.4942 +        base->compartment_ = target;
  1.4943 +    }
  1.4944 +
  1.4945 +    // Fixup zone pointers in source's zone to refer to target's zone.
  1.4946 +
  1.4947 +    for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
  1.4948 +        for (ArenaIter aiter(source->zone(), AllocKind(thingKind)); !aiter.done(); aiter.next()) {
  1.4949 +            ArenaHeader *aheader = aiter.get();
  1.4950 +            aheader->zone = target->zone();
  1.4951 +        }
  1.4952 +    }
  1.4953 +
  1.4954 +    // The source should be the only compartment in its zone.
  1.4955 +    for (CompartmentsInZoneIter c(source->zone()); !c.done(); c.next())
  1.4956 +        JS_ASSERT(c.get() == source);
  1.4957 +
  1.4958 +    // Merge the allocator in source's zone into target's zone.
  1.4959 +    target->zone()->allocator.arenas.adoptArenas(rt, &source->zone()->allocator.arenas);
  1.4960 +    target->zone()->gcBytes += source->zone()->gcBytes;
  1.4961 +    source->zone()->gcBytes = 0;
  1.4962 +
  1.4963 +    // Merge other info in source's zone into target's zone.
  1.4964 +    target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
  1.4965 +}
  1.4966 +
  1.4967 +void
  1.4968 +gc::RunDebugGC(JSContext *cx)
  1.4969 +{
  1.4970 +#ifdef JS_GC_ZEAL
  1.4971 +    JSRuntime *rt = cx->runtime();
  1.4972 +    int type = rt->gcZeal();
  1.4973 +
  1.4974 +    if (rt->mainThread.suppressGC)
  1.4975 +        return;
  1.4976 +
  1.4977 +    if (type == js::gc::ZealGenerationalGCValue)
  1.4978 +        return MinorGC(rt, JS::gcreason::DEBUG_GC);
  1.4979 +
  1.4980 +    PrepareForDebugGC(cx->runtime());
  1.4981 +
  1.4982 +    if (type == ZealIncrementalRootsThenFinish ||
  1.4983 +        type == ZealIncrementalMarkAllThenFinish ||
  1.4984 +        type == ZealIncrementalMultipleSlices)
  1.4985 +    {
  1.4986 +        js::gc::State initialState = rt->gcIncrementalState;
  1.4987 +        int64_t budget;
  1.4988 +        if (type == ZealIncrementalMultipleSlices) {
  1.4989 +            /*
  1.4990 +             * Start with a small slice limit and double it every slice. This
   1.4991 +             * ensures that we get multiple slices, and the collection runs to
  1.4992 +             * completion.
  1.4993 +             */
  1.4994 +            if (initialState == NO_INCREMENTAL)
  1.4995 +                rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
  1.4996 +            else
  1.4997 +                rt->gcIncrementalLimit *= 2;
  1.4998 +            budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
  1.4999 +        } else {
   1.5000 +            // This triggers incremental GC; the budget is actually ignored by IncrementalCollectSlice.
  1.5001 +            budget = SliceBudget::WorkBudget(1);
  1.5002 +        }
  1.5003 +
  1.5004 +        Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
  1.5005 +
  1.5006 +        /*
  1.5007 +         * For multi-slice zeal, reset the slice size when we get to the sweep
  1.5008 +         * phase.
  1.5009 +         */
  1.5010 +        if (type == ZealIncrementalMultipleSlices &&
  1.5011 +            initialState == MARK && rt->gcIncrementalState == SWEEP)
  1.5012 +        {
  1.5013 +            rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
  1.5014 +        }
  1.5015 +    } else {
  1.5016 +        Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC);
  1.5017 +    }
  1.5018 +
  1.5019 +#endif
  1.5020 +}
  1.5021 +
  1.5022 +void
  1.5023 +gc::SetDeterministicGC(JSContext *cx, bool enabled)
  1.5024 +{
  1.5025 +#ifdef JS_GC_ZEAL
  1.5026 +    JSRuntime *rt = cx->runtime();
  1.5027 +    rt->gcDeterministicOnly = enabled;
  1.5028 +#endif
  1.5029 +}
  1.5030 +
  1.5031 +void
  1.5032 +gc::SetValidateGC(JSContext *cx, bool enabled)
  1.5033 +{
  1.5034 +    JSRuntime *rt = cx->runtime();
  1.5035 +    rt->gcValidate = enabled;
  1.5036 +}
  1.5037 +
  1.5038 +void
  1.5039 +gc::SetFullCompartmentChecks(JSContext *cx, bool enabled)
  1.5040 +{
  1.5041 +    JSRuntime *rt = cx->runtime();
  1.5042 +    rt->gcFullCompartmentChecks = enabled;
  1.5043 +}
  1.5044 +
  1.5045 +#ifdef DEBUG
  1.5046 +
  1.5047 +/* Should only be called manually under gdb */
  1.5048 +void PreventGCDuringInteractiveDebug()
  1.5049 +{
  1.5050 +    TlsPerThreadData.get()->suppressGC++;
  1.5051 +}
  1.5052 +
  1.5053 +#endif
  1.5054 +
  1.5055 +void
  1.5056 +js::ReleaseAllJITCode(FreeOp *fop)
  1.5057 +{
  1.5058 +#ifdef JS_ION
  1.5059 +
  1.5060 +# ifdef JSGC_GENERATIONAL
  1.5061 +    /*
  1.5062 +     * Scripts can entrain nursery things, inserting references to the script
  1.5063 +     * into the store buffer. Clear the store buffer before discarding scripts.
  1.5064 +     */
  1.5065 +    MinorGC(fop->runtime(), JS::gcreason::EVICT_NURSERY);
  1.5066 +# endif
  1.5067 +
  1.5068 +    for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) {
  1.5069 +        if (!zone->jitZone())
  1.5070 +            continue;
  1.5071 +
  1.5072 +# ifdef DEBUG
  1.5073 +        /* Assert no baseline scripts are marked as active. */
  1.5074 +        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
  1.5075 +            JSScript *script = i.get<JSScript>();
  1.5076 +            JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
  1.5077 +        }
  1.5078 +# endif
  1.5079 +
  1.5080 +        /* Mark baseline scripts on the stack as active. */
  1.5081 +        jit::MarkActiveBaselineScripts(zone);
  1.5082 +
  1.5083 +        jit::InvalidateAll(fop, zone);
  1.5084 +
  1.5085 +        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
  1.5086 +            JSScript *script = i.get<JSScript>();
  1.5087 +            jit::FinishInvalidation<SequentialExecution>(fop, script);
  1.5088 +            jit::FinishInvalidation<ParallelExecution>(fop, script);
  1.5089 +
  1.5090 +            /*
  1.5091 +             * Discard baseline script if it's not marked as active. Note that
  1.5092 +             * this also resets the active flag.
  1.5093 +             */
  1.5094 +            jit::FinishDiscardBaselineScript(fop, script);
  1.5095 +        }
  1.5096 +
  1.5097 +        zone->jitZone()->optimizedStubSpace()->free();
  1.5098 +    }
  1.5099 +#endif
  1.5100 +}
  1.5101 +
  1.5102 +/*
  1.5103 + * There are three possible PCCount profiling states:
  1.5104 + *
  1.5105 + * 1. None: Neither scripts nor the runtime have count information.
  1.5106 + * 2. Profile: Active scripts have count information, the runtime does not.
  1.5107 + * 3. Query: Scripts do not have count information, the runtime does.
  1.5108 + *
  1.5109 + * When starting to profile scripts, counting begins immediately, with all JIT
  1.5110 + * code discarded and recompiled with counts as necessary. Active interpreter
  1.5111 + * frames will not begin profiling until they begin executing another script
  1.5112 + * (via a call or return).
  1.5113 + *
   1.5114 + * The API functions below manage transitions between these states, according
   1.5115 + * to the following table.
  1.5116 + *
  1.5117 + *                                  Old State
  1.5118 + *                          -------------------------
  1.5119 + * Function                 None      Profile   Query
  1.5120 + * --------
  1.5121 + * StartPCCountProfiling    Profile   Profile   Profile
  1.5122 + * StopPCCountProfiling     None      Query     Query
  1.5123 + * PurgePCCounts            None      None      None
  1.5124 + */
  1.5125 +
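          +/*
          + * Illustrative sketch (not engine code): a typical profiling session walks
          + * the states None -> Profile -> Query -> None, e.g.:
          + *
          + *   js::StartPCCountProfiling(cx);   // discard JIT code, start counting
          + *   // ... run the scripts of interest ...
          + *   js::StopPCCountProfiling(cx);    // move counts into the runtime
          + *   // ... query the collected counts ...
          + *   js::PurgePCCounts(cx);           // release the count data
          + */
          +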
  1.5126 +static void
  1.5127 +ReleaseScriptCounts(FreeOp *fop)
  1.5128 +{
  1.5129 +    JSRuntime *rt = fop->runtime();
  1.5130 +    JS_ASSERT(rt->scriptAndCountsVector);
  1.5131 +
  1.5132 +    ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;
  1.5133 +
  1.5134 +    for (size_t i = 0; i < vec.length(); i++)
  1.5135 +        vec[i].scriptCounts.destroy(fop);
  1.5136 +
  1.5137 +    fop->delete_(rt->scriptAndCountsVector);
  1.5138 +    rt->scriptAndCountsVector = nullptr;
  1.5139 +}
  1.5140 +
  1.5141 +JS_FRIEND_API(void)
  1.5142 +js::StartPCCountProfiling(JSContext *cx)
  1.5143 +{
  1.5144 +    JSRuntime *rt = cx->runtime();
  1.5145 +
  1.5146 +    if (rt->profilingScripts)
  1.5147 +        return;
  1.5148 +
  1.5149 +    if (rt->scriptAndCountsVector)
  1.5150 +        ReleaseScriptCounts(rt->defaultFreeOp());
  1.5151 +
  1.5152 +    ReleaseAllJITCode(rt->defaultFreeOp());
  1.5153 +
  1.5154 +    rt->profilingScripts = true;
  1.5155 +}
  1.5156 +
  1.5157 +JS_FRIEND_API(void)
  1.5158 +js::StopPCCountProfiling(JSContext *cx)
  1.5159 +{
  1.5160 +    JSRuntime *rt = cx->runtime();
  1.5161 +
  1.5162 +    if (!rt->profilingScripts)
  1.5163 +        return;
  1.5164 +    JS_ASSERT(!rt->scriptAndCountsVector);
  1.5165 +
  1.5166 +    ReleaseAllJITCode(rt->defaultFreeOp());
  1.5167 +
  1.5168 +    ScriptAndCountsVector *vec = cx->new_<ScriptAndCountsVector>(SystemAllocPolicy());
  1.5169 +    if (!vec)
  1.5170 +        return;
  1.5171 +
  1.5172 +    for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
  1.5173 +        for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
  1.5174 +            JSScript *script = i.get<JSScript>();
  1.5175 +            if (script->hasScriptCounts() && script->types) {
  1.5176 +                ScriptAndCounts sac;
  1.5177 +                sac.script = script;
  1.5178 +                sac.scriptCounts.set(script->releaseScriptCounts());
  1.5179 +                if (!vec->append(sac))
  1.5180 +                    sac.scriptCounts.destroy(rt->defaultFreeOp());
  1.5181 +            }
  1.5182 +        }
  1.5183 +    }
  1.5184 +
  1.5185 +    rt->profilingScripts = false;
  1.5186 +    rt->scriptAndCountsVector = vec;
  1.5187 +}
  1.5188 +
  1.5189 +JS_FRIEND_API(void)
  1.5190 +js::PurgePCCounts(JSContext *cx)
  1.5191 +{
  1.5192 +    JSRuntime *rt = cx->runtime();
  1.5193 +
  1.5194 +    if (!rt->scriptAndCountsVector)
  1.5195 +        return;
  1.5196 +    JS_ASSERT(!rt->profilingScripts);
  1.5197 +
  1.5198 +    ReleaseScriptCounts(rt->defaultFreeOp());
  1.5199 +}
  1.5200 +
  1.5201 +void
  1.5202 +js::PurgeJITCaches(Zone *zone)
  1.5203 +{
  1.5204 +#ifdef JS_ION
  1.5205 +    for (CellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) {
  1.5206 +        JSScript *script = i.get<JSScript>();
  1.5207 +
  1.5208 +        /* Discard Ion caches. */
  1.5209 +        jit::PurgeCaches(script);
  1.5210 +    }
  1.5211 +#endif
  1.5212 +}
  1.5213 +
  1.5214 +void
  1.5215 +ArenaLists::normalizeBackgroundFinalizeState(AllocKind thingKind)
  1.5216 +{
  1.5217 +    volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
  1.5218 +    switch (*bfs) {
  1.5219 +      case BFS_DONE:
  1.5220 +        break;
  1.5221 +      case BFS_JUST_FINISHED:
  1.5222 +        // No allocations between end of last sweep and now.
   1.5223 +        // Transferring over arenas is a kind of allocation.
  1.5224 +        *bfs = BFS_DONE;
  1.5225 +        break;
  1.5226 +      default:
  1.5227 +        JS_ASSERT(!"Background finalization in progress, but it should not be.");
  1.5228 +        break;
  1.5229 +    }
  1.5230 +}
  1.5231 +
  1.5232 +void
  1.5233 +ArenaLists::adoptArenas(JSRuntime *rt, ArenaLists *fromArenaLists)
  1.5234 +{
  1.5235 +    // The other parallel threads have all completed now, and GC
  1.5236 +    // should be inactive, but still take the lock as a kind of read
  1.5237 +    // fence.
  1.5238 +    AutoLockGC lock(rt);
  1.5239 +
  1.5240 +    fromArenaLists->purge();
  1.5241 +
  1.5242 +    for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) {
  1.5243 +#ifdef JS_THREADSAFE
  1.5244 +        // When we enter a parallel section, we join the background
  1.5245 +        // thread, and we do not run GC while in the parallel section,
  1.5246 +        // so no finalizer should be active!
  1.5247 +        normalizeBackgroundFinalizeState(AllocKind(thingKind));
  1.5248 +        fromArenaLists->normalizeBackgroundFinalizeState(AllocKind(thingKind));
  1.5249 +#endif
  1.5250 +        ArenaList *fromList = &fromArenaLists->arenaLists[thingKind];
  1.5251 +        ArenaList *toList = &arenaLists[thingKind];
  1.5252 +        while (fromList->head != nullptr) {
  1.5253 +            // Remove entry from |fromList|
  1.5254 +            ArenaHeader *fromHeader = fromList->head;
  1.5255 +            fromList->head = fromHeader->next;
  1.5256 +            fromHeader->next = nullptr;
  1.5257 +
  1.5258 +            // During parallel execution, we sometimes keep empty arenas
  1.5259 +            // on the lists rather than sending them back to the chunk.
  1.5260 +            // Therefore, if fromHeader is empty, send it back to the
  1.5261 +            // chunk now. Otherwise, attach to |toList|.
  1.5262 +            if (fromHeader->isEmpty())
  1.5263 +                fromHeader->chunk()->releaseArena(fromHeader);
  1.5264 +            else
  1.5265 +                toList->insert(fromHeader);
  1.5266 +        }
  1.5267 +        fromList->cursor = &fromList->head;
  1.5268 +    }
  1.5269 +}
  1.5270 +
  1.5271 +bool
  1.5272 +ArenaLists::containsArena(JSRuntime *rt, ArenaHeader *needle)
  1.5273 +{
  1.5274 +    AutoLockGC lock(rt);
  1.5275 +    size_t allocKind = needle->getAllocKind();
  1.5276 +    for (ArenaHeader *aheader = arenaLists[allocKind].head;
  1.5277 +         aheader != nullptr;
  1.5278 +         aheader = aheader->next)
  1.5279 +    {
  1.5280 +        if (aheader == needle)
  1.5281 +            return true;
  1.5282 +    }
  1.5283 +    return false;
  1.5284 +}
  1.5285 +
  1.5286 +
  1.5287 +AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSContext *cx)
  1.5288 +  : runtime(cx->runtime()),
  1.5289 +    markCount(runtime->gcObjectsMarkedInDeadZones),
  1.5290 +    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
  1.5291 +    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
  1.5292 +{
  1.5293 +    runtime->gcManipulatingDeadZones = true;
  1.5294 +}
  1.5295 +
  1.5296 +AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSObject *obj)
  1.5297 +  : runtime(obj->compartment()->runtimeFromMainThread()),
  1.5298 +    markCount(runtime->gcObjectsMarkedInDeadZones),
  1.5299 +    inIncremental(JS::IsIncrementalGCInProgress(runtime)),
  1.5300 +    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
  1.5301 +{
  1.5302 +    runtime->gcManipulatingDeadZones = true;
  1.5303 +}
  1.5304 +
  1.5305 +AutoMaybeTouchDeadZones::~AutoMaybeTouchDeadZones()
  1.5306 +{
  1.5307 +    runtime->gcManipulatingDeadZones = manipulatingDeadZones;
  1.5308 +
  1.5309 +    if (inIncremental && runtime->gcObjectsMarkedInDeadZones != markCount) {
  1.5310 +        JS::PrepareForFullGC(runtime);
  1.5311 +        js::GC(runtime, GC_NORMAL, JS::gcreason::TRANSPLANT);
  1.5312 +    }
  1.5313 +}
  1.5314 +
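          +/*
          + * RAII guard: each constructor bumps the owning thread's suppressGC counter
          + * (the matching decrement is in the destructor, defined elsewhere). While
          + * the main thread's counter is non-zero, Collect() and RunDebugGC() bail
          + * out without collecting.
          + */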
  1.5315 +AutoSuppressGC::AutoSuppressGC(ExclusiveContext *cx)
  1.5316 +  : suppressGC_(cx->perThreadData->suppressGC)
  1.5317 +{
  1.5318 +    suppressGC_++;
  1.5319 +}
  1.5320 +
  1.5321 +AutoSuppressGC::AutoSuppressGC(JSCompartment *comp)
  1.5322 +  : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC)
  1.5323 +{
  1.5324 +    suppressGC_++;
  1.5325 +}
  1.5326 +
  1.5327 +AutoSuppressGC::AutoSuppressGC(JSRuntime *rt)
  1.5328 +  : suppressGC_(rt->mainThread.suppressGC)
  1.5329 +{
  1.5330 +    suppressGC_++;
  1.5331 +}
  1.5332 +
  1.5333 +bool
  1.5334 +js::UninlinedIsInsideNursery(JSRuntime *rt, const void *thing)
  1.5335 +{
  1.5336 +    return IsInsideNursery(rt, thing);
  1.5337 +}
  1.5338 +
  1.5339 +#ifdef DEBUG
  1.5340 +AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime *rt
  1.5341 +                                             MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
  1.5342 +  : count(rt->gcDisableStrictProxyCheckingCount)
  1.5343 +{
  1.5344 +    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  1.5345 +    count++;
  1.5346 +}
  1.5347 +
  1.5348 +JS_FRIEND_API(void)
  1.5349 +JS::AssertGCThingMustBeTenured(JSObject *obj)
  1.5350 +{
  1.5351 +    JS_ASSERT((!IsNurseryAllocable(obj->tenuredGetAllocKind()) || obj->getClass()->finalize) &&
  1.5352 +              obj->isTenured());
  1.5353 +}
  1.5354 +
  1.5355 +JS_FRIEND_API(size_t)
  1.5356 +JS::GetGCNumber()
  1.5357 +{
  1.5358 +    JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
  1.5359 +    if (!rt)
  1.5360 +        return 0;
  1.5361 +    return rt->gcNumber;
  1.5362 +}
  1.5363 +
  1.5364 +JS::AutoAssertNoGC::AutoAssertNoGC()
  1.5365 +  : runtime(nullptr), gcNumber(0)
  1.5366 +{
  1.5367 +    js::PerThreadData *data = js::TlsPerThreadData.get();
  1.5368 +    if (data) {
  1.5369 +        /*
   1.5370 +         * GCs from off-thread will always assert, so off-thread is implicitly
   1.5371 +         * AutoAssertNoGC. We still need to allow AutoAssertNoGC to be used in
   1.5372 +         * code that works from both threads, however. We also use this to
   1.5373 +         * annotate the off-thread run loops.
  1.5374 +         */
  1.5375 +        runtime = data->runtimeIfOnOwnerThread();
  1.5376 +        if (runtime)
  1.5377 +            gcNumber = runtime->gcNumber;
  1.5378 +    }
  1.5379 +}
  1.5380 +
  1.5381 +JS::AutoAssertNoGC::AutoAssertNoGC(JSRuntime *rt)
  1.5382 +  : runtime(rt), gcNumber(rt->gcNumber)
  1.5383 +{
  1.5384 +}
  1.5385 +
  1.5386 +JS::AutoAssertNoGC::~AutoAssertNoGC()
  1.5387 +{
  1.5388 +    if (runtime)
  1.5389 +        MOZ_ASSERT(gcNumber == runtime->gcNumber, "GC ran inside an AutoAssertNoGC scope.");
  1.5390 +}
  1.5391 +#endif
