/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * This code implements an incremental mark-and-sweep garbage collector, with
 * most sweeping carried out in the background on a parallel thread.
 *
 * Full vs. zone GC
 * ----------------
 *
 * The collector can collect all zones at once, or a subset. These types of
 * collection are referred to as a full GC and a zone GC respectively.
 *
 * The atoms zone is only collected in a full GC since objects in any zone may
 * have pointers to atoms, and these are not recorded in the cross compartment
 * pointer map. Also, the atoms zone is not collected if any thread has an
 * AutoKeepAtoms instance on the stack, or there are any exclusive threads using
 * the runtime.
 *
 * It is possible for an incremental collection that started out as a full GC to
 * become a zone GC if new zones are created during the course of the
 * collection.
 *
 * Incremental collection
 * ----------------------
 *
 * For a collection to be carried out incrementally the following conditions
 * must be met:
 *  - the collection must be run by calling js::GCSlice() rather than js::GC()
 *  - the GC mode must have been set to JSGC_MODE_INCREMENTAL with
 *    JS_SetGCParameter()
 *  - no thread may have an AutoKeepAtoms instance on the stack
 *  - all native objects that have their own trace hook must indicate that they
 *    implement read and write barriers with the JSCLASS_IMPLEMENTS_BARRIERS
 *    flag
 *
 * The last condition is an engine-internal mechanism to ensure that incremental
 * collection is not carried out without the correct barriers being implemented.
 * For more information see 'Incremental marking' below.
 *
 * If the collection is not incremental, all foreground activity happens inside
 * a single call to GC() or GCSlice(). However the collection is not complete
 * until the background sweeping activity has finished.
 *
 * An incremental collection proceeds as a series of slices, interleaved with
 * mutator activity, i.e. running JavaScript code. Slices are limited by a time
 * budget. The slice finishes as soon as possible after the requested time has
 * passed.
 *
 * Collector states
 * ----------------
 *
 * The collector proceeds through the following states, the current state being
 * held in JSRuntime::gcIncrementalState:
 *
 *  - MARK_ROOTS - marks the stack and other roots
 *  - MARK       - incrementally marks reachable things
 *  - SWEEP      - sweeps zones in groups and continues marking unswept zones
 *
 * The MARK_ROOTS activity always takes place in the first slice. The next two
 * states can take place over one or more slices.
 *
 * In other words an incremental collection proceeds like this:
 *
 * Slice 1:   MARK_ROOTS: Roots pushed onto the mark stack.
 *            MARK:       The mark stack is processed by popping an element,
 *                        marking it, and pushing its children.
 *
 *          ... JS code runs ...
 *
 * Slice 2:   MARK:       More mark stack processing.
 *
 *          ... JS code runs ...
 *
 * Slice n-1: MARK:       More mark stack processing.
 *
 *          ... JS code runs ...
 *
 * Slice n:   MARK:       Mark stack is completely drained.
 *            SWEEP:      Select first group of zones to sweep and sweep them.
 *
 *          ... JS code runs ...
 *
 * Slice n+1: SWEEP:      Mark objects in unswept zones that were newly
 *                        identified as alive (see below). Then sweep more zone
 *                        groups.
 *
 *          ... JS code runs ...
 *
 * Slice n+2: SWEEP:      Mark objects in unswept zones that were newly
 *                        identified as alive. Then sweep more zone groups.
 *
 *          ... JS code runs ...
 *
 * Slice m:   SWEEP:      Sweeping is finished, and background sweeping
 *                        started on the helper thread.
 *
 *          ... JS code runs, remaining sweeping done on background thread ...
 *
 * When background sweeping finishes the GC is complete.
 *
 * Incremental marking
 * -------------------
 *
 * Incremental collection requires close collaboration with the mutator (i.e.,
 * JS code) to guarantee correctness.
 *
 *  - During an incremental GC, if a memory location (except a root) is written
 *    to, then the value it previously held must be marked. Write barriers
 *    ensure this.
 *
 *  - Any object that is allocated during incremental GC must start out marked.
 *
 *  - Roots are marked in the first slice and hence don't need write barriers.
 *    Roots are things like the C stack and the VM stack.
 *
 * The problem that write barriers solve is that between slices the mutator can
 * change the object graph. We must ensure that it cannot do this in such a way
 * that makes us fail to mark a reachable object (marking an unreachable object
 * is tolerable).
 *
 * We use a snapshot-at-the-beginning algorithm to do this. This means that we
 * promise to mark at least everything that is reachable at the beginning of
 * collection. To implement it we mark the old contents of every non-root memory
 * location written to by the mutator while the collection is in progress, using
 * write barriers. This is described in gc/Barrier.h.
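 *
 * As a rough illustration only (the real barriers live in gc/Barrier.h and the
 * names below are placeholders), a pre-write barrier on a heap slot behaves
 * like this:
 *
 *     void writeSlot(JSObject **slot, JSObject *newValue) {
 *         if (zoneOf(*slot)->needsBarrier())
 *             markFromBarrier(*slot);    // mark the value being overwritten
 *         *slot = newValue;
 *     }
 *
 * This keeps the snapshot taken at the start of the collection complete even
 * though the mutator keeps changing the object graph between slices.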
 *
 * Incremental sweeping
 * --------------------
 *
 * Sweeping is difficult to do incrementally because object finalizers must be
 * run at the start of sweeping, before any mutator code runs. The reason is
 * that some objects use their finalizers to remove themselves from caches. If
 * mutator code was allowed to run after the start of sweeping, it could observe
 * the state of the cache and create a new reference to an object that was just
 * about to be destroyed.
 *
 * Sweeping all finalizable objects in one go would introduce long pauses, so
 * instead sweeping is broken up into groups of zones. Zones which are not yet
 * being swept are still marked, so the issue above does not apply.
 *
 * The order of sweeping is restricted by cross compartment pointers - for
 * example say that object |a| from zone A points to object |b| in zone B and
 * neither object was marked when we transitioned to the SWEEP phase. Imagine we
 * sweep B first and then return to the mutator. It's possible that the mutator
 * could cause |a| to become alive through a read barrier (perhaps it was a
 * shape that was accessed via a shape table). Then we would need to mark |b|,
 * which |a| points to, but |b| has already been swept.
 *
 * So if there is such a pointer then marking of zone B must not finish before
 * marking of zone A. Pointers which form a cycle between zones therefore
 * restrict those zones to being swept at the same time, and these are found
 * using Tarjan's algorithm for finding the strongly connected components of a
 * graph.
 *
 * GC things without finalizers, and things with finalizers that are able to run
 * in the background, are swept on the background thread. This accounts for most
 * of the sweeping work.
 *
 * Reset
 * -----
 *
 * During incremental collection it is possible, although unlikely, for
 * conditions to change such that incremental collection is no longer safe. In
 * this case, the collection is 'reset' by ResetIncrementalGC(). If we are in
 * the mark state, this just stops marking, but if we have started sweeping
 * already, we continue until we have swept the current zone group. Following a
 * reset, a new non-incremental collection is started.
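 *
 * To summarize the sweeping scheme described above, a sweep slice conceptually
 * does something like the following (illustrative pseudocode only; these
 * helpers are not the engine's real functions):
 *
 *     while (!sweepGroups.empty() && !budget.isOverBudget()) {
 *         ZoneGroup *group = sweepGroups.popNext();  // one SCC of the zone graph
 *         markThingsNewlyFoundAlive(group);
 *         finalizeForegroundThings(group);
 *         queueBackgroundSweepableThings(group);
 *     }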
 */

#include "jsgcinlines.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"

#include <string.h>     /* for memset used when DEBUG */
#ifndef XP_WIN
# include <unistd.h>
#endif

#include "jsapi.h"
#include "jsatom.h"
#include "jscntxt.h"
#include "jscompartment.h"
#include "jsobj.h"
#include "jsscript.h"
#include "jstypes.h"
#include "jsutil.h"
#include "jswatchpoint.h"
#include "jsweakmap.h"
#ifdef XP_WIN
# include "jswin.h"
#endif
#include "prmjtime.h"

#include "gc/FindSCCs.h"
#include "gc/GCInternals.h"
#include "gc/Marking.h"
#include "gc/Memory.h"
#ifdef JS_ION
# include "jit/BaselineJIT.h"
#endif
#include "jit/IonCode.h"
#include "js/SliceBudget.h"
#include "vm/Debugger.h"
#include "vm/ForkJoin.h"
#include "vm/ProxyObject.h"
#include "vm/Shape.h"
#include "vm/String.h"
#include "vm/TraceLogging.h"
#include "vm/WrapperObject.h"

#include "jsobjinlines.h"
#include "jsscriptinlines.h"

#include "vm/Stack-inl.h"
#include "vm/String-inl.h"

using namespace js;
using namespace js::gc;

using mozilla::ArrayEnd;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Swap;

/* Perform a Full GC every 20 seconds if MaybeGC is called */
static const uint64_t GC_IDLE_FULL_SPAN = 20 * 1000 * 1000;

/* Increase the IGC marking slice time if we are in highFrequencyGC mode. */
static const int IGC_MARK_SLICE_MULTIPLIER = 2;

#if defined(ANDROID) || defined(MOZ_B2G)
static const int MAX_EMPTY_CHUNK_COUNT = 2;
#else
static const int MAX_EMPTY_CHUNK_COUNT = 30;
#endif

/* This array should be const, but that doesn't link right under GCC. */
const AllocKind gc::slotsToThingKind[] = {
    /* 0 */  FINALIZE_OBJECT0,  FINALIZE_OBJECT2,  FINALIZE_OBJECT2,  FINALIZE_OBJECT4,
    /* 4 */  FINALIZE_OBJECT4,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,  FINALIZE_OBJECT8,
    /* 8 */  FINALIZE_OBJECT8,  FINALIZE_OBJECT12, FINALIZE_OBJECT12, FINALIZE_OBJECT12,
    /* 12 */ FINALIZE_OBJECT12, FINALIZE_OBJECT16, FINALIZE_OBJECT16, FINALIZE_OBJECT16,
    /* 16 */ FINALIZE_OBJECT16
};

static_assert(JS_ARRAY_LENGTH(slotsToThingKind) == SLOTS_TO_THING_KIND_LIMIT,
              "We have defined a slot count for each kind.");

const uint32_t Arena::ThingSizes[] = {
    sizeof(JSObject),           /* FINALIZE_OBJECT0             */
    sizeof(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
    sizeof(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
    sizeof(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
    sizeof(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
    sizeof(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
    sizeof(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
    sizeof(JSScript),           /* FINALIZE_SCRIPT              */
    sizeof(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
    sizeof(Shape),              /* FINALIZE_SHAPE               */
    sizeof(BaseShape),          /* FINALIZE_BASE_SHAPE          */
    sizeof(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
    sizeof(JSFatInlineString),  /* FINALIZE_FAT_INLINE_STRING   */
    sizeof(JSString),           /* FINALIZE_STRING              */
    sizeof(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
    sizeof(jit::JitCode),       /* FINALIZE_JITCODE             */
};

#define OFFSET(type) uint32_t(sizeof(ArenaHeader) + (ArenaSize - sizeof(ArenaHeader)) % sizeof(type))

const uint32_t Arena::FirstThingOffsets[] = {
    OFFSET(JSObject),           /* FINALIZE_OBJECT0             */
    OFFSET(JSObject),           /* FINALIZE_OBJECT0_BACKGROUND  */
    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2             */
    OFFSET(JSObject_Slots2),    /* FINALIZE_OBJECT2_BACKGROUND  */
    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4             */
    OFFSET(JSObject_Slots4),    /* FINALIZE_OBJECT4_BACKGROUND  */
    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8             */
    OFFSET(JSObject_Slots8),    /* FINALIZE_OBJECT8_BACKGROUND  */
    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12            */
    OFFSET(JSObject_Slots12),   /* FINALIZE_OBJECT12_BACKGROUND */
    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16            */
    OFFSET(JSObject_Slots16),   /* FINALIZE_OBJECT16_BACKGROUND */
    OFFSET(JSScript),           /* FINALIZE_SCRIPT              */
    OFFSET(LazyScript),         /* FINALIZE_LAZY_SCRIPT         */
    OFFSET(Shape),              /* FINALIZE_SHAPE               */
    OFFSET(BaseShape),          /* FINALIZE_BASE_SHAPE          */
    OFFSET(types::TypeObject),  /* FINALIZE_TYPE_OBJECT         */
    OFFSET(JSFatInlineString),  /* FINALIZE_FAT_INLINE_STRING   */
    OFFSET(JSString),           /* FINALIZE_STRING              */
    OFFSET(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
    OFFSET(jit::JitCode),       /* FINALIZE_JITCODE             */
};

#undef OFFSET

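/*
 * To make the OFFSET computation above concrete, here is a worked example with
 * purely illustrative numbers (they do not correspond to any particular
 * build): if ArenaSize were 4096, sizeof(ArenaHeader) were 40 and sizeof(type)
 * were 48, then ArenaSize - sizeof(ArenaHeader) is 4056 and 4056 % 48 == 24,
 * so the first thing is placed at offset 40 + 24 == 64. The remaining
 * 4096 - 64 == 4032 bytes are an exact multiple of 48, so any unusable padding
 * sits just after the header rather than being wasted at the end of the arena.
 */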
/*
 * Finalization order for incrementally swept things.
 */

static const AllocKind FinalizePhaseStrings[] = {
    FINALIZE_EXTERNAL_STRING
};

static const AllocKind FinalizePhaseScripts[] = {
    FINALIZE_SCRIPT,
    FINALIZE_LAZY_SCRIPT
};

static const AllocKind FinalizePhaseJitCode[] = {
    FINALIZE_JITCODE
};

static const AllocKind * const FinalizePhases[] = {
    FinalizePhaseStrings,
    FinalizePhaseScripts,
    FinalizePhaseJitCode
};
static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);

static const int FinalizePhaseLength[] = {
    sizeof(FinalizePhaseStrings) / sizeof(AllocKind),
    sizeof(FinalizePhaseScripts) / sizeof(AllocKind),
    sizeof(FinalizePhaseJitCode) / sizeof(AllocKind)
};

static const gcstats::Phase FinalizePhaseStatsPhase[] = {
    gcstats::PHASE_SWEEP_STRING,
    gcstats::PHASE_SWEEP_SCRIPT,
    gcstats::PHASE_SWEEP_JITCODE
};

/*
 * Finalization order for things swept in the background.
 */

static const AllocKind BackgroundPhaseObjects[] = {
    FINALIZE_OBJECT0_BACKGROUND,
    FINALIZE_OBJECT2_BACKGROUND,
    FINALIZE_OBJECT4_BACKGROUND,
    FINALIZE_OBJECT8_BACKGROUND,
    FINALIZE_OBJECT12_BACKGROUND,
    FINALIZE_OBJECT16_BACKGROUND
};

static const AllocKind BackgroundPhaseStrings[] = {
    FINALIZE_FAT_INLINE_STRING,
    FINALIZE_STRING
};

static const AllocKind BackgroundPhaseShapes[] = {
    FINALIZE_SHAPE,
    FINALIZE_BASE_SHAPE,
    FINALIZE_TYPE_OBJECT
};

static const AllocKind * const BackgroundPhases[] = {
    BackgroundPhaseObjects,
    BackgroundPhaseStrings,
    BackgroundPhaseShapes
};
static const int BackgroundPhaseCount = sizeof(BackgroundPhases) / sizeof(AllocKind*);

static const int BackgroundPhaseLength[] = {
    sizeof(BackgroundPhaseObjects) / sizeof(AllocKind),
    sizeof(BackgroundPhaseStrings) / sizeof(AllocKind),
    sizeof(BackgroundPhaseShapes) / sizeof(AllocKind)
};

#ifdef DEBUG
void
ArenaHeader::checkSynchronizedWithFreeList() const
{
    /*
     * Do not allow access to the free list when its real head is still stored
     * in FreeLists and is not synchronized with this one.
     */
    JS_ASSERT(allocated());

    /*
     * We can be called from the background finalization thread when the free
     * list in the zone can mutate at any moment. We cannot do any
     * checks in this case.
     */
    if (IsBackgroundFinalized(getAllocKind()) && zone->runtimeFromAnyThread()->gcHelperThread.onBackgroundThread())
        return;

    FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
    if (firstSpan.isEmpty())
        return;
    const FreeSpan *list = zone->allocator.arenas.getFreeList(getAllocKind());
    if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
        return;

    /*
     * Here this arena has free things, FreeList::lists[thingKind] is not
     * empty and also points to this arena. Thus they must be the same.
     */
    JS_ASSERT(firstSpan.isSameNonEmptySpan(list));
}
#endif

/* static */ void
Arena::staticAsserts()
{
    static_assert(JS_ARRAY_LENGTH(ThingSizes) == FINALIZE_LIMIT, "We have defined all thing sizes.");
    static_assert(JS_ARRAY_LENGTH(FirstThingOffsets) == FINALIZE_LIMIT, "We have defined all offsets.");
}

void
Arena::setAsFullyUnused(AllocKind thingKind)
{
    FreeSpan entireList;
    entireList.first = thingsStart(thingKind);
    uintptr_t arenaAddr = aheader.arenaAddress();
    entireList.last = arenaAddr | ArenaMask;
    aheader.setFirstFreeSpan(&entireList);
}

template <typename T>
inline bool
Arena::finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize)
{
    /* Enforce requirements on size of T. */
    JS_ASSERT(thingSize % CellSize == 0);
    JS_ASSERT(thingSize <= 255);

    JS_ASSERT(aheader.allocated());
    JS_ASSERT(thingKind == aheader.getAllocKind());
    JS_ASSERT(thingSize == aheader.getThingSize());
    JS_ASSERT(!aheader.hasDelayedMarking);
    JS_ASSERT(!aheader.markOverflow);
    JS_ASSERT(!aheader.allocatedDuringIncremental);

    uintptr_t thing = thingsStart(thingKind);
    uintptr_t lastByte = thingsEnd() - 1;

    FreeSpan nextFree(aheader.getFirstFreeSpan());
    nextFree.checkSpan();

    FreeSpan newListHead;
    FreeSpan *newListTail = &newListHead;
    uintptr_t newFreeSpanStart = 0;
    bool allClear = true;
    DebugOnly<size_t> nmarked = 0;
    for (;; thing += thingSize) {
        JS_ASSERT(thing <= lastByte + 1);
        if (thing == nextFree.first) {
            JS_ASSERT(nextFree.last <= lastByte);
            if (nextFree.last == lastByte)
                break;
            JS_ASSERT(Arena::isAligned(nextFree.last, thingSize));
            if (!newFreeSpanStart)
                newFreeSpanStart = thing;
            thing = nextFree.last;
            nextFree = *nextFree.nextSpan();
            nextFree.checkSpan();
        } else {
            T *t = reinterpret_cast<T *>(thing);
            if (t->isMarked()) {
                allClear = false;
                nmarked++;
                if (newFreeSpanStart) {
                    JS_ASSERT(thing >= thingsStart(thingKind) + thingSize);
                    newListTail->first = newFreeSpanStart;
                    newListTail->last = thing - thingSize;
                    newListTail = newListTail->nextSpanUnchecked(thingSize);
                    newFreeSpanStart = 0;
                }
            } else {
                if (!newFreeSpanStart)
                    newFreeSpanStart = thing;
                t->finalize(fop);
                JS_POISON(t, JS_SWEPT_TENURED_PATTERN, thingSize);
            }
        }
    }

    if (allClear) {
        JS_ASSERT(newListTail == &newListHead);
        JS_ASSERT(!newFreeSpanStart ||
                  newFreeSpanStart == thingsStart(thingKind));
        JS_EXTRA_POISON(data, JS_SWEPT_TENURED_PATTERN, sizeof(data));
        return true;
    }

    newListTail->first = newFreeSpanStart ? newFreeSpanStart : nextFree.first;
    JS_ASSERT(Arena::isAligned(newListTail->first, thingSize));
    newListTail->last = lastByte;

#ifdef DEBUG
    size_t nfree = 0;
    for (const FreeSpan *span = &newListHead; span != newListTail; span = span->nextSpan()) {
        span->checkSpan();
        JS_ASSERT(Arena::isAligned(span->first, thingSize));
        JS_ASSERT(Arena::isAligned(span->last, thingSize));
        nfree += (span->last - span->first) / thingSize + 1;
        JS_ASSERT(nfree + nmarked <= thingsPerArena(thingSize));
    }
    nfree += (newListTail->last + 1 - newListTail->first) / thingSize;
    JS_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
#endif
    aheader.setFirstFreeSpan(&newListHead);

    return false;
}

/*
 * Insert an arena into the list in appropriate position and update the cursor
 * to ensure that any arena before the cursor is full.
 */
void ArenaList::insert(ArenaHeader *a)
{
    JS_ASSERT(a);
    JS_ASSERT_IF(!head, cursor == &head);
    a->next = *cursor;
    *cursor = a;
    if (!a->hasFreeThings())
        cursor = &a->next;
}

template <typename T>
static inline bool
FinalizeTypedArenas(FreeOp *fop,
                    ArenaHeader **src,
                    ArenaList &dest,
                    AllocKind thingKind,
                    SliceBudget &budget)
{
    /*
     * Finalize arenas from src list, releasing empty arenas and inserting the
     * others into dest in an appropriate position.
     */

    /*
     * During parallel sections, we sometimes finalize the parallel arenas,
     * but in that case, we want to hold on to the memory in our arena
     * lists, not offer it up for reuse.
     */
    bool releaseArenas = !InParallelSection();

    size_t thingSize = Arena::thingSize(thingKind);

    while (ArenaHeader *aheader = *src) {
        *src = aheader->next;
        bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
        if (!allClear)
            dest.insert(aheader);
        else if (releaseArenas)
            aheader->chunk()->releaseArena(aheader);
        else
            aheader->chunk()->recycleArena(aheader, dest, thingKind);

        budget.step(Arena::thingsPerArena(thingSize));
        if (budget.isOverBudget())
            return false;
    }

    return true;
}

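/*
 * To illustrate the invariant that ArenaList::insert() above maintains: all
 * arenas before the cursor are full, and any arena that still has free things
 * sits at or after the cursor, e.g.
 *
 *     head -> [full] -> [full] -> [has free things] -> [has free things]
 *                                 ^
 *                                 *cursor
 *
 * so allocation can start scanning at *cursor without revisiting full arenas.
 */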
/*
 * Finalize the list. On return, al->cursor points to the first non-empty arena
 * after the al->head.
 */
static bool
FinalizeArenas(FreeOp *fop,
               ArenaHeader **src,
               ArenaList &dest,
               AllocKind thingKind,
               SliceBudget &budget)
{
    switch (thingKind) {
      case FINALIZE_OBJECT0:
      case FINALIZE_OBJECT0_BACKGROUND:
      case FINALIZE_OBJECT2:
      case FINALIZE_OBJECT2_BACKGROUND:
      case FINALIZE_OBJECT4:
      case FINALIZE_OBJECT4_BACKGROUND:
      case FINALIZE_OBJECT8:
      case FINALIZE_OBJECT8_BACKGROUND:
      case FINALIZE_OBJECT12:
      case FINALIZE_OBJECT12_BACKGROUND:
      case FINALIZE_OBJECT16:
      case FINALIZE_OBJECT16_BACKGROUND:
        return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
      case FINALIZE_SCRIPT:
        return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
      case FINALIZE_LAZY_SCRIPT:
        return FinalizeTypedArenas<LazyScript>(fop, src, dest, thingKind, budget);
      case FINALIZE_SHAPE:
        return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
      case FINALIZE_BASE_SHAPE:
        return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
      case FINALIZE_TYPE_OBJECT:
        return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
      case FINALIZE_STRING:
        return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
      case FINALIZE_FAT_INLINE_STRING:
        return FinalizeTypedArenas<JSFatInlineString>(fop, src, dest, thingKind, budget);
      case FINALIZE_EXTERNAL_STRING:
        return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
      case FINALIZE_JITCODE:
#ifdef JS_ION
      {
        // JitCode finalization may release references on an executable
        // allocator that is accessed when requesting interrupts.
        JSRuntime::AutoLockForInterrupt lock(fop->runtime());
        return FinalizeTypedArenas<jit::JitCode>(fop, src, dest, thingKind, budget);
      }
#endif
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid alloc kind");
    }
}

static inline Chunk *
AllocChunk(JSRuntime *rt)
{
    return static_cast<Chunk *>(MapAlignedPages(rt, ChunkSize, ChunkSize));
}

static inline void
FreeChunk(JSRuntime *rt, Chunk *p)
{
    UnmapPages(rt, static_cast<void *>(p), ChunkSize);
}

inline bool
ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
{
    /*
     * To minimize memory waste we do not want to run the background chunk
     * allocation if we have empty chunks or when the runtime needs just a few
     * of them.
     */
    return rt->gcHelperThread.canBackgroundAllocate() &&
           emptyCount == 0 &&
           rt->gcChunkSet.count() >= 4;
}

/* Must be called with the GC lock taken. */
inline Chunk *
ChunkPool::get(JSRuntime *rt)
{
    JS_ASSERT(this == &rt->gcChunkPool);

    Chunk *chunk = emptyChunkListHead;
    if (chunk) {
        JS_ASSERT(emptyCount);
        emptyChunkListHead = chunk->info.next;
        --emptyCount;
    } else {
        JS_ASSERT(!emptyCount);
        chunk = Chunk::allocate(rt);
        if (!chunk)
            return nullptr;
        JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
    }
    JS_ASSERT(chunk->unused());
    JS_ASSERT(!rt->gcChunkSet.has(chunk));

    if (wantBackgroundAllocation(rt))
        rt->gcHelperThread.startBackgroundAllocationIfIdle();

    return chunk;
}

/* Must be called either during the GC or with the GC lock taken. */
inline void
ChunkPool::put(Chunk *chunk)
{
    chunk->info.age = 0;
    chunk->info.next = emptyChunkListHead;
    emptyChunkListHead = chunk;
    emptyCount++;
}

/* Must be called either during the GC or with the GC lock taken. */
Chunk *
ChunkPool::expire(JSRuntime *rt, bool releaseAll)
{
    JS_ASSERT(this == &rt->gcChunkPool);

    /*
     * Return old empty chunks to the system while preserving the order of
     * other chunks in the list. This way, if the GC runs several times
     * without emptying the list, the older chunks will stay at the tail
     * and are more likely to reach the max age.
     */
    Chunk *freeList = nullptr;
    int freeChunkCount = 0;
    for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
        JS_ASSERT(emptyCount);
        Chunk *chunk = *chunkp;
        JS_ASSERT(chunk->unused());
        JS_ASSERT(!rt->gcChunkSet.has(chunk));
        JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
        if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
            freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
        {
            *chunkp = chunk->info.next;
            --emptyCount;
            chunk->prepareToBeFreed(rt);
            chunk->info.next = freeList;
            freeList = chunk;
        } else {
            /* Keep the chunk but increase its age. */
            ++chunk->info.age;
            chunkp = &chunk->info.next;
        }
    }
    JS_ASSERT_IF(releaseAll, !emptyCount);
    return freeList;
}

static void
FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
{
    while (Chunk *chunk = chunkListHead) {
        JS_ASSERT(!chunk->info.numArenasFreeCommitted);
        chunkListHead = chunk->info.next;
        FreeChunk(rt, chunk);
    }
}

void
ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
{
    FreeChunkList(rt, expire(rt, releaseAll));
}

/* static */ Chunk *
Chunk::allocate(JSRuntime *rt)
{
    Chunk *chunk = AllocChunk(rt);
    if (!chunk)
        return nullptr;
    chunk->init(rt);
    rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
    return chunk;
}

/* Must be called with the GC lock taken. */
/* static */ inline void
Chunk::release(JSRuntime *rt, Chunk *chunk)
{
    JS_ASSERT(chunk);
    chunk->prepareToBeFreed(rt);
    FreeChunk(rt, chunk);
}

inline void
Chunk::prepareToBeFreed(JSRuntime *rt)
{
    JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
    rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
    rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);

#ifdef DEBUG
    /*
     * Let FreeChunkList detect a missing prepareToBeFreed call before it
     * frees chunk.
     */
    info.numArenasFreeCommitted = 0;
#endif
}

void
Chunk::init(JSRuntime *rt)
{
    JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);

    /*
     * We clear the bitmap to guard against xpc_IsGrayGCThing being called on
     * uninitialized data, which would happen before the first GC cycle.
     */
    bitmap.clear();

    /*
     * Decommit the arenas. We do this after poisoning so that if the OS does
     * not have to recycle the pages, we still get the benefit of poisoning.
     */
    decommitAllArenas(rt);

    /* Initialize the chunk info. */
    info.age = 0;
    info.trailer.location = ChunkLocationTenuredHeap;
    info.trailer.runtime = rt;

    /* The rest of info fields are initialized in PickChunk. */
}

static inline Chunk **
GetAvailableChunkList(Zone *zone)
{
    JSRuntime *rt = zone->runtimeFromAnyThread();
    return zone->isSystem
           ? &rt->gcSystemAvailableChunkListHead
           : &rt->gcUserAvailableChunkListHead;
}

inline void
Chunk::addToAvailableList(Zone *zone)
{
    insertToAvailableList(GetAvailableChunkList(zone));
}

inline void
Chunk::insertToAvailableList(Chunk **insertPoint)
{
    JS_ASSERT(hasAvailableArenas());
    JS_ASSERT(!info.prevp);
    JS_ASSERT(!info.next);
    info.prevp = insertPoint;
    Chunk *insertBefore = *insertPoint;
    if (insertBefore) {
        JS_ASSERT(insertBefore->info.prevp == insertPoint);
        insertBefore->info.prevp = &info.next;
    }
    info.next = insertBefore;
    *insertPoint = this;
}

inline void
Chunk::removeFromAvailableList()
{
    JS_ASSERT(info.prevp);
    *info.prevp = info.next;
    if (info.next) {
        JS_ASSERT(info.next->info.prevp == &info.next);
        info.next->info.prevp = info.prevp;
    }
    info.prevp = nullptr;
    info.next = nullptr;
}

/*
 * Search for and return the next decommitted Arena. Our goal is to keep
 * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
 * it to the most recently freed arena when we free, and forcing it to
 * the last alloc + 1 when we allocate.
 */
uint32_t
Chunk::findDecommittedArenaOffset()
{
    /* Note: lastFreeArenaOffset can be past the end of the list. */
    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
        if (decommittedArenas.get(i))
            return i;
    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
        if (decommittedArenas.get(i))
            return i;
    MOZ_ASSUME_UNREACHABLE("No decommitted arenas found.");
}

ArenaHeader *
Chunk::fetchNextDecommittedArena()
{
    JS_ASSERT(info.numArenasFreeCommitted == 0);
    JS_ASSERT(info.numArenasFree > 0);

    unsigned offset = findDecommittedArenaOffset();
    info.lastDecommittedArenaOffset = offset + 1;
    --info.numArenasFree;
    decommittedArenas.unset(offset);

    Arena *arena = &arenas[offset];
    MarkPagesInUse(info.trailer.runtime, arena, ArenaSize);
    arena->aheader.setAsNotAllocated();

    return &arena->aheader;
}

inline ArenaHeader *
Chunk::fetchNextFreeArena(JSRuntime *rt)
{
    JS_ASSERT(info.numArenasFreeCommitted > 0);
    JS_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
    JS_ASSERT(info.numArenasFreeCommitted <= rt->gcNumArenasFreeCommitted);

    ArenaHeader *aheader = info.freeArenasHead;
    info.freeArenasHead = aheader->next;
    --info.numArenasFreeCommitted;
    --info.numArenasFree;
    --rt->gcNumArenasFreeCommitted;

    return aheader;
}

ArenaHeader *
Chunk::allocateArena(Zone *zone, AllocKind thingKind)
{
    JS_ASSERT(hasAvailableArenas());

    JSRuntime *rt = zone->runtimeFromAnyThread();
    if (!rt->isHeapMinorCollecting() && rt->gcBytes >= rt->gcMaxBytes)
        return nullptr;

    ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
                           ? fetchNextFreeArena(rt)
                           : fetchNextDecommittedArena();
    aheader->init(zone, thingKind);
    if (MOZ_UNLIKELY(!hasAvailableArenas()))
        removeFromAvailableList();

    rt->gcBytes += ArenaSize;
    zone->gcBytes += ArenaSize;

    if (zone->gcBytes >= zone->gcTriggerBytes) {
        AutoUnlockGC unlock(rt);
        TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
    }

    return aheader;
}

inline void
Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
{
    JS_ASSERT(!aheader->allocated());
    aheader->next = info.freeArenasHead;
    info.freeArenasHead = aheader;
    ++info.numArenasFreeCommitted;
    ++info.numArenasFree;
    ++rt->gcNumArenasFreeCommitted;
}

void
Chunk::recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind)
{
    aheader->getArena()->setAsFullyUnused(thingKind);
    dest.insert(aheader);
}

void
Chunk::releaseArena(ArenaHeader *aheader)
{
    JS_ASSERT(aheader->allocated());
    JS_ASSERT(!aheader->hasDelayedMarking);
    Zone *zone = aheader->zone;
    JSRuntime *rt = zone->runtimeFromAnyThread();
    AutoLockGC maybeLock;
    if (rt->gcHelperThread.sweeping())
        maybeLock.lock(rt);

    JS_ASSERT(rt->gcBytes >= ArenaSize);
    JS_ASSERT(zone->gcBytes >= ArenaSize);
    if (rt->gcHelperThread.sweeping())
        zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
    rt->gcBytes -= ArenaSize;
    zone->gcBytes -= ArenaSize;

    aheader->setAsNotAllocated();
    addArenaToFreeList(rt, aheader);

    if (info.numArenasFree == 1) {
        JS_ASSERT(!info.prevp);
        JS_ASSERT(!info.next);
        addToAvailableList(zone);
    } else if (!unused()) {
        JS_ASSERT(info.prevp);
    } else {
        rt->gcChunkSet.remove(this);
        removeFromAvailableList();
        JS_ASSERT(info.numArenasFree == ArenasPerChunk);
        decommitAllArenas(rt);
        rt->gcChunkPool.put(this);
    }
}

/* The caller must hold the GC lock. */
static Chunk *
PickChunk(Zone *zone)
{
    JSRuntime *rt = zone->runtimeFromAnyThread();
    Chunk **listHeadp = GetAvailableChunkList(zone);
    Chunk *chunk = *listHeadp;
    if (chunk)
        return chunk;

    chunk = rt->gcChunkPool.get(rt);
    if (!chunk)
        return nullptr;

    rt->gcChunkAllocationSinceLastGC = true;

    /*
     * FIXME bug 583732 - chunk is newly allocated and cannot be present in
     * the table so using ordinary lookupForAdd is suboptimal here.
     */
    GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
    JS_ASSERT(!p);
    if (!rt->gcChunkSet.add(p, chunk)) {
        Chunk::release(rt, chunk);
        return nullptr;
    }

    chunk->info.prevp = nullptr;
    chunk->info.next = nullptr;
    chunk->addToAvailableList(zone);

    return chunk;
}

#ifdef JS_GC_ZEAL

extern void
js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
{
    if (rt->gcVerifyPreData)
        VerifyBarriers(rt, PreBarrierVerifier);
    if (rt->gcVerifyPostData)
        VerifyBarriers(rt, PostBarrierVerifier);

#ifdef JSGC_GENERATIONAL
    if (rt->gcZeal_ == ZealGenerationalGCValue) {
        MinorGC(rt, JS::gcreason::DEBUG_GC);
        rt->gcNursery.leaveZealMode();
    }

    if (zeal == ZealGenerationalGCValue)
        rt->gcNursery.enterZealMode();
#endif

    bool schedule = zeal >= js::gc::ZealAllocValue;
    rt->gcZeal_ = zeal;
    rt->gcZealFrequency = frequency;
    rt->gcNextScheduled = schedule ? frequency : 0;
}

static bool
InitGCZeal(JSRuntime *rt)
{
    const char *env = getenv("JS_GC_ZEAL");
    if (!env)
        return true;

    int zeal = -1;
    int frequency = JS_DEFAULT_ZEAL_FREQ;
    if (strcmp(env, "help") != 0) {
        zeal = atoi(env);
        const char *p = strchr(env, ',');
        if (p)
            frequency = atoi(p + 1);
    }

    if (zeal < 0 || zeal > ZealLimit || frequency < 0) {
        fprintf(stderr,
                "Format: JS_GC_ZEAL=N[,F]\n"
                "N indicates \"zealousness\":\n"
                "  0: no additional GCs\n"
                "  1: additional GCs at common danger points\n"
                "  2: GC every F allocations (default: 100)\n"
                "  3: GC when the window paints (browser only)\n"
                "  4: Verify pre write barriers between instructions\n"
                "  5: Verify pre write barriers between paints\n"
                "  6: Verify stack rooting\n"
                "  7: Collect the nursery every N nursery allocations\n"
                "  8: Incremental GC in two slices: 1) mark roots 2) finish collection\n"
                "  9: Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
                " 10: Incremental GC in multiple slices\n"
                " 11: Verify post write barriers between instructions\n"
                " 12: Verify post write barriers between paints\n"
                " 13: Purge analysis state every F allocations (default: 100)\n");
        return false;
    }

    SetGCZeal(rt, zeal, frequency);
    return true;
}

#endif

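/*
 * For example (matching the help text above), running with JS_GC_ZEAL=2,100
 * in the environment schedules an extra GC every 100 allocations, while
 * JS_GC_ZEAL=10 forces collections to proceed incrementally in multiple
 * slices.
 */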
/* Lifetime for type sets attached to scripts containing observed types. */
static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;

bool
js_InitGC(JSRuntime *rt, uint32_t maxbytes)
{
    InitMemorySubsystem(rt);

    if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
        return false;

    if (!rt->gcRootsHash.init(256))
        return false;

    if (!rt->gcHelperThread.init())
        return false;

    /*
     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
     * for default backward API compatibility.
     */
    rt->gcMaxBytes = maxbytes;
    rt->setGCMaxMallocBytes(maxbytes);

#ifndef JS_MORE_DETERMINISTIC
    rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
#endif

#ifdef JSGC_GENERATIONAL
    if (!rt->gcNursery.init())
        return false;

    if (!rt->gcStoreBuffer.enable())
        return false;
#endif

#ifdef JS_GC_ZEAL
    if (!InitGCZeal(rt))
        return false;
#endif

    return true;
}

static void
RecordNativeStackTopForGC(JSRuntime *rt)
{
    ConservativeGCData *cgcd = &rt->conservativeGC;

#ifdef JS_THREADSAFE
    /* Record the stack top here only if we are called from a request. */
    if (!rt->requestDepth)
        return;
#endif
    cgcd->recordStackTop();
}

void
js_FinishGC(JSRuntime *rt)
{
    /*
     * Wait until the background finalization stops and the helper thread
     * shuts down before we forcefully release any remaining GC memory.
     */
    rt->gcHelperThread.finish();

#ifdef JS_GC_ZEAL
    /* Free memory associated with GC verification. */
    FinishVerifier(rt);
#endif

    /* Delete all remaining zones. */
    if (rt->gcInitialized) {
        for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
            for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
                js_delete(comp.get());
            js_delete(zone.get());
        }
    }

    rt->zones.clear();

    rt->gcSystemAvailableChunkListHead = nullptr;
    rt->gcUserAvailableChunkListHead = nullptr;
    if (rt->gcChunkSet.initialized()) {
        for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
            Chunk::release(rt, r.front());
        rt->gcChunkSet.clear();
    }

    rt->gcChunkPool.expireAndFree(rt, true);

    if (rt->gcRootsHash.initialized())
        rt->gcRootsHash.clear();

    rt->functionPersistentRooteds.clear();
    rt->idPersistentRooteds.clear();
    rt->objectPersistentRooteds.clear();
    rt->scriptPersistentRooteds.clear();
    rt->stringPersistentRooteds.clear();
    rt->valuePersistentRooteds.clear();
}

template <typename T> struct BarrierOwner {};
template <typename T> struct BarrierOwner<T *> { typedef T result; };
template <> struct BarrierOwner<Value> { typedef HeapValue result; };

template <typename T>
static bool
AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
{
    /*
     * Sometimes Firefox will hold weak references to objects and then convert
     * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
     * or ModifyBusyCount in workers). We need a read barrier to cover these
     * cases.
     */
    if (rt->gcIncrementalState != NO_INCREMENTAL)
        BarrierOwner<T>::result::writeBarrierPre(*rp);

    return rt->gcRootsHash.put((void *)rp, RootInfo(name, rootType));
}

template <typename T>
static bool
AddRoot(JSContext *cx, T *rp, const char *name, JSGCRootType rootType)
{
    bool ok = AddRoot(cx->runtime(), rp, name, rootType);
    if (!ok)
        JS_ReportOutOfMemory(cx);
    return ok;
}

bool
js::AddValueRoot(JSContext *cx, Value *vp, const char *name)
{
    return AddRoot(cx, vp, name, JS_GC_ROOT_VALUE_PTR);
}

extern bool
js::AddValueRootRT(JSRuntime *rt, js::Value *vp, const char *name)
{
    return AddRoot(rt, vp, name, JS_GC_ROOT_VALUE_PTR);
}

extern bool
js::AddStringRoot(JSContext *cx, JSString **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_STRING_PTR);
}

extern bool
js::AddObjectRoot(JSContext *cx, JSObject **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_OBJECT_PTR);
}

extern bool
js::AddObjectRoot(JSRuntime *rt, JSObject **rp, const char *name)
{
    return AddRoot(rt, rp, name, JS_GC_ROOT_OBJECT_PTR);
}

extern bool
js::AddScriptRoot(JSContext *cx, JSScript **rp, const char *name)
{
    return AddRoot(cx, rp, name, JS_GC_ROOT_SCRIPT_PTR);
}

extern JS_FRIEND_API(bool)
js::AddRawValueRoot(JSContext *cx, Value *vp, const char *name)
{
    return AddRoot(cx, vp, name, JS_GC_ROOT_VALUE_PTR);
}

extern JS_FRIEND_API(void)
js::RemoveRawValueRoot(JSContext *cx, Value *vp)
{
    RemoveRoot(cx->runtime(), vp);
}

void
js::RemoveRoot(JSRuntime *rt, void *rp)
{
    rt->gcRootsHash.remove(rp);
    rt->gcPoke = true;
}

typedef RootedValueMap::Range RootRange;
typedef RootedValueMap::Entry RootEntry;
typedef RootedValueMap::Enum RootEnum;

static size_t
ComputeTriggerBytes(Zone *zone, size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
{
    size_t base = gckind == GC_SHRINK
                  ? lastBytes
                  : Max(lastBytes, zone->runtimeFromMainThread()->gcAllocationThreshold);
    double trigger = double(base) * zone->gcHeapGrowthFactor;
    return size_t(Min(double(maxBytes), trigger));
}

void
Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
{
    /*
     * The heap growth factor depends on the heap size after a GC and the GC frequency.
     * For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
     * For high frequency GCs we let the heap grow depending on the heap size:
     *   lastBytes < highFrequencyLowLimit: 300%
     *   lastBytes > highFrequencyHighLimit: 150%
     *   otherwise: linear interpolation between 150% and 300% based on lastBytes
     */
    JSRuntime *rt = runtimeFromMainThread();

    if (!rt->gcDynamicHeapGrowth) {
        gcHeapGrowthFactor = 3.0;
    } else if (lastBytes < 1 * 1024 * 1024) {
        gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
    } else {
        JS_ASSERT(rt->gcHighFrequencyHighLimitBytes > rt->gcHighFrequencyLowLimitBytes);
        uint64_t now = PRMJ_Now();
        if (rt->gcLastGCTime && rt->gcLastGCTime + rt->gcHighFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > now) {
            if (lastBytes <= rt->gcHighFrequencyLowLimitBytes) {
                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMax;
            } else if (lastBytes >= rt->gcHighFrequencyHighLimitBytes) {
                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMin;
            } else {
                double k = (rt->gcHighFrequencyHeapGrowthMin - rt->gcHighFrequencyHeapGrowthMax)
                           / (double)(rt->gcHighFrequencyHighLimitBytes - rt->gcHighFrequencyLowLimitBytes);
                gcHeapGrowthFactor = (k * (lastBytes - rt->gcHighFrequencyLowLimitBytes)
                                      + rt->gcHighFrequencyHeapGrowthMax);
                JS_ASSERT(gcHeapGrowthFactor <= rt->gcHighFrequencyHeapGrowthMax
                          && gcHeapGrowthFactor >= rt->gcHighFrequencyHeapGrowthMin);
            }
            rt->gcHighFrequencyGC = true;
        } else {
            gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
            rt->gcHighFrequencyGC = false;
        }
    }
    gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gcMaxBytes, gckind);
}

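/*
 * A worked example of the high-frequency interpolation above, using
 * illustrative values only (the real limits and growth factors are the
 * rt->gcHighFrequency* settings): with a low limit of 100 MB at 300% growth
 * and a high limit of 500 MB at 150% growth, a zone whose heap measured
 * 300 MB after the last GC lies halfway between the limits, so
 *
 *     k      = (1.5 - 3.0) / (500 MB - 100 MB)  = -0.00375 per MB
 *     factor = k * (300 MB - 100 MB) + 3.0      = 2.25
 *
 * and the zone's trigger becomes 225% of its current heap size, subject to
 * the gcMaxBytes clamp applied in ComputeTriggerBytes().
 */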
void
Zone::reduceGCTriggerBytes(size_t amount)
{
    JS_ASSERT(amount > 0);
    JS_ASSERT(gcTriggerBytes >= amount);
    if (gcTriggerBytes - amount < runtimeFromAnyThread()->gcAllocationThreshold * gcHeapGrowthFactor)
        return;
    gcTriggerBytes -= amount;
}

Allocator::Allocator(Zone *zone)
  : zone_(zone)
{}

inline void
GCMarker::delayMarkingArena(ArenaHeader *aheader)
{
    if (aheader->hasDelayedMarking) {
        /* Arena already scheduled to be marked later */
        return;
    }
    aheader->setNextDelayedMarking(unmarkedArenaStackTop);
    unmarkedArenaStackTop = aheader;
    markLaterArenas++;
}

void
GCMarker::delayMarkingChildren(const void *thing)
{
    const Cell *cell = reinterpret_cast<const Cell *>(thing);
    cell->arenaHeader()->markOverflow = 1;
    delayMarkingArena(cell->arenaHeader());
}

inline void
ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
{
    for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
        FreeSpan *headSpan = &freeLists[i];
        if (!headSpan->isEmpty()) {
            ArenaHeader *aheader = headSpan->arenaHeader();
            aheader->allocatedDuringIncremental = true;
            rt->gcMarker.delayMarkingArena(aheader);
        }
    }
}

static inline void
PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
{
    arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
    runtime->gcArenasAllocatedDuringSweep = arena;
}

inline void *
ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
{
    /*
     * Parallel JS Note:
     *
     * This function can be called from parallel threads all of which
     * are associated with the same compartment. In that case, each
     * thread will have a distinct ArenaLists. Therefore, whenever we
     * fall through to PickChunk() we must be sure that we are holding
     * a lock.
     */

    Chunk *chunk = nullptr;

    ArenaList *al = &arenaLists[thingKind];
    AutoLockGC maybeLock;

#ifdef JS_THREADSAFE
    volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
    if (*bfs != BFS_DONE) {
        /*
         * We cannot search the arena list for free things while the
         * background finalization runs and can modify head or cursor at any
         * moment. So we always allocate a new arena in that case.
         */
        maybeLock.lock(zone->runtimeFromAnyThread());
        if (*bfs == BFS_RUN) {
            JS_ASSERT(!*al->cursor);
            chunk = PickChunk(zone);
            if (!chunk) {
                /*
                 * Let the caller wait for the background allocation to
                 * finish and restart the allocation attempt.
                 */
                return nullptr;
            }
        } else if (*bfs == BFS_JUST_FINISHED) {
            /* See comments before BackgroundFinalizeState definition. */
            *bfs = BFS_DONE;
        } else {
            JS_ASSERT(*bfs == BFS_DONE);
        }
    }
#endif /* JS_THREADSAFE */

    if (!chunk) {
        if (ArenaHeader *aheader = *al->cursor) {
            JS_ASSERT(aheader->hasFreeThings());

            /*
             * Normally, the empty arenas are returned to the chunk
             * and should not be present on the list. In parallel
             * execution, however, we keep empty arenas in the arena
             * list to avoid synchronizing on the chunk.
             */
            JS_ASSERT(!aheader->isEmpty() || InParallelSection());
            al->cursor = &aheader->next;

            /*
             * Move the free span stored in the arena to the free list and
             * allocate from it.
             */
            freeLists[thingKind] = aheader->getFirstFreeSpan();
            aheader->setAsFullyUsed();
            if (MOZ_UNLIKELY(zone->wasGCStarted())) {
                if (zone->needsBarrier()) {
                    aheader->allocatedDuringIncremental = true;
                    zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
                } else if (zone->isGCSweeping()) {
                    PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
                }
            }
            return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
        }

        /* Make sure we hold the GC lock before we call PickChunk. */
        if (!maybeLock.locked())
            maybeLock.lock(zone->runtimeFromAnyThread());
        chunk = PickChunk(zone);
        if (!chunk)
            return nullptr;
    }

    /*
     * While we still hold the GC lock get an arena from some chunk, mark it
     * as full as its single free span is moved to the free lists, and insert
     * it to the list as a fully allocated arena.
     *
     * We add the arena before the head, not after the tail pointed to by the
     * cursor, so after the GC the most recently added arena will be used first
     * for allocations, improving cache locality.
     */
    JS_ASSERT(!*al->cursor);
    ArenaHeader *aheader = chunk->allocateArena(zone, thingKind);
    if (!aheader)
        return nullptr;

    if (MOZ_UNLIKELY(zone->wasGCStarted())) {
        if (zone->needsBarrier()) {
            aheader->allocatedDuringIncremental = true;
            zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
        } else if (zone->isGCSweeping()) {
            PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
        }
    }
    aheader->next = al->head;
    if (!al->head) {
        JS_ASSERT(al->cursor == &al->head);
        al->cursor = &aheader->next;
    }
    al->head = aheader;

    /* See comments before allocateFromNewArena about this assert. */
*/ michael@0: JS_ASSERT(!aheader->hasFreeThings()); michael@0: uintptr_t arenaAddr = aheader->arenaAddress(); michael@0: return freeLists[thingKind].allocateFromNewArena(arenaAddr, michael@0: Arena::firstThingOffset(thingKind), michael@0: Arena::thingSize(thingKind)); michael@0: } michael@0: michael@0: void * michael@0: ArenaLists::allocateFromArena(JS::Zone *zone, AllocKind thingKind) michael@0: { michael@0: return allocateFromArenaInline(zone, thingKind); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::wipeDuringParallelExecution(JSRuntime *rt) michael@0: { michael@0: JS_ASSERT(InParallelSection()); michael@0: michael@0: // First, check that we all objects we have allocated are eligible michael@0: // for background finalization. The idea is that we will free michael@0: // (below) ALL background finalizable objects, because we know (by michael@0: // the rules of parallel execution) they are not reachable except michael@0: // by other thread-local objects. However, if there were any michael@0: // object ineligible for background finalization, it might retain michael@0: // a reference to one of these background finalizable objects, and michael@0: // that'd be bad. michael@0: for (unsigned i = 0; i < FINALIZE_LAST; i++) { michael@0: AllocKind thingKind = AllocKind(i); michael@0: if (!IsBackgroundFinalized(thingKind) && arenaLists[thingKind].head) michael@0: return; michael@0: } michael@0: michael@0: // Finalize all background finalizable objects immediately and michael@0: // return the (now empty) arenas back to arena list. michael@0: FreeOp fop(rt, false); michael@0: for (unsigned i = 0; i < FINALIZE_OBJECT_LAST; i++) { michael@0: AllocKind thingKind = AllocKind(i); michael@0: michael@0: if (!IsBackgroundFinalized(thingKind)) michael@0: continue; michael@0: michael@0: if (arenaLists[i].head) { michael@0: purge(thingKind); michael@0: forceFinalizeNow(&fop, thingKind); michael@0: } michael@0: } michael@0: } michael@0: michael@0: void michael@0: ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind) michael@0: { michael@0: JS_ASSERT(!IsBackgroundFinalized(thingKind)); michael@0: forceFinalizeNow(fop, thingKind); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::forceFinalizeNow(FreeOp *fop, AllocKind thingKind) michael@0: { michael@0: JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE); michael@0: michael@0: ArenaHeader *arenas = arenaLists[thingKind].head; michael@0: arenaLists[thingKind].clear(); michael@0: michael@0: SliceBudget budget; michael@0: FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget); michael@0: JS_ASSERT(!arenas); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind) michael@0: { michael@0: JS_ASSERT(!IsBackgroundFinalized(thingKind)); michael@0: JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE); michael@0: JS_ASSERT(!arenaListsToSweep[thingKind]); michael@0: michael@0: arenaListsToSweep[thingKind] = arenaLists[thingKind].head; michael@0: arenaLists[thingKind].clear(); michael@0: } michael@0: michael@0: inline void michael@0: ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind) michael@0: { michael@0: JS_ASSERT(IsBackgroundFinalized(thingKind)); michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: JS_ASSERT(!fop->runtime()->gcHelperThread.sweeping()); michael@0: #endif michael@0: michael@0: ArenaList *al = &arenaLists[thingKind]; michael@0: if (!al->head) { michael@0: JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE); 
michael@0: JS_ASSERT(al->cursor == &al->head); michael@0: return; michael@0: } michael@0: michael@0: /* michael@0: * The state can be done, or just-finished if we have not allocated any GC michael@0: * things from the arena list after the previous background finalization. michael@0: */ michael@0: JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE || michael@0: backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED); michael@0: michael@0: arenaListsToSweep[thingKind] = al->head; michael@0: al->clear(); michael@0: backgroundFinalizeState[thingKind] = BFS_RUN; michael@0: } michael@0: michael@0: /*static*/ void michael@0: ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgroundThread) michael@0: { michael@0: JS_ASSERT(listHead); michael@0: AllocKind thingKind = listHead->getAllocKind(); michael@0: Zone *zone = listHead->zone; michael@0: michael@0: ArenaList finalized; michael@0: SliceBudget budget; michael@0: FinalizeArenas(fop, &listHead, finalized, thingKind, budget); michael@0: JS_ASSERT(!listHead); michael@0: michael@0: /* michael@0: * After we finish the finalization al->cursor must point to the end of michael@0: * the head list as we emptied the list before the background finalization michael@0: * and the allocation adds new arenas before the cursor. michael@0: */ michael@0: ArenaLists *lists = &zone->allocator.arenas; michael@0: ArenaList *al = &lists->arenaLists[thingKind]; michael@0: michael@0: AutoLockGC lock(fop->runtime()); michael@0: JS_ASSERT(lists->backgroundFinalizeState[thingKind] == BFS_RUN); michael@0: JS_ASSERT(!*al->cursor); michael@0: michael@0: if (finalized.head) { michael@0: *al->cursor = finalized.head; michael@0: if (finalized.cursor != &finalized.head) michael@0: al->cursor = finalized.cursor; michael@0: } michael@0: michael@0: /* michael@0: * We must set the state to BFS_JUST_FINISHED if we are running on the michael@0: * background thread and we have touched arenaList list, even if we add to michael@0: * the list only fully allocated arenas without any free things. It ensures michael@0: * that the allocation thread takes the GC lock and all writes to the free michael@0: * list elements are propagated. As we always take the GC lock when michael@0: * allocating new arenas from the chunks we can set the state to BFS_DONE if michael@0: * we have released all finalized arenas back to their chunks. 
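     *
     * The decision below can be tabulated as follows (an informal recap of
     * the code that follows, not an independent specification):
     *
     *   onBackgroundThread   finalized.head   resulting state
     *   ------------------   --------------   -----------------
     *   true                 non-null         BFS_JUST_FINISHED
     *   true                 null             BFS_DONE
     *   false                any              BFS_DONE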
michael@0: */ michael@0: if (onBackgroundThread && finalized.head) michael@0: lists->backgroundFinalizeState[thingKind] = BFS_JUST_FINISHED; michael@0: else michael@0: lists->backgroundFinalizeState[thingKind] = BFS_DONE; michael@0: michael@0: lists->arenaListsToSweep[thingKind] = nullptr; michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueObjectsForSweep(FreeOp *fop) michael@0: { michael@0: gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_OBJECT); michael@0: michael@0: finalizeNow(fop, FINALIZE_OBJECT0); michael@0: finalizeNow(fop, FINALIZE_OBJECT2); michael@0: finalizeNow(fop, FINALIZE_OBJECT4); michael@0: finalizeNow(fop, FINALIZE_OBJECT8); michael@0: finalizeNow(fop, FINALIZE_OBJECT12); michael@0: finalizeNow(fop, FINALIZE_OBJECT16); michael@0: michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND); michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND); michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND); michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND); michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND); michael@0: queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueStringsForSweep(FreeOp *fop) michael@0: { michael@0: gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_STRING); michael@0: michael@0: queueForBackgroundSweep(fop, FINALIZE_FAT_INLINE_STRING); michael@0: queueForBackgroundSweep(fop, FINALIZE_STRING); michael@0: michael@0: queueForForegroundSweep(fop, FINALIZE_EXTERNAL_STRING); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueScriptsForSweep(FreeOp *fop) michael@0: { michael@0: gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SCRIPT); michael@0: queueForForegroundSweep(fop, FINALIZE_SCRIPT); michael@0: queueForForegroundSweep(fop, FINALIZE_LAZY_SCRIPT); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueJitCodeForSweep(FreeOp *fop) michael@0: { michael@0: gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_JITCODE); michael@0: queueForForegroundSweep(fop, FINALIZE_JITCODE); michael@0: } michael@0: michael@0: void michael@0: ArenaLists::queueShapesForSweep(FreeOp *fop) michael@0: { michael@0: gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SHAPE); michael@0: michael@0: queueForBackgroundSweep(fop, FINALIZE_SHAPE); michael@0: queueForBackgroundSweep(fop, FINALIZE_BASE_SHAPE); michael@0: queueForBackgroundSweep(fop, FINALIZE_TYPE_OBJECT); michael@0: } michael@0: michael@0: static void * michael@0: RunLastDitchGC(JSContext *cx, JS::Zone *zone, AllocKind thingKind) michael@0: { michael@0: /* michael@0: * In parallel sections, we do not attempt to refill the free list michael@0: * and hence do not encounter last ditch GC. michael@0: */ michael@0: JS_ASSERT(!InParallelSection()); michael@0: michael@0: PrepareZoneForGC(zone); michael@0: michael@0: JSRuntime *rt = cx->runtime(); michael@0: michael@0: /* The last ditch GC preserves all atoms. */ michael@0: AutoKeepAtoms keepAtoms(cx->perThreadData); michael@0: GC(rt, GC_NORMAL, JS::gcreason::LAST_DITCH); michael@0: michael@0: /* michael@0: * The JSGC_END callback can legitimately allocate new GC michael@0: * things and populate the free list. If that happens, just michael@0: * return that list head. 
michael@0: */ michael@0: size_t thingSize = Arena::thingSize(thingKind); michael@0: if (void *thing = zone->allocator.arenas.allocateFromFreeList(thingKind, thingSize)) michael@0: return thing; michael@0: michael@0: return nullptr; michael@0: } michael@0: michael@0: template michael@0: /* static */ void * michael@0: ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind) michael@0: { michael@0: JS_ASSERT(cx->allocator()->arenas.freeLists[thingKind].isEmpty()); michael@0: JS_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->runtime()->isHeapBusy()); michael@0: michael@0: Zone *zone = cx->allocator()->zone_; michael@0: michael@0: bool runGC = cx->allowGC() && allowGC && michael@0: cx->asJSContext()->runtime()->gcIncrementalState != NO_INCREMENTAL && michael@0: zone->gcBytes > zone->gcTriggerBytes; michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: JS_ASSERT_IF(cx->isJSContext() && allowGC, michael@0: !cx->asJSContext()->runtime()->currentThreadHasExclusiveAccess()); michael@0: #endif michael@0: michael@0: for (;;) { michael@0: if (MOZ_UNLIKELY(runGC)) { michael@0: if (void *thing = RunLastDitchGC(cx->asJSContext(), zone, thingKind)) michael@0: return thing; michael@0: } michael@0: michael@0: if (cx->isJSContext()) { michael@0: /* michael@0: * allocateFromArena may fail while the background finalization still michael@0: * run. If we are on the main thread, we want to wait for it to finish michael@0: * and restart. However, checking for that is racy as the background michael@0: * finalization could free some things after allocateFromArena decided michael@0: * to fail but at this point it may have already stopped. To avoid michael@0: * this race we always try to allocate twice. michael@0: */ michael@0: for (bool secondAttempt = false; ; secondAttempt = true) { michael@0: void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind); michael@0: if (MOZ_LIKELY(!!thing)) michael@0: return thing; michael@0: if (secondAttempt) michael@0: break; michael@0: michael@0: cx->asJSContext()->runtime()->gcHelperThread.waitBackgroundSweepEnd(); michael@0: } michael@0: } else { michael@0: #ifdef JS_THREADSAFE michael@0: /* michael@0: * If we're off the main thread, we try to allocate once and michael@0: * return whatever value we get. If we aren't in a ForkJoin michael@0: * session (i.e. we are in a worker thread async with the main michael@0: * thread), we need to first ensure the main thread is not in a GC michael@0: * session. michael@0: */ michael@0: mozilla::Maybe lock; michael@0: JSRuntime *rt = zone->runtimeFromAnyThread(); michael@0: if (rt->exclusiveThreadsPresent()) { michael@0: lock.construct(); michael@0: while (rt->isHeapBusy()) michael@0: WorkerThreadState().wait(GlobalWorkerThreadState::PRODUCER); michael@0: } michael@0: michael@0: void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind); michael@0: if (thing) michael@0: return thing; michael@0: #else michael@0: MOZ_CRASH(); michael@0: #endif michael@0: } michael@0: michael@0: if (!cx->allowGC() || !allowGC) michael@0: return nullptr; michael@0: michael@0: /* michael@0: * We failed to allocate. Run the GC if we haven't done it already. michael@0: * Otherwise report OOM. 
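         *
         * The retry protocol of the enclosing loop, summarised as a sketch
         * (informal, derived from the code above; the code itself is
         * authoritative):
         *
         *   for (;;) {
         *       if (runGC)
         *           run a last-ditch GC, then retry the free list;
         *       try allocateFromArenaInline (twice on the main thread, to
         *           close the race with background finalization);
         *       if (!allowGC)
         *           return nullptr;        // caller copes with the failure
         *       if (runGC)                 // we already ran the GC once
         *           break;                 // fall through to the OOM report
         *       runGC = true;              // collect on the next iteration
         *   }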
michael@0: */ michael@0: if (runGC) michael@0: break; michael@0: runGC = true; michael@0: } michael@0: michael@0: JS_ASSERT(allowGC); michael@0: js_ReportOutOfMemory(cx); michael@0: return nullptr; michael@0: } michael@0: michael@0: template void * michael@0: ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind); michael@0: michael@0: template void * michael@0: ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind); michael@0: michael@0: JSGCTraceKind michael@0: js_GetGCThingTraceKind(void *thing) michael@0: { michael@0: return GetGCThingTraceKind(thing); michael@0: } michael@0: michael@0: /* static */ int64_t michael@0: SliceBudget::TimeBudget(int64_t millis) michael@0: { michael@0: return millis * PRMJ_USEC_PER_MSEC; michael@0: } michael@0: michael@0: /* static */ int64_t michael@0: SliceBudget::WorkBudget(int64_t work) michael@0: { michael@0: /* For work = 0 not to mean Unlimited, we subtract 1. */ michael@0: return -work - 1; michael@0: } michael@0: michael@0: SliceBudget::SliceBudget() michael@0: : deadline(INT64_MAX), michael@0: counter(INTPTR_MAX) michael@0: { michael@0: } michael@0: michael@0: SliceBudget::SliceBudget(int64_t budget) michael@0: { michael@0: if (budget == Unlimited) { michael@0: deadline = INT64_MAX; michael@0: counter = INTPTR_MAX; michael@0: } else if (budget > 0) { michael@0: deadline = PRMJ_Now() + budget; michael@0: counter = CounterReset; michael@0: } else { michael@0: deadline = 0; michael@0: counter = -budget - 1; michael@0: } michael@0: } michael@0: michael@0: bool michael@0: SliceBudget::checkOverBudget() michael@0: { michael@0: bool over = PRMJ_Now() > deadline; michael@0: if (!over) michael@0: counter = CounterReset; michael@0: return over; michael@0: } michael@0: michael@0: void michael@0: js::MarkCompartmentActive(InterpreterFrame *fp) michael@0: { michael@0: fp->script()->compartment()->zone()->active = true; michael@0: } michael@0: michael@0: static void michael@0: RequestInterrupt(JSRuntime *rt, JS::gcreason::Reason reason) michael@0: { michael@0: if (rt->gcIsNeeded) michael@0: return; michael@0: michael@0: rt->gcIsNeeded = true; michael@0: rt->gcTriggerReason = reason; michael@0: rt->requestInterrupt(JSRuntime::RequestInterruptMainThread); michael@0: } michael@0: michael@0: bool michael@0: js::TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason) michael@0: { michael@0: /* Wait till end of parallel section to trigger GC. */ michael@0: if (InParallelSection()) { michael@0: ForkJoinContext::current()->requestGC(reason); michael@0: return true; michael@0: } michael@0: michael@0: /* Don't trigger GCs when allocating under the interrupt callback lock. */ michael@0: if (rt->currentThreadOwnsInterruptLock()) michael@0: return false; michael@0: michael@0: JS_ASSERT(CurrentThreadCanAccessRuntime(rt)); michael@0: michael@0: /* GC is already running. */ michael@0: if (rt->isHeapCollecting()) michael@0: return false; michael@0: michael@0: JS::PrepareForFullGC(rt); michael@0: RequestInterrupt(rt, reason); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason) michael@0: { michael@0: /* michael@0: * If parallel threads are running, wait till they michael@0: * are stopped to trigger GC. michael@0: */ michael@0: if (InParallelSection()) { michael@0: ForkJoinContext::current()->requestZoneGC(zone, reason); michael@0: return true; michael@0: } michael@0: michael@0: /* Zones in use by a thread with an exclusive context can't be collected. 
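     *
     * For orientation, the checks in this function fall out as follows (an
     * informal summary of the code below):
     *
     *   zone used by an exclusive thread      -> no GC, return false
     *   allocating under the interrupt lock   -> no GC, return false
     *   heap already collecting               -> no GC, return false
     *   gcZeal() == ZealAllocValue            -> escalate to a full GC
     *   zone is the atoms zone                -> escalate to a full GC
     *   otherwise                             -> schedule this zone and
     *                                            request an interrupt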
*/ michael@0: if (zone->usedByExclusiveThread) michael@0: return false; michael@0: michael@0: JSRuntime *rt = zone->runtimeFromMainThread(); michael@0: michael@0: /* Don't trigger GCs when allocating under the interrupt callback lock. */ michael@0: if (rt->currentThreadOwnsInterruptLock()) michael@0: return false; michael@0: michael@0: /* GC is already running. */ michael@0: if (rt->isHeapCollecting()) michael@0: return false; michael@0: michael@0: if (rt->gcZeal() == ZealAllocValue) { michael@0: TriggerGC(rt, reason); michael@0: return true; michael@0: } michael@0: michael@0: if (rt->isAtomsZone(zone)) { michael@0: /* We can't do a zone GC of the atoms compartment. */ michael@0: TriggerGC(rt, reason); michael@0: return true; michael@0: } michael@0: michael@0: PrepareZoneForGC(zone); michael@0: RequestInterrupt(rt, reason); michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: js::MaybeGC(JSContext *cx) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: JS_ASSERT(CurrentThreadCanAccessRuntime(rt)); michael@0: michael@0: if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) { michael@0: JS::PrepareForFullGC(rt); michael@0: GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC); michael@0: return; michael@0: } michael@0: michael@0: if (rt->gcIsNeeded) { michael@0: GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC); michael@0: return; michael@0: } michael@0: michael@0: double factor = rt->gcHighFrequencyGC ? 0.85 : 0.9; michael@0: Zone *zone = cx->zone(); michael@0: if (zone->gcBytes > 1024 * 1024 && michael@0: zone->gcBytes >= factor * zone->gcTriggerBytes && michael@0: rt->gcIncrementalState == NO_INCREMENTAL && michael@0: !rt->gcHelperThread.sweeping()) michael@0: { michael@0: PrepareZoneForGC(zone); michael@0: GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC); michael@0: return; michael@0: } michael@0: michael@0: #ifndef JS_MORE_DETERMINISTIC michael@0: /* michael@0: * Access to the counters and, on 32 bit, setting gcNextFullGCTime below michael@0: * is not atomic and a race condition could trigger or suppress the GC. We michael@0: * tolerate this. michael@0: */ michael@0: int64_t now = PRMJ_Now(); michael@0: if (rt->gcNextFullGCTime && rt->gcNextFullGCTime <= now) { michael@0: if (rt->gcChunkAllocationSinceLastGC || michael@0: rt->gcNumArenasFreeCommitted > rt->gcDecommitThreshold) michael@0: { michael@0: JS::PrepareForFullGC(rt); michael@0: GCSlice(rt, GC_SHRINK, JS::gcreason::MAYBEGC); michael@0: } else { michael@0: rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN; michael@0: } michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp) michael@0: { michael@0: Chunk *chunk = *availableListHeadp; michael@0: if (!chunk) michael@0: return; michael@0: michael@0: /* michael@0: * Decommit is expensive so we avoid holding the GC lock while calling it. michael@0: * michael@0: * We decommit from the tail of the list to minimize interference with the michael@0: * main thread that may start to allocate things at this point. michael@0: * michael@0: * The arena that is been decommitted outside the GC lock must not be michael@0: * available for allocations either via the free list or via the michael@0: * decommittedArenas bitmap. For that we just fetch the arena from the michael@0: * free list before the decommit pretending as it was allocated. 
If this michael@0: * arena also is the single free arena in the chunk, then we must remove michael@0: * from the available list before we release the lock so the allocation michael@0: * thread would not see chunks with no free arenas on the available list. michael@0: * michael@0: * After we retake the lock, we mark the arena as free and decommitted if michael@0: * the decommit was successful. We must also add the chunk back to the michael@0: * available list if we removed it previously or when the main thread michael@0: * have allocated all remaining free arenas in the chunk. michael@0: * michael@0: * We also must make sure that the aheader is not accessed again after we michael@0: * decommit the arena. michael@0: */ michael@0: JS_ASSERT(chunk->info.prevp == availableListHeadp); michael@0: while (Chunk *next = chunk->info.next) { michael@0: JS_ASSERT(next->info.prevp == &chunk->info.next); michael@0: chunk = next; michael@0: } michael@0: michael@0: for (;;) { michael@0: while (chunk->info.numArenasFreeCommitted != 0) { michael@0: ArenaHeader *aheader = chunk->fetchNextFreeArena(rt); michael@0: michael@0: Chunk **savedPrevp = chunk->info.prevp; michael@0: if (!chunk->hasAvailableArenas()) michael@0: chunk->removeFromAvailableList(); michael@0: michael@0: size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress()); michael@0: bool ok; michael@0: { michael@0: /* michael@0: * If the main thread waits for the decommit to finish, skip michael@0: * potentially expensive unlock/lock pair on the contested michael@0: * lock. michael@0: */ michael@0: Maybe maybeUnlock; michael@0: if (!rt->isHeapBusy()) michael@0: maybeUnlock.construct(rt); michael@0: ok = MarkPagesUnused(rt, aheader->getArena(), ArenaSize); michael@0: } michael@0: michael@0: if (ok) { michael@0: ++chunk->info.numArenasFree; michael@0: chunk->decommittedArenas.set(arenaIndex); michael@0: } else { michael@0: chunk->addArenaToFreeList(rt, aheader); michael@0: } michael@0: JS_ASSERT(chunk->hasAvailableArenas()); michael@0: JS_ASSERT(!chunk->unused()); michael@0: if (chunk->info.numArenasFree == 1) { michael@0: /* michael@0: * Put the chunk back to the available list either at the michael@0: * point where it was before to preserve the available list michael@0: * that we enumerate, or, when the allocation thread has fully michael@0: * used all the previous chunks, at the beginning of the michael@0: * available list. michael@0: */ michael@0: Chunk **insertPoint = savedPrevp; michael@0: if (savedPrevp != availableListHeadp) { michael@0: Chunk *prev = Chunk::fromPointerToNext(savedPrevp); michael@0: if (!prev->hasAvailableArenas()) michael@0: insertPoint = availableListHeadp; michael@0: } michael@0: chunk->insertToAvailableList(insertPoint); michael@0: } else { michael@0: JS_ASSERT(chunk->info.prevp); michael@0: } michael@0: michael@0: if (rt->gcChunkAllocationSinceLastGC || !ok) { michael@0: /* michael@0: * The allocator thread has started to get new chunks. We should stop michael@0: * to avoid decommitting arenas in just allocated chunks. michael@0: */ michael@0: return; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * chunk->info.prevp becomes null when the allocator thread consumed michael@0: * all chunks from the available list. michael@0: */ michael@0: JS_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk); michael@0: if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp) michael@0: break; michael@0: michael@0: /* michael@0: * prevp exists and is not the list head. 
It must point to the next michael@0: * field of the previous chunk. michael@0: */ michael@0: chunk = chunk->getPrevious(); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: DecommitArenas(JSRuntime *rt) michael@0: { michael@0: DecommitArenasFromAvailableList(rt, &rt->gcSystemAvailableChunkListHead); michael@0: DecommitArenasFromAvailableList(rt, &rt->gcUserAvailableChunkListHead); michael@0: } michael@0: michael@0: /* Must be called with the GC lock taken. */ michael@0: static void michael@0: ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink) michael@0: { michael@0: if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) { michael@0: AutoUnlockGC unlock(rt); michael@0: FreeChunkList(rt, toFree); michael@0: } michael@0: michael@0: if (shouldShrink) michael@0: DecommitArenas(rt); michael@0: } michael@0: michael@0: static void michael@0: SweepBackgroundThings(JSRuntime* rt, bool onBackgroundThread) michael@0: { michael@0: /* michael@0: * We must finalize in the correct order, see comments in michael@0: * finalizeObjects. michael@0: */ michael@0: FreeOp fop(rt, false); michael@0: for (int phase = 0 ; phase < BackgroundPhaseCount ; ++phase) { michael@0: for (Zone *zone = rt->gcSweepingZones; zone; zone = zone->gcNextGraphNode) { michael@0: for (int index = 0 ; index < BackgroundPhaseLength[phase] ; ++index) { michael@0: AllocKind kind = BackgroundPhases[phase][index]; michael@0: ArenaHeader *arenas = zone->allocator.arenas.arenaListsToSweep[kind]; michael@0: if (arenas) michael@0: ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread); michael@0: } michael@0: } michael@0: } michael@0: michael@0: rt->gcSweepingZones = nullptr; michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: static void michael@0: AssertBackgroundSweepingFinished(JSRuntime *rt) michael@0: { michael@0: JS_ASSERT(!rt->gcSweepingZones); michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) { michael@0: JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]); michael@0: JS_ASSERT(zone->allocator.arenas.doneBackgroundFinalize(AllocKind(i))); michael@0: } michael@0: } michael@0: } michael@0: michael@0: unsigned michael@0: js::GetCPUCount() michael@0: { michael@0: static unsigned ncpus = 0; michael@0: if (ncpus == 0) { michael@0: # ifdef XP_WIN michael@0: SYSTEM_INFO sysinfo; michael@0: GetSystemInfo(&sysinfo); michael@0: ncpus = unsigned(sysinfo.dwNumberOfProcessors); michael@0: # else michael@0: long n = sysconf(_SC_NPROCESSORS_ONLN); michael@0: ncpus = (n > 0) ? 
unsigned(n) : 1; michael@0: # endif michael@0: } michael@0: return ncpus; michael@0: } michael@0: #endif /* JS_THREADSAFE */ michael@0: michael@0: bool michael@0: GCHelperThread::init() michael@0: { michael@0: if (!rt->useHelperThreads()) { michael@0: backgroundAllocation = false; michael@0: return true; michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: if (!(wakeup = PR_NewCondVar(rt->gcLock))) michael@0: return false; michael@0: if (!(done = PR_NewCondVar(rt->gcLock))) michael@0: return false; michael@0: michael@0: thread = PR_CreateThread(PR_USER_THREAD, threadMain, this, PR_PRIORITY_NORMAL, michael@0: PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0); michael@0: if (!thread) michael@0: return false; michael@0: michael@0: backgroundAllocation = (GetCPUCount() >= 2); michael@0: #endif /* JS_THREADSAFE */ michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::finish() michael@0: { michael@0: if (!rt->useHelperThreads() || !rt->gcLock) { michael@0: JS_ASSERT(state == IDLE); michael@0: return; michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: PRThread *join = nullptr; michael@0: { michael@0: AutoLockGC lock(rt); michael@0: if (thread && state != SHUTDOWN) { michael@0: /* michael@0: * We cannot be in the ALLOCATING or CANCEL_ALLOCATION states as michael@0: * the allocations should have been stopped during the last GC. michael@0: */ michael@0: JS_ASSERT(state == IDLE || state == SWEEPING); michael@0: if (state == IDLE) michael@0: PR_NotifyCondVar(wakeup); michael@0: state = SHUTDOWN; michael@0: join = thread; michael@0: } michael@0: } michael@0: if (join) { michael@0: /* PR_DestroyThread is not necessary. */ michael@0: PR_JoinThread(join); michael@0: } michael@0: if (wakeup) michael@0: PR_DestroyCondVar(wakeup); michael@0: if (done) michael@0: PR_DestroyCondVar(done); michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: #ifdef MOZ_NUWA_PROCESS michael@0: extern "C" { michael@0: MFBT_API bool IsNuwaProcess(); michael@0: MFBT_API void NuwaMarkCurrentThread(void (*recreate)(void *), void *arg); michael@0: } michael@0: #endif michael@0: michael@0: /* static */ michael@0: void michael@0: GCHelperThread::threadMain(void *arg) michael@0: { michael@0: PR_SetCurrentThreadName("JS GC Helper"); michael@0: michael@0: #ifdef MOZ_NUWA_PROCESS michael@0: if (IsNuwaProcess && IsNuwaProcess()) { michael@0: JS_ASSERT(NuwaMarkCurrentThread != nullptr); michael@0: NuwaMarkCurrentThread(nullptr, nullptr); michael@0: } michael@0: #endif michael@0: michael@0: static_cast(arg)->threadLoop(); michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::wait(PRCondVar *which) michael@0: { michael@0: rt->gcLockOwner = nullptr; michael@0: PR_WaitCondVar(which, PR_INTERVAL_NO_TIMEOUT); michael@0: #ifdef DEBUG michael@0: rt->gcLockOwner = PR_GetCurrentThread(); michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::threadLoop() michael@0: { michael@0: AutoLockGC lock(rt); michael@0: michael@0: TraceLogger *logger = TraceLoggerForCurrentThread(); michael@0: michael@0: /* michael@0: * Even on the first iteration the state can be SHUTDOWN or SWEEPING if michael@0: * the stop request or the GC and the corresponding startBackgroundSweep call michael@0: * happen before this thread has a chance to run. 
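     *
     * A sketch of the helper thread state machine that this loop and the
     * entry points elsewhere in this class implement (illustrative, derived
     * from the code rather than a separate specification):
     *
     *   IDLE        --startBackgroundSweep / startBackgroundShrink--> SWEEPING
     *   IDLE        --startBackgroundAllocationIfIdle---------------> ALLOCATING
     *   SWEEPING    --doSweep() finishes-----------------------------> IDLE
     *   ALLOCATING  --enough chunks, or OOM--------------------------> IDLE
     *   ALLOCATING  --waitBackgroundSweepOrAllocEnd------------------> CANCEL_ALLOCATION
     *   CANCEL_ALLOCATION --acknowledged by this loop----------------> IDLE
     *   IDLE / SWEEPING   --finish()---------------------------------> SHUTDOWN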
michael@0: */ michael@0: for (;;) { michael@0: switch (state) { michael@0: case SHUTDOWN: michael@0: return; michael@0: case IDLE: michael@0: wait(wakeup); michael@0: break; michael@0: case SWEEPING: { michael@0: AutoTraceLog logSweeping(logger, TraceLogger::GCSweeping); michael@0: doSweep(); michael@0: if (state == SWEEPING) michael@0: state = IDLE; michael@0: PR_NotifyAllCondVar(done); michael@0: break; michael@0: } michael@0: case ALLOCATING: { michael@0: AutoTraceLog logAllocating(logger, TraceLogger::GCAllocation); michael@0: do { michael@0: Chunk *chunk; michael@0: { michael@0: AutoUnlockGC unlock(rt); michael@0: chunk = Chunk::allocate(rt); michael@0: } michael@0: michael@0: /* OOM stops the background allocation. */ michael@0: if (!chunk) michael@0: break; michael@0: JS_ASSERT(chunk->info.numArenasFreeCommitted == 0); michael@0: rt->gcChunkPool.put(chunk); michael@0: } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt)); michael@0: if (state == ALLOCATING) michael@0: state = IDLE; michael@0: break; michael@0: } michael@0: case CANCEL_ALLOCATION: michael@0: state = IDLE; michael@0: PR_NotifyAllCondVar(done); michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: #endif /* JS_THREADSAFE */ michael@0: michael@0: void michael@0: GCHelperThread::startBackgroundSweep(bool shouldShrink) michael@0: { michael@0: JS_ASSERT(rt->useHelperThreads()); michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: AutoLockGC lock(rt); michael@0: JS_ASSERT(state == IDLE); michael@0: JS_ASSERT(!sweepFlag); michael@0: sweepFlag = true; michael@0: shrinkFlag = shouldShrink; michael@0: state = SWEEPING; michael@0: PR_NotifyCondVar(wakeup); michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: /* Must be called with the GC lock taken. */ michael@0: void michael@0: GCHelperThread::startBackgroundShrink() michael@0: { michael@0: JS_ASSERT(rt->useHelperThreads()); michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: switch (state) { michael@0: case IDLE: michael@0: JS_ASSERT(!sweepFlag); michael@0: shrinkFlag = true; michael@0: state = SWEEPING; michael@0: PR_NotifyCondVar(wakeup); michael@0: break; michael@0: case SWEEPING: michael@0: shrinkFlag = true; michael@0: break; michael@0: case ALLOCATING: michael@0: case CANCEL_ALLOCATION: michael@0: /* michael@0: * If we have started background allocation there is nothing to michael@0: * shrink. 
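         *
         * Summarising how a shrink request is handled depending on the
         * state observed here (an informal recap of this switch):
         *
         *   IDLE                             start a sweep with shrinkFlag set
         *   SWEEPING                         set shrinkFlag; doSweep rechecks it
         *   ALLOCATING / CANCEL_ALLOCATION   request dropped, nothing to shrink
         *   SHUTDOWN                         unreachable (asserted below)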
michael@0: */ michael@0: break; michael@0: case SHUTDOWN: michael@0: MOZ_ASSUME_UNREACHABLE("No shrink on shutdown"); michael@0: } michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::waitBackgroundSweepEnd() michael@0: { michael@0: if (!rt->useHelperThreads()) { michael@0: JS_ASSERT(state == IDLE); michael@0: return; michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: AutoLockGC lock(rt); michael@0: while (state == SWEEPING) michael@0: wait(done); michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) michael@0: AssertBackgroundSweepingFinished(rt); michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::waitBackgroundSweepOrAllocEnd() michael@0: { michael@0: if (!rt->useHelperThreads()) { michael@0: JS_ASSERT(state == IDLE); michael@0: return; michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: AutoLockGC lock(rt); michael@0: if (state == ALLOCATING) michael@0: state = CANCEL_ALLOCATION; michael@0: while (state == SWEEPING || state == CANCEL_ALLOCATION) michael@0: wait(done); michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) michael@0: AssertBackgroundSweepingFinished(rt); michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: /* Must be called with the GC lock taken. */ michael@0: inline void michael@0: GCHelperThread::startBackgroundAllocationIfIdle() michael@0: { michael@0: JS_ASSERT(rt->useHelperThreads()); michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: if (state == IDLE) { michael@0: state = ALLOCATING; michael@0: PR_NotifyCondVar(wakeup); michael@0: } michael@0: #endif /* JS_THREADSAFE */ michael@0: } michael@0: michael@0: void michael@0: GCHelperThread::replenishAndFreeLater(void *ptr) michael@0: { michael@0: JS_ASSERT(freeCursor == freeCursorEnd); michael@0: do { michael@0: if (freeCursor && !freeVector.append(freeCursorEnd - FREE_ARRAY_LENGTH)) michael@0: break; michael@0: freeCursor = (void **) js_malloc(FREE_ARRAY_SIZE); michael@0: if (!freeCursor) { michael@0: freeCursorEnd = nullptr; michael@0: break; michael@0: } michael@0: freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH; michael@0: *freeCursor++ = ptr; michael@0: return; michael@0: } while (false); michael@0: js_free(ptr); michael@0: } michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: /* Must be called with the GC lock taken. */ michael@0: void michael@0: GCHelperThread::doSweep() michael@0: { michael@0: if (sweepFlag) { michael@0: sweepFlag = false; michael@0: AutoUnlockGC unlock(rt); michael@0: michael@0: SweepBackgroundThings(rt, true); michael@0: michael@0: if (freeCursor) { michael@0: void **array = freeCursorEnd - FREE_ARRAY_LENGTH; michael@0: freeElementsAndArray(array, freeCursor); michael@0: freeCursor = freeCursorEnd = nullptr; michael@0: } else { michael@0: JS_ASSERT(!freeCursorEnd); michael@0: } michael@0: for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) { michael@0: void **array = *iter; michael@0: freeElementsAndArray(array, array + FREE_ARRAY_LENGTH); michael@0: } michael@0: freeVector.resize(0); michael@0: michael@0: rt->freeLifoAlloc.freeAll(); michael@0: } michael@0: michael@0: bool shrinking = shrinkFlag; michael@0: ExpireChunksAndArenas(rt, shrinking); michael@0: michael@0: /* michael@0: * The main thread may have called ShrinkGCBuffers while michael@0: * ExpireChunksAndArenas(rt, false) was running, so we recheck the flag michael@0: * afterwards. 
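     *
     * An example interleaving that this recheck is meant to catch
     * (illustrative only):
     *
     *   helper thread                        main thread
     *   -------------                        -----------
     *   shrinking = shrinkFlag (false)
     *   ExpireChunksAndArenas(rt, false)
     *                                        ShrinkGCBuffers ->
     *                                          startBackgroundShrink():
     *                                          state == SWEEPING, so it only
     *                                          sets shrinkFlag = true
     *   recheck sees !shrinking && shrinkFlag
     *   ExpireChunksAndArenas(rt, true)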
michael@0: */ michael@0: if (!shrinking && shrinkFlag) { michael@0: shrinkFlag = false; michael@0: ExpireChunksAndArenas(rt, true); michael@0: } michael@0: } michael@0: #endif /* JS_THREADSAFE */ michael@0: michael@0: bool michael@0: GCHelperThread::onBackgroundThread() michael@0: { michael@0: #ifdef JS_THREADSAFE michael@0: return PR_GetCurrentThread() == getThread(); michael@0: #else michael@0: return false; michael@0: #endif michael@0: } michael@0: michael@0: static bool michael@0: ReleaseObservedTypes(JSRuntime *rt) michael@0: { michael@0: bool releaseTypes = rt->gcZeal() != 0; michael@0: michael@0: #ifndef JS_MORE_DETERMINISTIC michael@0: int64_t now = PRMJ_Now(); michael@0: if (now >= rt->gcJitReleaseTime) michael@0: releaseTypes = true; michael@0: if (releaseTypes) michael@0: rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL; michael@0: #endif michael@0: michael@0: return releaseTypes; michael@0: } michael@0: michael@0: /* michael@0: * It's simpler if we preserve the invariant that every zone has at least one michael@0: * compartment. If we know we're deleting the entire zone, then michael@0: * SweepCompartments is allowed to delete all compartments. In this case, michael@0: * |keepAtleastOne| is false. If some objects remain in the zone so that it michael@0: * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits michael@0: * SweepCompartments from deleting every compartment. Instead, it preserves an michael@0: * arbitrary compartment in the zone. michael@0: */ michael@0: static void michael@0: SweepCompartments(FreeOp *fop, Zone *zone, bool keepAtleastOne, bool lastGC) michael@0: { michael@0: JSRuntime *rt = zone->runtimeFromMainThread(); michael@0: JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback; michael@0: michael@0: JSCompartment **read = zone->compartments.begin(); michael@0: JSCompartment **end = zone->compartments.end(); michael@0: JSCompartment **write = read; michael@0: bool foundOne = false; michael@0: while (read < end) { michael@0: JSCompartment *comp = *read++; michael@0: JS_ASSERT(!rt->isAtomsCompartment(comp)); michael@0: michael@0: /* michael@0: * Don't delete the last compartment if all the ones before it were michael@0: * deleted and keepAtleastOne is true. michael@0: */ michael@0: bool dontDelete = read == end && !foundOne && keepAtleastOne; michael@0: if ((!comp->marked && !dontDelete) || lastGC) { michael@0: if (callback) michael@0: callback(fop, comp); michael@0: if (comp->principals) michael@0: JS_DropPrincipals(rt, comp->principals); michael@0: js_delete(comp); michael@0: } else { michael@0: *write++ = comp; michael@0: foundOne = true; michael@0: } michael@0: } michael@0: zone->compartments.resize(write - zone->compartments.begin()); michael@0: JS_ASSERT_IF(keepAtleastOne, !zone->compartments.empty()); michael@0: } michael@0: michael@0: static void michael@0: SweepZones(FreeOp *fop, bool lastGC) michael@0: { michael@0: JSRuntime *rt = fop->runtime(); michael@0: JSZoneCallback callback = rt->destroyZoneCallback; michael@0: michael@0: /* Skip the atomsCompartment zone. 
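     *
     * The loop below filters rt->zones in place with separate read and
     * write pointers: surviving zones are compacted towards the front and
     * the vector is resized to the write position, while zones whose arenas
     * are all empty (or, on the last GC, every collected zone) are
     * destroyed. A sketch of the idiom, with keep() and destroy() standing
     * in for the real checks and teardown (illustrative only; the code
     * below is authoritative):
     *
     *   Zone **read = zones.begin() + 1, **write = read;
     *   while (read < end) {
     *       Zone *zone = *read++;
     *       if (keep(zone))
     *           *write++ = zone;
     *       else
     *           destroy(zone);
     *   }
     *   zones.resize(write - zones.begin());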
*/ michael@0: Zone **read = rt->zones.begin() + 1; michael@0: Zone **end = rt->zones.end(); michael@0: Zone **write = read; michael@0: JS_ASSERT(rt->zones.length() >= 1); michael@0: JS_ASSERT(rt->isAtomsZone(rt->zones[0])); michael@0: michael@0: while (read < end) { michael@0: Zone *zone = *read++; michael@0: michael@0: if (zone->wasGCStarted()) { michael@0: if ((zone->allocator.arenas.arenaListsAreEmpty() && !zone->hasMarkedCompartments()) || michael@0: lastGC) michael@0: { michael@0: zone->allocator.arenas.checkEmptyFreeLists(); michael@0: if (callback) michael@0: callback(zone); michael@0: SweepCompartments(fop, zone, false, lastGC); michael@0: JS_ASSERT(zone->compartments.empty()); michael@0: fop->delete_(zone); michael@0: continue; michael@0: } michael@0: SweepCompartments(fop, zone, true, lastGC); michael@0: } michael@0: *write++ = zone; michael@0: } michael@0: rt->zones.resize(write - rt->zones.begin()); michael@0: } michael@0: michael@0: static void michael@0: PurgeRuntime(JSRuntime *rt) michael@0: { michael@0: for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) michael@0: comp->purge(); michael@0: michael@0: rt->freeLifoAlloc.transferUnusedFrom(&rt->tempLifoAlloc); michael@0: rt->interpreterStack().purge(rt); michael@0: michael@0: rt->gsnCache.purge(); michael@0: rt->scopeCoordinateNameCache.purge(); michael@0: rt->newObjectCache.purge(); michael@0: rt->nativeIterCache.purge(); michael@0: rt->sourceDataCache.purge(); michael@0: rt->evalCache.clear(); michael@0: michael@0: if (!rt->hasActiveCompilations()) michael@0: rt->parseMapPool().purgeAll(); michael@0: } michael@0: michael@0: static bool michael@0: ShouldPreserveJITCode(JSCompartment *comp, int64_t currentTime) michael@0: { michael@0: JSRuntime *rt = comp->runtimeFromMainThread(); michael@0: if (rt->gcShouldCleanUpEverything) michael@0: return false; michael@0: michael@0: if (rt->alwaysPreserveCode) michael@0: return true; michael@0: if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime) michael@0: return true; michael@0: michael@0: return false; michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: class CompartmentCheckTracer : public JSTracer michael@0: { michael@0: public: michael@0: CompartmentCheckTracer(JSRuntime *rt, JSTraceCallback callback) michael@0: : JSTracer(rt, callback) michael@0: {} michael@0: michael@0: Cell *src; michael@0: JSGCTraceKind srcKind; michael@0: Zone *zone; michael@0: JSCompartment *compartment; michael@0: }; michael@0: michael@0: static bool michael@0: InCrossCompartmentMap(JSObject *src, Cell *dst, JSGCTraceKind dstKind) michael@0: { michael@0: JSCompartment *srccomp = src->compartment(); michael@0: michael@0: if (dstKind == JSTRACE_OBJECT) { michael@0: Value key = ObjectValue(*static_cast(dst)); michael@0: if (WrapperMap::Ptr p = srccomp->lookupWrapper(key)) { michael@0: if (*p->value().unsafeGet() == ObjectValue(*src)) michael@0: return true; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * If the cross-compartment edge is caused by the debugger, then we don't michael@0: * know the right hashtable key, so we have to iterate. 
michael@0: */ michael@0: for (JSCompartment::WrapperEnum e(srccomp); !e.empty(); e.popFront()) { michael@0: if (e.front().key().wrapped == dst && ToMarkable(e.front().value()) == src) michael@0: return true; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: static void michael@0: CheckCompartment(CompartmentCheckTracer *trc, JSCompartment *thingCompartment, michael@0: Cell *thing, JSGCTraceKind kind) michael@0: { michael@0: JS_ASSERT(thingCompartment == trc->compartment || michael@0: trc->runtime()->isAtomsCompartment(thingCompartment) || michael@0: (trc->srcKind == JSTRACE_OBJECT && michael@0: InCrossCompartmentMap((JSObject *)trc->src, thing, kind))); michael@0: } michael@0: michael@0: static JSCompartment * michael@0: CompartmentOfCell(Cell *thing, JSGCTraceKind kind) michael@0: { michael@0: if (kind == JSTRACE_OBJECT) michael@0: return static_cast(thing)->compartment(); michael@0: else if (kind == JSTRACE_SHAPE) michael@0: return static_cast(thing)->compartment(); michael@0: else if (kind == JSTRACE_BASE_SHAPE) michael@0: return static_cast(thing)->compartment(); michael@0: else if (kind == JSTRACE_SCRIPT) michael@0: return static_cast(thing)->compartment(); michael@0: else michael@0: return nullptr; michael@0: } michael@0: michael@0: static void michael@0: CheckCompartmentCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind) michael@0: { michael@0: CompartmentCheckTracer *trc = static_cast(trcArg); michael@0: Cell *thing = (Cell *)*thingp; michael@0: michael@0: JSCompartment *comp = CompartmentOfCell(thing, kind); michael@0: if (comp && trc->compartment) { michael@0: CheckCompartment(trc, comp, thing, kind); michael@0: } else { michael@0: JS_ASSERT(thing->tenuredZone() == trc->zone || michael@0: trc->runtime()->isAtomsZone(thing->tenuredZone())); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: CheckForCompartmentMismatches(JSRuntime *rt) michael@0: { michael@0: if (rt->gcDisableStrictProxyCheckingCount) michael@0: return; michael@0: michael@0: CompartmentCheckTracer trc(rt, CheckCompartmentCallback); michael@0: for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) { michael@0: trc.zone = zone; michael@0: for (size_t thingKind = 0; thingKind < FINALIZE_LAST; thingKind++) { michael@0: for (CellIterUnderGC i(zone, AllocKind(thingKind)); !i.done(); i.next()) { michael@0: trc.src = i.getCell(); michael@0: trc.srcKind = MapAllocToTraceKind(AllocKind(thingKind)); michael@0: trc.compartment = CompartmentOfCell(trc.src, trc.srcKind); michael@0: JS_TraceChildren(&trc, trc.src, trc.srcKind); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: static bool michael@0: BeginMarkPhase(JSRuntime *rt) michael@0: { michael@0: int64_t currentTime = PRMJ_Now(); michael@0: michael@0: #ifdef DEBUG michael@0: if (rt->gcFullCompartmentChecks) michael@0: CheckForCompartmentMismatches(rt); michael@0: #endif michael@0: michael@0: rt->gcIsFull = true; michael@0: bool any = false; michael@0: michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: /* Assert that zone state is as we expect */ michael@0: JS_ASSERT(!zone->isCollecting()); michael@0: JS_ASSERT(!zone->compartments.empty()); michael@0: for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) michael@0: JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]); michael@0: michael@0: /* Set up which zones will be collected. 
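         *
         * In outline (an informal recap of the code below and of the atoms
         * handling later in this function):
         *
         *   zone is scheduled, not the atoms zone -> enter the Zone::Mark state
         *   zone is not scheduled                 -> this is no longer a full GC
         *   atoms zone                            -> only marked if this is a
         *                                            full GC and no thread is
         *                                            keeping atoms alive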
*/ michael@0: if (zone->isGCScheduled()) { michael@0: if (!rt->isAtomsZone(zone)) { michael@0: any = true; michael@0: zone->setGCState(Zone::Mark); michael@0: } michael@0: } else { michael@0: rt->gcIsFull = false; michael@0: } michael@0: michael@0: zone->scheduledForDestruction = false; michael@0: zone->maybeAlive = false; michael@0: zone->setPreservingCode(false); michael@0: } michael@0: michael@0: for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) { michael@0: JS_ASSERT(c->gcLiveArrayBuffers.empty()); michael@0: c->marked = false; michael@0: if (ShouldPreserveJITCode(c, currentTime)) michael@0: c->zone()->setPreservingCode(true); michael@0: } michael@0: michael@0: /* michael@0: * Atoms are not in the cross-compartment map. So if there are any michael@0: * zones that are not being collected, we are not allowed to collect michael@0: * atoms. Otherwise, the non-collected zones could contain pointers michael@0: * to atoms that we would miss. michael@0: * michael@0: * keepAtoms() will only change on the main thread, which we are currently michael@0: * on. If the value of keepAtoms() changes between GC slices, then we'll michael@0: * cancel the incremental GC. See IsIncrementalGCSafe. michael@0: */ michael@0: if (rt->gcIsFull && !rt->keepAtoms()) { michael@0: Zone *atomsZone = rt->atomsCompartment()->zone(); michael@0: if (atomsZone->isGCScheduled()) { michael@0: JS_ASSERT(!atomsZone->isCollecting()); michael@0: atomsZone->setGCState(Zone::Mark); michael@0: any = true; michael@0: } michael@0: } michael@0: michael@0: /* Check that at least one zone is scheduled for collection. */ michael@0: if (!any) michael@0: return false; michael@0: michael@0: /* michael@0: * At the end of each incremental slice, we call prepareForIncrementalGC, michael@0: * which marks objects in all arenas that we're currently allocating michael@0: * into. This can cause leaks if unreachable objects are in these michael@0: * arenas. This purge call ensures that we only mark arenas that have had michael@0: * allocations after the incremental GC started. michael@0: */ michael@0: if (rt->gcIsIncremental) { michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) michael@0: zone->allocator.arenas.purge(); michael@0: } michael@0: michael@0: rt->gcMarker.start(); michael@0: JS_ASSERT(!rt->gcMarker.callback); michael@0: JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker)); michael@0: michael@0: /* For non-incremental GC the following sweep discards the jit code. */ michael@0: if (rt->gcIsIncremental) { michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK_DISCARD_CODE); michael@0: zone->discardJitCode(rt->defaultFreeOp()); michael@0: } michael@0: } michael@0: michael@0: GCMarker *gcmarker = &rt->gcMarker; michael@0: michael@0: rt->gcStartNumber = rt->gcNumber; michael@0: michael@0: /* michael@0: * We must purge the runtime at the beginning of an incremental GC. The michael@0: * danger if we purge later is that the snapshot invariant of incremental michael@0: * GC will be broken, as follows. If some object is reachable only through michael@0: * some cache (say the dtoaCache) then it will not be part of the snapshot. michael@0: * If we purge after root marking, then the mutator could obtain a pointer michael@0: * to the object and start using it. This object might never be marked, so michael@0: * a GC hazard would exist. 
michael@0: */ michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_PURGE); michael@0: PurgeRuntime(rt); michael@0: } michael@0: michael@0: /* michael@0: * Mark phase. michael@0: */ michael@0: gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK); michael@0: gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_ROOTS); michael@0: michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: /* Unmark everything in the zones being collected. */ michael@0: zone->allocator.arenas.unmarkAll(); michael@0: } michael@0: michael@0: for (GCCompartmentsIter c(rt); !c.done(); c.next()) { michael@0: /* Reset weak map list for the compartments being collected. */ michael@0: WeakMapBase::resetCompartmentWeakMapList(c); michael@0: } michael@0: michael@0: if (rt->gcIsFull) michael@0: UnmarkScriptData(rt); michael@0: michael@0: MarkRuntime(gcmarker); michael@0: if (rt->gcIsIncremental) michael@0: BufferGrayRoots(gcmarker); michael@0: michael@0: /* michael@0: * This code ensures that if a zone is "dead", then it will be michael@0: * collected in this GC. A zone is considered dead if its maybeAlive michael@0: * flag is false. The maybeAlive flag is set if: michael@0: * (1) the zone has incoming cross-compartment edges, or michael@0: * (2) an object in the zone was marked during root marking, either michael@0: * as a black root or a gray root. michael@0: * If the maybeAlive is false, then we set the scheduledForDestruction flag. michael@0: * At any time later in the GC, if we try to mark an object whose michael@0: * zone is scheduled for destruction, we will assert. michael@0: * NOTE: Due to bug 811587, we only assert if gcManipulatingDeadCompartments michael@0: * is true (e.g., if we're doing a brain transplant). michael@0: * michael@0: * The purpose of this check is to ensure that a zone that we would michael@0: * normally destroy is not resurrected by a read barrier or an michael@0: * allocation. This might happen during a function like JS_TransplantObject, michael@0: * which iterates over all compartments, live or dead, and operates on their michael@0: * objects. See bug 803376 for details on this problem. To avoid the michael@0: * problem, we are very careful to avoid allocation and read barriers during michael@0: * JS_TransplantObject and the like. The code here ensures that we don't michael@0: * regress. michael@0: * michael@0: * Note that there are certain cases where allocations or read barriers in michael@0: * dead zone are difficult to avoid. We detect such cases (via the michael@0: * gcObjectsMarkedInDeadCompartment counter) and redo any ongoing GCs after michael@0: * the JS_TransplantObject function has finished. This ensures that the dead michael@0: * zones will be cleaned up. See AutoMarkInDeadZone and michael@0: * AutoMaybeTouchDeadZones for details. michael@0: */ michael@0: michael@0: /* Set the maybeAlive flag based on cross-compartment edges. */ michael@0: for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) { michael@0: for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) { michael@0: Cell *dst = e.front().key().wrapped; michael@0: dst->tenuredZone()->maybeAlive = true; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * For black roots, code in gc/Marking.cpp will already have set maybeAlive michael@0: * during MarkRuntime. 
michael@0: */ michael@0: michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: if (!zone->maybeAlive && !rt->isAtomsZone(zone)) michael@0: zone->scheduledForDestruction = true; michael@0: } michael@0: rt->gcFoundBlackGrayEdges = false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: template michael@0: static void michael@0: MarkWeakReferences(JSRuntime *rt, gcstats::Phase phase) michael@0: { michael@0: GCMarker *gcmarker = &rt->gcMarker; michael@0: JS_ASSERT(gcmarker->isDrained()); michael@0: michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK); michael@0: gcstats::AutoPhase ap1(rt->gcStats, phase); michael@0: michael@0: for (;;) { michael@0: bool markedAny = false; michael@0: for (CompartmentIterT c(rt); !c.done(); c.next()) { michael@0: markedAny |= WatchpointMap::markCompartmentIteratively(c, gcmarker); michael@0: markedAny |= WeakMapBase::markCompartmentIteratively(c, gcmarker); michael@0: } michael@0: markedAny |= Debugger::markAllIteratively(gcmarker); michael@0: michael@0: if (!markedAny) michael@0: break; michael@0: michael@0: SliceBudget budget; michael@0: gcmarker->drainMarkStack(budget); michael@0: } michael@0: JS_ASSERT(gcmarker->isDrained()); michael@0: } michael@0: michael@0: static void michael@0: MarkWeakReferencesInCurrentGroup(JSRuntime *rt, gcstats::Phase phase) michael@0: { michael@0: MarkWeakReferences(rt, phase); michael@0: } michael@0: michael@0: template michael@0: static void michael@0: MarkGrayReferences(JSRuntime *rt) michael@0: { michael@0: GCMarker *gcmarker = &rt->gcMarker; michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK); michael@0: gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_SWEEP_MARK_GRAY); michael@0: gcmarker->setMarkColorGray(); michael@0: if (gcmarker->hasBufferedGrayRoots()) { michael@0: for (ZoneIterT zone(rt); !zone.done(); zone.next()) michael@0: gcmarker->markBufferedGrayRoots(zone); michael@0: } else { michael@0: JS_ASSERT(!rt->gcIsIncremental); michael@0: if (JSTraceDataOp op = rt->gcGrayRootTracer.op) michael@0: (*op)(gcmarker, rt->gcGrayRootTracer.data); michael@0: } michael@0: SliceBudget budget; michael@0: gcmarker->drainMarkStack(budget); michael@0: } michael@0: michael@0: MarkWeakReferences(rt, gcstats::PHASE_SWEEP_MARK_GRAY_WEAK); michael@0: michael@0: JS_ASSERT(gcmarker->isDrained()); michael@0: michael@0: gcmarker->setMarkColorBlack(); michael@0: } michael@0: michael@0: static void michael@0: MarkGrayReferencesInCurrentGroup(JSRuntime *rt) michael@0: { michael@0: MarkGrayReferences(rt); michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: michael@0: static void michael@0: MarkAllWeakReferences(JSRuntime *rt, gcstats::Phase phase) michael@0: { michael@0: MarkWeakReferences(rt, phase); michael@0: } michael@0: michael@0: static void michael@0: MarkAllGrayReferences(JSRuntime *rt) michael@0: { michael@0: MarkGrayReferences(rt); michael@0: } michael@0: michael@0: class js::gc::MarkingValidator michael@0: { michael@0: public: michael@0: MarkingValidator(JSRuntime *rt); michael@0: ~MarkingValidator(); michael@0: void nonIncrementalMark(); michael@0: void validate(); michael@0: michael@0: private: michael@0: JSRuntime *runtime; michael@0: bool initialized; michael@0: michael@0: typedef HashMap BitmapMap; michael@0: BitmapMap map; michael@0: }; michael@0: michael@0: js::gc::MarkingValidator::MarkingValidator(JSRuntime *rt) michael@0: : runtime(rt), michael@0: initialized(false) michael@0: {} michael@0: michael@0: 
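/*
 * How the validator is driven, in outline. This is an informal sketch of
 * the three entry points defined after this class (their callers elsewhere
 * in this file are authoritative):
 *
 *   ComputeNonIncrementalMarkingForValidation(rt);
 *       // snapshot the incremental mark bitmaps, redo the whole mark
 *       // phase non-incrementally, then swap the two results back
 *   ValidateIncrementalMarking(rt);
 *       // assert that everything the non-incremental mark found black is
 *       // also black in the incremental bitmap, and that the incremental
 *       // bitmap reports no extra gray (cycle-collectable) cells
 *   FinishMarkingValidation(rt);
 *       // tear the validator down again
 */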
js::gc::MarkingValidator::~MarkingValidator() michael@0: { michael@0: if (!map.initialized()) michael@0: return; michael@0: michael@0: for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront()) michael@0: js_delete(r.front().value()); michael@0: } michael@0: michael@0: void michael@0: js::gc::MarkingValidator::nonIncrementalMark() michael@0: { michael@0: /* michael@0: * Perform a non-incremental mark for all collecting zones and record michael@0: * the results for later comparison. michael@0: * michael@0: * Currently this does not validate gray marking. michael@0: */ michael@0: michael@0: if (!map.init()) michael@0: return; michael@0: michael@0: GCMarker *gcmarker = &runtime->gcMarker; michael@0: michael@0: /* Save existing mark bits. */ michael@0: for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) { michael@0: ChunkBitmap *bitmap = &r.front()->bitmap; michael@0: ChunkBitmap *entry = js_new(); michael@0: if (!entry) michael@0: return; michael@0: michael@0: memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap)); michael@0: if (!map.putNew(r.front(), entry)) michael@0: return; michael@0: } michael@0: michael@0: /* michael@0: * Temporarily clear the lists of live weakmaps and array buffers for the michael@0: * compartments we are collecting. michael@0: */ michael@0: michael@0: WeakMapVector weakmaps; michael@0: ArrayBufferVector arrayBuffers; michael@0: for (GCCompartmentsIter c(runtime); !c.done(); c.next()) { michael@0: if (!WeakMapBase::saveCompartmentWeakMapList(c, weakmaps) || michael@0: !ArrayBufferObject::saveArrayBufferList(c, arrayBuffers)) michael@0: { michael@0: return; michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * After this point, the function should run to completion, so we shouldn't michael@0: * do anything fallible. michael@0: */ michael@0: initialized = true; michael@0: michael@0: for (GCCompartmentsIter c(runtime); !c.done(); c.next()) { michael@0: WeakMapBase::resetCompartmentWeakMapList(c); michael@0: ArrayBufferObject::resetArrayBufferList(c); michael@0: } michael@0: michael@0: /* Re-do all the marking, but non-incrementally. */ michael@0: js::gc::State state = runtime->gcIncrementalState; michael@0: runtime->gcIncrementalState = MARK_ROOTS; michael@0: michael@0: JS_ASSERT(gcmarker->isDrained()); michael@0: gcmarker->reset(); michael@0: michael@0: for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) michael@0: r.front()->bitmap.clear(); michael@0: michael@0: { michael@0: gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK); michael@0: gcstats::AutoPhase ap2(runtime->gcStats, gcstats::PHASE_MARK_ROOTS); michael@0: MarkRuntime(gcmarker, true); michael@0: } michael@0: michael@0: { michael@0: gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK); michael@0: SliceBudget budget; michael@0: runtime->gcIncrementalState = MARK; michael@0: runtime->gcMarker.drainMarkStack(budget); michael@0: } michael@0: michael@0: runtime->gcIncrementalState = SWEEP; michael@0: { michael@0: gcstats::AutoPhase ap(runtime->gcStats, gcstats::PHASE_SWEEP); michael@0: MarkAllWeakReferences(runtime, gcstats::PHASE_SWEEP_MARK_WEAK); michael@0: michael@0: /* Update zone state for gray marking. */ michael@0: for (GCZonesIter zone(runtime); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCMarkingBlack()); michael@0: zone->setGCState(Zone::MarkGray); michael@0: } michael@0: michael@0: MarkAllGrayReferences(runtime); michael@0: michael@0: /* Restore zone state. 
*/ michael@0: for (GCZonesIter zone(runtime); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCMarkingGray()); michael@0: zone->setGCState(Zone::Mark); michael@0: } michael@0: } michael@0: michael@0: /* Take a copy of the non-incremental mark state and restore the original. */ michael@0: for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) { michael@0: Chunk *chunk = r.front(); michael@0: ChunkBitmap *bitmap = &chunk->bitmap; michael@0: ChunkBitmap *entry = map.lookup(chunk)->value(); michael@0: Swap(*entry, *bitmap); michael@0: } michael@0: michael@0: for (GCCompartmentsIter c(runtime); !c.done(); c.next()) { michael@0: WeakMapBase::resetCompartmentWeakMapList(c); michael@0: ArrayBufferObject::resetArrayBufferList(c); michael@0: } michael@0: WeakMapBase::restoreCompartmentWeakMapLists(weakmaps); michael@0: ArrayBufferObject::restoreArrayBufferLists(arrayBuffers); michael@0: michael@0: runtime->gcIncrementalState = state; michael@0: } michael@0: michael@0: void michael@0: js::gc::MarkingValidator::validate() michael@0: { michael@0: /* michael@0: * Validates the incremental marking for a single compartment by comparing michael@0: * the mark bits to those previously recorded for a non-incremental mark. michael@0: */ michael@0: michael@0: if (!initialized) michael@0: return; michael@0: michael@0: for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) { michael@0: Chunk *chunk = r.front(); michael@0: BitmapMap::Ptr ptr = map.lookup(chunk); michael@0: if (!ptr) michael@0: continue; /* Allocated after we did the non-incremental mark. */ michael@0: michael@0: ChunkBitmap *bitmap = ptr->value(); michael@0: ChunkBitmap *incBitmap = &chunk->bitmap; michael@0: michael@0: for (size_t i = 0; i < ArenasPerChunk; i++) { michael@0: if (chunk->decommittedArenas.get(i)) michael@0: continue; michael@0: Arena *arena = &chunk->arenas[i]; michael@0: if (!arena->aheader.allocated()) michael@0: continue; michael@0: if (!arena->aheader.zone->isGCSweeping()) michael@0: continue; michael@0: if (arena->aheader.allocatedDuringIncremental) michael@0: continue; michael@0: michael@0: AllocKind kind = arena->aheader.getAllocKind(); michael@0: uintptr_t thing = arena->thingsStart(kind); michael@0: uintptr_t end = arena->thingsEnd(); michael@0: while (thing < end) { michael@0: Cell *cell = (Cell *)thing; michael@0: michael@0: /* michael@0: * If a non-incremental GC wouldn't have collected a cell, then michael@0: * an incremental GC won't collect it. michael@0: */ michael@0: JS_ASSERT_IF(bitmap->isMarked(cell, BLACK), incBitmap->isMarked(cell, BLACK)); michael@0: michael@0: /* michael@0: * If the cycle collector isn't allowed to collect an object michael@0: * after a non-incremental GC has run, then it isn't allowed to michael@0: * collected it after an incremental GC. 
michael@0: */ michael@0: JS_ASSERT_IF(!bitmap->isMarked(cell, GRAY), !incBitmap->isMarked(cell, GRAY)); michael@0: michael@0: thing += Arena::thingSize(kind); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: #endif michael@0: michael@0: static void michael@0: ComputeNonIncrementalMarkingForValidation(JSRuntime *rt) michael@0: { michael@0: #ifdef DEBUG michael@0: JS_ASSERT(!rt->gcMarkingValidator); michael@0: if (rt->gcIsIncremental && rt->gcValidate) michael@0: rt->gcMarkingValidator = js_new(rt); michael@0: if (rt->gcMarkingValidator) michael@0: rt->gcMarkingValidator->nonIncrementalMark(); michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: ValidateIncrementalMarking(JSRuntime *rt) michael@0: { michael@0: #ifdef DEBUG michael@0: if (rt->gcMarkingValidator) michael@0: rt->gcMarkingValidator->validate(); michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: FinishMarkingValidation(JSRuntime *rt) michael@0: { michael@0: #ifdef DEBUG michael@0: js_delete(rt->gcMarkingValidator); michael@0: rt->gcMarkingValidator = nullptr; michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: AssertNeedsBarrierFlagsConsistent(JSRuntime *rt) michael@0: { michael@0: #ifdef DEBUG michael@0: bool anyNeedsBarrier = false; michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) michael@0: anyNeedsBarrier |= zone->needsBarrier(); michael@0: JS_ASSERT(rt->needsBarrier() == anyNeedsBarrier); michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: DropStringWrappers(JSRuntime *rt) michael@0: { michael@0: /* michael@0: * String "wrappers" are dropped on GC because their presence would require michael@0: * us to sweep the wrappers in all compartments every time we sweep a michael@0: * compartment group. michael@0: */ michael@0: for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) { michael@0: for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) { michael@0: if (e.front().key().kind == CrossCompartmentKey::StringWrapper) michael@0: e.removeFront(); michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * Group zones that must be swept at the same time. michael@0: * michael@0: * If compartment A has an edge to an unmarked object in compartment B, then we michael@0: * must not sweep A in a later slice than we sweep B. That's because a write michael@0: * barrier in A that could lead to the unmarked object in B becoming michael@0: * marked. However, if we had already swept that object, we would be in trouble. michael@0: * michael@0: * If we consider these dependencies as a graph, then all the compartments in michael@0: * any strongly-connected component of this graph must be swept in the same michael@0: * slice. michael@0: * michael@0: * Tarjan's algorithm is used to calculate the components. michael@0: */ michael@0: michael@0: void michael@0: JSCompartment::findOutgoingEdges(ComponentFinder &finder) michael@0: { michael@0: for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) { michael@0: CrossCompartmentKey::Kind kind = e.front().key().kind; michael@0: JS_ASSERT(kind != CrossCompartmentKey::StringWrapper); michael@0: Cell *other = e.front().key().wrapped; michael@0: if (kind == CrossCompartmentKey::ObjectWrapper) { michael@0: /* michael@0: * Add edge to wrapped object compartment if wrapped object is not michael@0: * marked black to indicate that wrapper compartment not be swept michael@0: * after wrapped compartment. 
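             *
             * For example (zoneA, zoneB are hypothetical; the ComponentFinder
             * calls are the ones used by FindZoneGroups below): if a
             * compartment in zoneA holds a wrapper to an object in zoneB that
             * is not yet marked black, then while visiting zoneA we record
             *
             *   finder.addEdgeTo(zoneB);
             *
             * after both zones were added with finder.addNode(). If zoneB (or
             * a chain of zones) has a similar edge back to zoneA, the zones
             * form a strongly-connected component and end up in the same
             * sweep group returned by finder.getResultsList(); otherwise the
             * grouping honours the rule above that zoneA must not be swept
             * later than zoneB.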
             */
            if (!other->isMarked(BLACK) || other->isMarked(GRAY)) {
                JS::Zone *w = other->tenuredZone();
                if (w->isGCMarking())
                    finder.addEdgeTo(w);
            }
        } else {
            JS_ASSERT(kind == CrossCompartmentKey::DebuggerScript ||
                      kind == CrossCompartmentKey::DebuggerSource ||
                      kind == CrossCompartmentKey::DebuggerObject ||
                      kind == CrossCompartmentKey::DebuggerEnvironment);
            /*
             * Add edge for debugger object wrappers, to ensure (in conjunction
             * with the call to Debugger::findCompartmentEdges below) that
             * debugger and debuggee objects are always swept in the same group.
             */
            JS::Zone *w = other->tenuredZone();
            if (w->isGCMarking())
                finder.addEdgeTo(w);
        }
    }

    Debugger::findCompartmentEdges(zone(), finder);
}

void
Zone::findOutgoingEdges(ComponentFinder<JS::Zone> &finder)
{
    /*
     * Any compartment may have a pointer to an atom in the atoms
     * compartment, and these aren't in the cross compartment map.
     */
    JSRuntime *rt = runtimeFromMainThread();
    if (rt->atomsCompartment()->zone()->isGCMarking())
        finder.addEdgeTo(rt->atomsCompartment()->zone());

    for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
        comp->findOutgoingEdges(finder);
}

static void
FindZoneGroups(JSRuntime *rt)
{
    ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
    if (!rt->gcIsIncremental)
        finder.useOneComponent();

    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
        JS_ASSERT(zone->isGCMarking());
        finder.addNode(zone);
    }
    rt->gcZoneGroups = finder.getResultsList();
    rt->gcCurrentZoneGroup = rt->gcZoneGroups;
    rt->gcZoneGroupIndex = 0;
    JS_ASSERT_IF(!rt->gcIsIncremental, !rt->gcCurrentZoneGroup->nextGroup());
}

static void
ResetGrayList(JSCompartment* comp);

static void
GetNextZoneGroup(JSRuntime *rt)
{
    rt->gcCurrentZoneGroup = rt->gcCurrentZoneGroup->nextGroup();
    ++rt->gcZoneGroupIndex;
    if (!rt->gcCurrentZoneGroup) {
        rt->gcAbortSweepAfterCurrentGroup = false;
        return;
    }

    if (!rt->gcIsIncremental)
        ComponentFinder<Zone>::mergeGroups(rt->gcCurrentZoneGroup);

    if (rt->gcAbortSweepAfterCurrentGroup) {
        JS_ASSERT(!rt->gcIsIncremental);
        for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
            JS_ASSERT(!zone->gcNextGraphComponent);
            JS_ASSERT(zone->isGCMarking());
            zone->setNeedsBarrier(false, Zone::UpdateIon);
            zone->setGCState(Zone::NoGC);
            zone->gcGrayRoots.clearAndFree();
        }
        rt->setNeedsBarrier(false);
        AssertNeedsBarrierFlagsConsistent(rt);

        for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next()) {
            ArrayBufferObject::resetArrayBufferList(comp);
            ResetGrayList(comp);
        }

        rt->gcAbortSweepAfterCurrentGroup = false;
        rt->gcCurrentZoneGroup = nullptr;
    }
}

/*
 * Gray marking:
 *
 * At the end of collection, anything reachable from a gray root that has not
 * otherwise been marked black must be marked gray.
 *
 * This means that when marking things gray we must not allow marking to leave
 * the current compartment group, as that could result in things being marked
 * gray when they might subsequently be marked black. To achieve this, when we
 * find a cross compartment pointer we don't mark the referent but add it to a
 * singly-linked list of incoming gray pointers that is stored with each
 * compartment.
 *
 * The list head is stored in JSCompartment::gcIncomingGrayPointers and contains
 * cross compartment wrapper objects. The next pointer is stored in the second
 * extra slot of the cross compartment wrapper.
 *
 * The list is created during gray marking when one of the
 * MarkCrossCompartmentXXX functions is called for a pointer that leaves the
 * current compartment group. This calls DelayCrossCompartmentGrayMarking to
 * push the referring object onto the list.
 *
 * The list is traversed and then unlinked in
 * MarkIncomingCrossCompartmentPointers.
 */

static bool
IsGrayListObject(JSObject *obj)
{
    JS_ASSERT(obj);
    return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
}

/* static */ unsigned
ProxyObject::grayLinkSlot(JSObject *obj)
{
    JS_ASSERT(IsGrayListObject(obj));
    return ProxyObject::EXTRA_SLOT + 1;
}

#ifdef DEBUG
static void
AssertNotOnGrayList(JSObject *obj)
{
    JS_ASSERT_IF(IsGrayListObject(obj),
                 obj->getReservedSlot(ProxyObject::grayLinkSlot(obj)).isUndefined());
}
#endif

static JSObject *
CrossCompartmentPointerReferent(JSObject *obj)
{
    JS_ASSERT(IsGrayListObject(obj));
    return &obj->as<ProxyObject>().private_().toObject();
}

static JSObject *
NextIncomingCrossCompartmentPointer(JSObject *prev, bool unlink)
{
    unsigned slot = ProxyObject::grayLinkSlot(prev);
    JSObject *next = prev->getReservedSlot(slot).toObjectOrNull();
    JS_ASSERT_IF(next, IsGrayListObject(next));

    if (unlink)
        prev->setSlot(slot, UndefinedValue());

    return next;
}

void
js::DelayCrossCompartmentGrayMarking(JSObject *src)
{
    JS_ASSERT(IsGrayListObject(src));

    /* Called from MarkCrossCompartmentXXX functions.
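     *
     * The gray-link slot (grayLinkSlot above) encodes list membership; this
     * is implied by the checks here and in RemoveFromGrayList below:
     *
     *   slot is UndefinedValue()   - wrapper is not on any list
     *   slot is NullValue()        - wrapper is the last element of a list
     *   slot is ObjectValue(next)  - wrapper is on a list, 'next' follows it
     *
     * so a membership test is simply:
     *
     *   bool onList = !obj->getReservedSlot(ProxyObject::grayLinkSlot(obj)).isUndefined();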
*/ michael@0: unsigned slot = ProxyObject::grayLinkSlot(src); michael@0: JSObject *dest = CrossCompartmentPointerReferent(src); michael@0: JSCompartment *comp = dest->compartment(); michael@0: michael@0: if (src->getReservedSlot(slot).isUndefined()) { michael@0: src->setCrossCompartmentSlot(slot, ObjectOrNullValue(comp->gcIncomingGrayPointers)); michael@0: comp->gcIncomingGrayPointers = src; michael@0: } else { michael@0: JS_ASSERT(src->getReservedSlot(slot).isObjectOrNull()); michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: /* michael@0: * Assert that the object is in our list, also walking the list to check its michael@0: * integrity. michael@0: */ michael@0: JSObject *obj = comp->gcIncomingGrayPointers; michael@0: bool found = false; michael@0: while (obj) { michael@0: if (obj == src) michael@0: found = true; michael@0: obj = NextIncomingCrossCompartmentPointer(obj, false); michael@0: } michael@0: JS_ASSERT(found); michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: MarkIncomingCrossCompartmentPointers(JSRuntime *rt, const uint32_t color) michael@0: { michael@0: JS_ASSERT(color == BLACK || color == GRAY); michael@0: michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK); michael@0: static const gcstats::Phase statsPhases[] = { michael@0: gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK, michael@0: gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY michael@0: }; michael@0: gcstats::AutoPhase ap1(rt->gcStats, statsPhases[color]); michael@0: michael@0: bool unlinkList = color == GRAY; michael@0: michael@0: for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) { michael@0: JS_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray()); michael@0: JS_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack()); michael@0: JS_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers)); michael@0: michael@0: for (JSObject *src = c->gcIncomingGrayPointers; michael@0: src; michael@0: src = NextIncomingCrossCompartmentPointer(src, unlinkList)) michael@0: { michael@0: JSObject *dst = CrossCompartmentPointerReferent(src); michael@0: JS_ASSERT(dst->compartment() == c); michael@0: michael@0: if (color == GRAY) { michael@0: if (IsObjectMarked(&src) && src->isMarked(GRAY)) michael@0: MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst, michael@0: "cross-compartment gray pointer"); michael@0: } else { michael@0: if (IsObjectMarked(&src) && !src->isMarked(GRAY)) michael@0: MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst, michael@0: "cross-compartment black pointer"); michael@0: } michael@0: } michael@0: michael@0: if (unlinkList) michael@0: c->gcIncomingGrayPointers = nullptr; michael@0: } michael@0: michael@0: SliceBudget budget; michael@0: rt->gcMarker.drainMarkStack(budget); michael@0: } michael@0: michael@0: static bool michael@0: RemoveFromGrayList(JSObject *wrapper) michael@0: { michael@0: if (!IsGrayListObject(wrapper)) michael@0: return false; michael@0: michael@0: unsigned slot = ProxyObject::grayLinkSlot(wrapper); michael@0: if (wrapper->getReservedSlot(slot).isUndefined()) michael@0: return false; /* Not on our list. 
*/ michael@0: michael@0: JSObject *tail = wrapper->getReservedSlot(slot).toObjectOrNull(); michael@0: wrapper->setReservedSlot(slot, UndefinedValue()); michael@0: michael@0: JSCompartment *comp = CrossCompartmentPointerReferent(wrapper)->compartment(); michael@0: JSObject *obj = comp->gcIncomingGrayPointers; michael@0: if (obj == wrapper) { michael@0: comp->gcIncomingGrayPointers = tail; michael@0: return true; michael@0: } michael@0: michael@0: while (obj) { michael@0: unsigned slot = ProxyObject::grayLinkSlot(obj); michael@0: JSObject *next = obj->getReservedSlot(slot).toObjectOrNull(); michael@0: if (next == wrapper) { michael@0: obj->setCrossCompartmentSlot(slot, ObjectOrNullValue(tail)); michael@0: return true; michael@0: } michael@0: obj = next; michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("object not found in gray link list"); michael@0: } michael@0: michael@0: static void michael@0: ResetGrayList(JSCompartment *comp) michael@0: { michael@0: JSObject *src = comp->gcIncomingGrayPointers; michael@0: while (src) michael@0: src = NextIncomingCrossCompartmentPointer(src, true); michael@0: comp->gcIncomingGrayPointers = nullptr; michael@0: } michael@0: michael@0: void michael@0: js::NotifyGCNukeWrapper(JSObject *obj) michael@0: { michael@0: /* michael@0: * References to target of wrapper are being removed, we no longer have to michael@0: * remember to mark it. michael@0: */ michael@0: RemoveFromGrayList(obj); michael@0: } michael@0: michael@0: enum { michael@0: JS_GC_SWAP_OBJECT_A_REMOVED = 1 << 0, michael@0: JS_GC_SWAP_OBJECT_B_REMOVED = 1 << 1 michael@0: }; michael@0: michael@0: unsigned michael@0: js::NotifyGCPreSwap(JSObject *a, JSObject *b) michael@0: { michael@0: /* michael@0: * Two objects in the same compartment are about to have had their contents michael@0: * swapped. If either of them are in our gray pointer list, then we remove michael@0: * them from the lists, returning a bitset indicating what happened. michael@0: */ michael@0: return (RemoveFromGrayList(a) ? JS_GC_SWAP_OBJECT_A_REMOVED : 0) | michael@0: (RemoveFromGrayList(b) ? JS_GC_SWAP_OBJECT_B_REMOVED : 0); michael@0: } michael@0: michael@0: void michael@0: js::NotifyGCPostSwap(JSObject *a, JSObject *b, unsigned removedFlags) michael@0: { michael@0: /* michael@0: * Two objects in the same compartment have had their contents swapped. If michael@0: * either of them were in our gray pointer list, we re-add them again. michael@0: */ michael@0: if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED) michael@0: DelayCrossCompartmentGrayMarking(b); michael@0: if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED) michael@0: DelayCrossCompartmentGrayMarking(a); michael@0: } michael@0: michael@0: static void michael@0: EndMarkingZoneGroup(JSRuntime *rt) michael@0: { michael@0: /* michael@0: * Mark any incoming black pointers from previously swept compartments michael@0: * whose referents are not marked. This can occur when gray cells become michael@0: * black by the action of UnmarkGray. michael@0: */ michael@0: MarkIncomingCrossCompartmentPointers(rt, BLACK); michael@0: michael@0: MarkWeakReferencesInCurrentGroup(rt, gcstats::PHASE_SWEEP_MARK_WEAK); michael@0: michael@0: /* michael@0: * Change state of current group to MarkGray to restrict marking to this michael@0: * group. Note that there may be pointers to the atoms compartment, and michael@0: * these will be marked through, as they are not marked with michael@0: * MarkCrossCompartmentXXX. 
michael@0: */ michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCMarkingBlack()); michael@0: zone->setGCState(Zone::MarkGray); michael@0: } michael@0: michael@0: /* Mark incoming gray pointers from previously swept compartments. */ michael@0: rt->gcMarker.setMarkColorGray(); michael@0: MarkIncomingCrossCompartmentPointers(rt, GRAY); michael@0: rt->gcMarker.setMarkColorBlack(); michael@0: michael@0: /* Mark gray roots and mark transitively inside the current compartment group. */ michael@0: MarkGrayReferencesInCurrentGroup(rt); michael@0: michael@0: /* Restore marking state. */ michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCMarkingGray()); michael@0: zone->setGCState(Zone::Mark); michael@0: } michael@0: michael@0: JS_ASSERT(rt->gcMarker.isDrained()); michael@0: } michael@0: michael@0: static void michael@0: BeginSweepingZoneGroup(JSRuntime *rt) michael@0: { michael@0: /* michael@0: * Begin sweeping the group of zones in gcCurrentZoneGroup, michael@0: * performing actions that must be done before yielding to caller. michael@0: */ michael@0: michael@0: bool sweepingAtoms = false; michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: /* Set the GC state to sweeping. */ michael@0: JS_ASSERT(zone->isGCMarking()); michael@0: zone->setGCState(Zone::Sweep); michael@0: michael@0: /* Purge the ArenaLists before sweeping. */ michael@0: zone->allocator.arenas.purge(); michael@0: michael@0: if (rt->isAtomsZone(zone)) michael@0: sweepingAtoms = true; michael@0: michael@0: if (rt->sweepZoneCallback) michael@0: rt->sweepZoneCallback(zone); michael@0: } michael@0: michael@0: ValidateIncrementalMarking(rt); michael@0: michael@0: FreeOp fop(rt, rt->gcSweepOnBackgroundThread); michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START); michael@0: if (rt->gcFinalizeCallback) michael@0: rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gcIsFull /* unused */); michael@0: } michael@0: michael@0: if (sweepingAtoms) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_ATOMS); michael@0: rt->sweepAtoms(); michael@0: } michael@0: michael@0: /* Prune out dead views from ArrayBuffer's view lists. */ michael@0: for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) michael@0: ArrayBufferObject::sweep(c); michael@0: michael@0: /* Collect watch points associated with unreachable objects. */ michael@0: WatchpointMap::sweepAll(rt); michael@0: michael@0: /* Detach unreachable debuggers and global objects from each other. 
*/ michael@0: Debugger::sweepAll(&fop); michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_COMPARTMENTS); michael@0: michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_DISCARD_CODE); michael@0: zone->discardJitCode(&fop); michael@0: } michael@0: michael@0: bool releaseTypes = ReleaseObservedTypes(rt); michael@0: for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: c->sweep(&fop, releaseTypes && !c->zone()->isPreservingCode()); michael@0: } michael@0: michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: michael@0: // If there is an OOM while sweeping types, the type information michael@0: // will be deoptimized so that it is still correct (i.e. michael@0: // overapproximates the possible types in the zone), but the michael@0: // constraints might not have been triggered on the deoptimization michael@0: // or even copied over completely. In this case, destroy all JIT michael@0: // code and new script addendums in the zone, the only things whose michael@0: // correctness depends on the type constraints. michael@0: bool oom = false; michael@0: zone->sweep(&fop, releaseTypes && !zone->isPreservingCode(), &oom); michael@0: michael@0: if (oom) { michael@0: zone->setPreservingCode(false); michael@0: zone->discardJitCode(&fop); michael@0: zone->types.clearAllNewScriptAddendumsOnOOM(); michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * Queue all GC things in all zones for sweeping, either in the michael@0: * foreground or on the background thread. michael@0: * michael@0: * Note that order is important here for the background case. michael@0: * michael@0: * Objects are finalized immediately but this may change in the future. 
michael@0: */ michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: zone->allocator.arenas.queueObjectsForSweep(&fop); michael@0: } michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: zone->allocator.arenas.queueStringsForSweep(&fop); michael@0: } michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: zone->allocator.arenas.queueScriptsForSweep(&fop); michael@0: } michael@0: #ifdef JS_ION michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: zone->allocator.arenas.queueJitCodeForSweep(&fop); michael@0: } michael@0: #endif michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex); michael@0: zone->allocator.arenas.queueShapesForSweep(&fop); michael@0: zone->allocator.arenas.gcShapeArenasToSweep = michael@0: zone->allocator.arenas.arenaListsToSweep[FINALIZE_SHAPE]; michael@0: } michael@0: michael@0: rt->gcSweepPhase = 0; michael@0: rt->gcSweepZone = rt->gcCurrentZoneGroup; michael@0: rt->gcSweepKindIndex = 0; michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END); michael@0: if (rt->gcFinalizeCallback) michael@0: rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gcIsFull /* unused */); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: EndSweepingZoneGroup(JSRuntime *rt) michael@0: { michael@0: /* Update the GC state for zones we have swept and unlink the list. */ michael@0: for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCSweeping()); michael@0: zone->setGCState(Zone::Finished); michael@0: } michael@0: michael@0: /* Reset the list of arenas marked as being allocated during sweep phase. */ michael@0: while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) { michael@0: rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep(); michael@0: arena->unsetAllocDuringSweep(); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: BeginSweepPhase(JSRuntime *rt, bool lastGC) michael@0: { michael@0: /* michael@0: * Sweep phase. michael@0: * michael@0: * Finalize as we sweep, outside of rt->gcLock but with rt->isHeapBusy() michael@0: * true so that any attempt to allocate a GC-thing from a finalizer will michael@0: * fail, rather than nest badly and leave the unmarked newborn to be swept. 
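     *
     * The triple rt->gcSweepPhase / rt->gcSweepZone / rt->gcSweepKindIndex,
     * initialized at the end of BeginSweepingZoneGroup above, acts as a
     * resume cursor for SweepPhase below. A minimal sketch of that pattern,
     * with hypothetical NumPhases/NumZones/NumKinds/FinalizeSome names:
     *
     *   struct Cursor { size_t phase, zone, kind; };
     *
     *   bool SweepSomeMore(Cursor &c, SliceBudget &budget) {
     *       for (; c.phase < NumPhases; c.phase++, c.zone = 0) {
     *           for (; c.zone < NumZones; c.zone++, c.kind = 0) {
     *               for (; c.kind < NumKinds; c.kind++) {
     *                   if (!FinalizeSome(c, budget))
     *                       return false;  // over budget; cursor says where to resume
     *               }
     *           }
     *       }
     *       return true;                   // the current group is fully swept
     *   }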
michael@0: */ michael@0: michael@0: JS_ASSERT(!rt->gcAbortSweepAfterCurrentGroup); michael@0: michael@0: ComputeNonIncrementalMarkingForValidation(rt); michael@0: michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP); michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: rt->gcSweepOnBackgroundThread = !lastGC && rt->useHelperThreads(); michael@0: #endif michael@0: michael@0: #ifdef DEBUG michael@0: for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) { michael@0: JS_ASSERT(!c->gcIncomingGrayPointers); michael@0: for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) { michael@0: if (e.front().key().kind != CrossCompartmentKey::StringWrapper) michael@0: AssertNotOnGrayList(&e.front().value().get().toObject()); michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: DropStringWrappers(rt); michael@0: FindZoneGroups(rt); michael@0: EndMarkingZoneGroup(rt); michael@0: BeginSweepingZoneGroup(rt); michael@0: } michael@0: michael@0: bool michael@0: ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget) michael@0: { michael@0: if (!arenaListsToSweep[thingKind]) michael@0: return true; michael@0: michael@0: ArenaList &dest = arenaLists[thingKind]; michael@0: return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget); michael@0: } michael@0: michael@0: static bool michael@0: DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget, gcstats::Phase phase) michael@0: { michael@0: /* Run a marking slice and return whether the stack is now empty. */ michael@0: gcstats::AutoPhase ap(rt->gcStats, phase); michael@0: return rt->gcMarker.drainMarkStack(sliceBudget); michael@0: } michael@0: michael@0: static bool michael@0: SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget) michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP); michael@0: FreeOp fop(rt, rt->gcSweepOnBackgroundThread); michael@0: michael@0: bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_SWEEP_MARK); michael@0: if (!finished) michael@0: return false; michael@0: michael@0: for (;;) { michael@0: /* Finalize foreground finalized things. */ michael@0: for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) { michael@0: gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]); michael@0: michael@0: for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) { michael@0: Zone *zone = rt->gcSweepZone; michael@0: michael@0: while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) { michael@0: AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex]; michael@0: michael@0: if (!zone->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget)) michael@0: return false; /* Yield to the mutator. */ michael@0: michael@0: ++rt->gcSweepKindIndex; michael@0: } michael@0: rt->gcSweepKindIndex = 0; michael@0: } michael@0: rt->gcSweepZone = rt->gcCurrentZoneGroup; michael@0: } michael@0: michael@0: /* Remove dead shapes from the shape tree, but don't finalize them yet. 
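         *
         * The sliceBudget threaded through this function is what makes the
         * loops above and the shape sweeping below preemptible: work is
         * counted with step() and the function returns false to yield once
         * isOverBudget() reports the budget spent. A toy work-counting budget
         * with the same surface (not the engine's SliceBudget):
         *
         *   class ToyBudget {
         *       int64_t remaining;
         *     public:
         *       explicit ToyBudget(int64_t work) : remaining(work) {}
         *       void step(int64_t n = 1) { remaining -= n; }
         *       bool isOverBudget() const { return remaining <= 0; }
         *   };
         *
         *   // usage, as in the sweeping code here:
         *   //   budget.step(thingsProcessed);
         *   //   if (budget.isOverBudget())
         *   //       return false;   // yield to the mutator; resume next slice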
*/ michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE); michael@0: michael@0: for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) { michael@0: Zone *zone = rt->gcSweepZone; michael@0: while (ArenaHeader *arena = zone->allocator.arenas.gcShapeArenasToSweep) { michael@0: for (CellIterUnderGC i(arena); !i.done(); i.next()) { michael@0: Shape *shape = i.get(); michael@0: if (!shape->isMarked()) michael@0: shape->sweep(); michael@0: } michael@0: michael@0: zone->allocator.arenas.gcShapeArenasToSweep = arena->next; michael@0: sliceBudget.step(Arena::thingsPerArena(Arena::thingSize(FINALIZE_SHAPE))); michael@0: if (sliceBudget.isOverBudget()) michael@0: return false; /* Yield to the mutator. */ michael@0: } michael@0: } michael@0: } michael@0: michael@0: EndSweepingZoneGroup(rt); michael@0: GetNextZoneGroup(rt); michael@0: if (!rt->gcCurrentZoneGroup) michael@0: return true; /* We're finished. */ michael@0: EndMarkingZoneGroup(rt); michael@0: BeginSweepingZoneGroup(rt); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC) michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP); michael@0: FreeOp fop(rt, rt->gcSweepOnBackgroundThread); michael@0: michael@0: JS_ASSERT_IF(lastGC, !rt->gcSweepOnBackgroundThread); michael@0: michael@0: JS_ASSERT(rt->gcMarker.isDrained()); michael@0: rt->gcMarker.stop(); michael@0: michael@0: /* michael@0: * Recalculate whether GC was full or not as this may have changed due to michael@0: * newly created zones. Can only change from full to not full. michael@0: */ michael@0: if (rt->gcIsFull) { michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: if (!zone->isCollecting()) { michael@0: rt->gcIsFull = false; michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: michael@0: /* michael@0: * If we found any black->gray edges during marking, we completely clear the michael@0: * mark bits of all uncollected zones, or if a reset has occured, zones that michael@0: * will no longer be collected. This is safe, although it may michael@0: * prevent the cycle collector from collecting some dead objects. michael@0: */ michael@0: if (rt->gcFoundBlackGrayEdges) { michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: if (!zone->isCollecting()) michael@0: zone->allocator.arenas.unmarkAll(); michael@0: } michael@0: } michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY); michael@0: michael@0: /* michael@0: * Sweep script filenames after sweeping functions in the generic loop michael@0: * above. In this way when a scripted function's finalizer destroys the michael@0: * script and calls rt->destroyScriptHook, the hook can still access the michael@0: * script's filename. See bug 323267. michael@0: */ michael@0: if (rt->gcIsFull) michael@0: SweepScriptData(rt); michael@0: michael@0: /* Clear out any small pools that we're hanging on to. */ michael@0: if (JSC::ExecutableAllocator *execAlloc = rt->maybeExecAlloc()) michael@0: execAlloc->purge(); michael@0: michael@0: /* michael@0: * This removes compartments from rt->compartment, so we do it last to make michael@0: * sure we don't miss sweeping any compartments. 
michael@0: */ michael@0: if (!lastGC) michael@0: SweepZones(&fop, lastGC); michael@0: michael@0: if (!rt->gcSweepOnBackgroundThread) { michael@0: /* michael@0: * Destroy arenas after we finished the sweeping so finalizers can michael@0: * safely use IsAboutToBeFinalized(). This is done on the michael@0: * GCHelperThread if possible. We acquire the lock only because michael@0: * Expire needs to unlock it for other callers. michael@0: */ michael@0: AutoLockGC lock(rt); michael@0: ExpireChunksAndArenas(rt, gckind == GC_SHRINK); michael@0: } michael@0: } michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END); michael@0: michael@0: if (rt->gcFinalizeCallback) michael@0: rt->gcFinalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gcIsFull); michael@0: michael@0: /* If we finished a full GC, then the gray bits are correct. */ michael@0: if (rt->gcIsFull) michael@0: rt->gcGrayBitsValid = true; michael@0: } michael@0: michael@0: /* Set up list of zones for sweeping of background things. */ michael@0: JS_ASSERT(!rt->gcSweepingZones); michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: zone->gcNextGraphNode = rt->gcSweepingZones; michael@0: rt->gcSweepingZones = zone; michael@0: } michael@0: michael@0: /* If not sweeping on background thread then we must do it here. */ michael@0: if (!rt->gcSweepOnBackgroundThread) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY); michael@0: michael@0: SweepBackgroundThings(rt, false); michael@0: michael@0: rt->freeLifoAlloc.freeAll(); michael@0: michael@0: /* Ensure the compartments get swept if it's the last GC. */ michael@0: if (lastGC) michael@0: SweepZones(&fop, lastGC); michael@0: } michael@0: michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: zone->setGCLastBytes(zone->gcBytes, gckind); michael@0: if (zone->isCollecting()) { michael@0: JS_ASSERT(zone->isGCFinished()); michael@0: zone->setGCState(Zone::NoGC); michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: JS_ASSERT(!zone->isCollecting()); michael@0: JS_ASSERT(!zone->wasGCStarted()); michael@0: michael@0: for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) { michael@0: JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) || michael@0: !rt->gcSweepOnBackgroundThread, michael@0: !zone->allocator.arenas.arenaListsToSweep[i]); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) { michael@0: JS_ASSERT(!c->gcIncomingGrayPointers); michael@0: JS_ASSERT(c->gcLiveArrayBuffers.empty()); michael@0: michael@0: for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) { michael@0: if (e.front().key().kind != CrossCompartmentKey::StringWrapper) michael@0: AssertNotOnGrayList(&e.front().value().get().toObject()); michael@0: } michael@0: } michael@0: #endif michael@0: michael@0: FinishMarkingValidation(rt); michael@0: michael@0: rt->gcLastGCTime = PRMJ_Now(); michael@0: } michael@0: michael@0: namespace { michael@0: michael@0: /* ...while this class is to be used only for garbage collection. 
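 *
 * AutoGCSession also illustrates a common RAII-with-cancel shape: the
 * end-of-collection bookkeeping lives in the destructor and is skipped when
 * cancel() was called (as GCCycle does when the cycle is abandoned). A
 * generic sketch of that shape, not the engine class:
 *
 *   class ScopedFinish {
 *       bool canceled;
 *     public:
 *       ScopedFinish() : canceled(false) {}
 *       void cancel() { canceled = true; }
 *       ~ScopedFinish() {
 *           if (canceled)
 *               return;
 *           // ... bookkeeping for a session that finished normally ...
 *       }
 *   };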
*/ michael@0: class AutoGCSession michael@0: { michael@0: JSRuntime *runtime; michael@0: AutoTraceSession session; michael@0: bool canceled; michael@0: michael@0: public: michael@0: explicit AutoGCSession(JSRuntime *rt); michael@0: ~AutoGCSession(); michael@0: michael@0: void cancel() { canceled = true; } michael@0: }; michael@0: michael@0: } /* anonymous namespace */ michael@0: michael@0: /* Start a new heap session. */ michael@0: AutoTraceSession::AutoTraceSession(JSRuntime *rt, js::HeapState heapState) michael@0: : lock(rt), michael@0: runtime(rt), michael@0: prevState(rt->heapState) michael@0: { michael@0: JS_ASSERT(!rt->noGCOrAllocationCheck); michael@0: JS_ASSERT(!rt->isHeapBusy()); michael@0: JS_ASSERT(heapState != Idle); michael@0: #ifdef JSGC_GENERATIONAL michael@0: JS_ASSERT_IF(heapState == MajorCollecting, rt->gcNursery.isEmpty()); michael@0: #endif michael@0: michael@0: // Threads with an exclusive context can hit refillFreeList while holding michael@0: // the exclusive access lock. To avoid deadlocking when we try to acquire michael@0: // this lock during GC and the other thread is waiting, make sure we hold michael@0: // the exclusive access lock during GC sessions. michael@0: JS_ASSERT(rt->currentThreadHasExclusiveAccess()); michael@0: michael@0: if (rt->exclusiveThreadsPresent()) { michael@0: // Lock the worker thread state when changing the heap state in the michael@0: // presence of exclusive threads, to avoid racing with refillFreeList. michael@0: #ifdef JS_THREADSAFE michael@0: AutoLockWorkerThreadState lock; michael@0: rt->heapState = heapState; michael@0: #else michael@0: MOZ_CRASH(); michael@0: #endif michael@0: } else { michael@0: rt->heapState = heapState; michael@0: } michael@0: } michael@0: michael@0: AutoTraceSession::~AutoTraceSession() michael@0: { michael@0: JS_ASSERT(runtime->isHeapBusy()); michael@0: michael@0: if (runtime->exclusiveThreadsPresent()) { michael@0: #ifdef JS_THREADSAFE michael@0: AutoLockWorkerThreadState lock; michael@0: runtime->heapState = prevState; michael@0: michael@0: // Notify any worker threads waiting for the trace session to end. michael@0: WorkerThreadState().notifyAll(GlobalWorkerThreadState::PRODUCER); michael@0: #else michael@0: MOZ_CRASH(); michael@0: #endif michael@0: } else { michael@0: runtime->heapState = prevState; michael@0: } michael@0: } michael@0: michael@0: AutoGCSession::AutoGCSession(JSRuntime *rt) michael@0: : runtime(rt), michael@0: session(rt, MajorCollecting), michael@0: canceled(false) michael@0: { michael@0: runtime->gcIsNeeded = false; michael@0: runtime->gcInterFrameGC = true; michael@0: michael@0: runtime->gcNumber++; michael@0: michael@0: // It's ok if threads other than the main thread have suppressGC set, as michael@0: // they are operating on zones which will not be collected from here. michael@0: JS_ASSERT(!runtime->mainThread.suppressGC); michael@0: } michael@0: michael@0: AutoGCSession::~AutoGCSession() michael@0: { michael@0: if (canceled) michael@0: return; michael@0: michael@0: #ifndef JS_MORE_DETERMINISTIC michael@0: runtime->gcNextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN; michael@0: #endif michael@0: michael@0: runtime->gcChunkAllocationSinceLastGC = false; michael@0: michael@0: #ifdef JS_GC_ZEAL michael@0: /* Keeping these around after a GC is dangerous. 
*/ michael@0: runtime->gcSelectedForMarking.clearAndFree(); michael@0: #endif michael@0: michael@0: /* Clear gcMallocBytes for all compartments */ michael@0: for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) { michael@0: zone->resetGCMallocBytes(); michael@0: zone->unscheduleGC(); michael@0: } michael@0: michael@0: runtime->resetGCMallocBytes(); michael@0: } michael@0: michael@0: AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime *rt, ZoneSelector selector) michael@0: : runtime(rt), michael@0: selector(selector) michael@0: { michael@0: for (ZonesIter zone(rt, selector); !zone.done(); zone.next()) michael@0: zone->allocator.arenas.copyFreeListsToArenas(); michael@0: } michael@0: michael@0: AutoCopyFreeListToArenas::~AutoCopyFreeListToArenas() michael@0: { michael@0: for (ZonesIter zone(runtime, selector); !zone.done(); zone.next()) michael@0: zone->allocator.arenas.clearFreeListsInArenas(); michael@0: } michael@0: michael@0: class AutoCopyFreeListToArenasForGC michael@0: { michael@0: JSRuntime *runtime; michael@0: michael@0: public: michael@0: AutoCopyFreeListToArenasForGC(JSRuntime *rt) : runtime(rt) { michael@0: JS_ASSERT(rt->currentThreadHasExclusiveAccess()); michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) michael@0: zone->allocator.arenas.copyFreeListsToArenas(); michael@0: } michael@0: ~AutoCopyFreeListToArenasForGC() { michael@0: for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) michael@0: zone->allocator.arenas.clearFreeListsInArenas(); michael@0: } michael@0: }; michael@0: michael@0: static void michael@0: IncrementalCollectSlice(JSRuntime *rt, michael@0: int64_t budget, michael@0: JS::gcreason::Reason gcReason, michael@0: JSGCInvocationKind gcKind); michael@0: michael@0: static void michael@0: ResetIncrementalGC(JSRuntime *rt, const char *reason) michael@0: { michael@0: switch (rt->gcIncrementalState) { michael@0: case NO_INCREMENTAL: michael@0: return; michael@0: michael@0: case MARK: { michael@0: /* Cancel any ongoing marking. */ michael@0: AutoCopyFreeListToArenasForGC copy(rt); michael@0: michael@0: rt->gcMarker.reset(); michael@0: rt->gcMarker.stop(); michael@0: michael@0: for (GCCompartmentsIter c(rt); !c.done(); c.next()) { michael@0: ArrayBufferObject::resetArrayBufferList(c); michael@0: ResetGrayList(c); michael@0: } michael@0: michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: JS_ASSERT(zone->isGCMarking()); michael@0: zone->setNeedsBarrier(false, Zone::UpdateIon); michael@0: zone->setGCState(Zone::NoGC); michael@0: } michael@0: rt->setNeedsBarrier(false); michael@0: AssertNeedsBarrierFlagsConsistent(rt); michael@0: michael@0: rt->gcIncrementalState = NO_INCREMENTAL; michael@0: michael@0: JS_ASSERT(!rt->gcStrictCompartmentChecking); michael@0: michael@0: break; michael@0: } michael@0: michael@0: case SWEEP: michael@0: rt->gcMarker.reset(); michael@0: michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) michael@0: zone->scheduledForDestruction = false; michael@0: michael@0: /* Finish sweeping the current zone group, then abort. 
*/ michael@0: rt->gcAbortSweepAfterCurrentGroup = true; michael@0: IncrementalCollectSlice(rt, SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL); michael@0: michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD); michael@0: rt->gcHelperThread.waitBackgroundSweepOrAllocEnd(); michael@0: } michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Invalid incremental GC state"); michael@0: } michael@0: michael@0: rt->gcStats.reset(reason); michael@0: michael@0: #ifdef DEBUG michael@0: for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) michael@0: JS_ASSERT(c->gcLiveArrayBuffers.empty()); michael@0: michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: JS_ASSERT(!zone->needsBarrier()); michael@0: for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) michael@0: JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: namespace { michael@0: michael@0: class AutoGCSlice { michael@0: public: michael@0: AutoGCSlice(JSRuntime *rt); michael@0: ~AutoGCSlice(); michael@0: michael@0: private: michael@0: JSRuntime *runtime; michael@0: }; michael@0: michael@0: } /* anonymous namespace */ michael@0: michael@0: AutoGCSlice::AutoGCSlice(JSRuntime *rt) michael@0: : runtime(rt) michael@0: { michael@0: /* michael@0: * During incremental GC, the compartment's active flag determines whether michael@0: * there are stack frames active for any of its scripts. Normally this flag michael@0: * is set at the beginning of the mark phase. During incremental GC, we also michael@0: * set it at the start of every phase. michael@0: */ michael@0: for (ActivationIterator iter(rt); !iter.done(); ++iter) michael@0: iter->compartment()->zone()->active = true; michael@0: michael@0: for (GCZonesIter zone(rt); !zone.done(); zone.next()) { michael@0: /* michael@0: * Clear needsBarrier early so we don't do any write barriers during michael@0: * GC. We don't need to update the Ion barriers (which is expensive) michael@0: * because Ion code doesn't run during GC. If need be, we'll update the michael@0: * Ion barriers in ~AutoGCSlice. michael@0: */ michael@0: if (zone->isGCMarking()) { michael@0: JS_ASSERT(zone->needsBarrier()); michael@0: zone->setNeedsBarrier(false, Zone::DontUpdateIon); michael@0: } else { michael@0: JS_ASSERT(!zone->needsBarrier()); michael@0: } michael@0: } michael@0: rt->setNeedsBarrier(false); michael@0: AssertNeedsBarrierFlagsConsistent(rt); michael@0: } michael@0: michael@0: AutoGCSlice::~AutoGCSlice() michael@0: { michael@0: /* We can't use GCZonesIter if this is the end of the last slice. */ michael@0: bool haveBarriers = false; michael@0: for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) { michael@0: if (zone->isGCMarking()) { michael@0: zone->setNeedsBarrier(true, Zone::UpdateIon); michael@0: zone->allocator.arenas.prepareForIncrementalGC(runtime); michael@0: haveBarriers = true; michael@0: } else { michael@0: zone->setNeedsBarrier(false, Zone::UpdateIon); michael@0: } michael@0: } michael@0: runtime->setNeedsBarrier(haveBarriers); michael@0: AssertNeedsBarrierFlagsConsistent(runtime); michael@0: } michael@0: michael@0: static void michael@0: PushZealSelectedObjects(JSRuntime *rt) michael@0: { michael@0: #ifdef JS_GC_ZEAL michael@0: /* Push selected objects onto the mark stack and clear the list. 
*/ michael@0: for (JSObject **obj = rt->gcSelectedForMarking.begin(); michael@0: obj != rt->gcSelectedForMarking.end(); obj++) michael@0: { michael@0: MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj"); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: static void michael@0: IncrementalCollectSlice(JSRuntime *rt, michael@0: int64_t budget, michael@0: JS::gcreason::Reason reason, michael@0: JSGCInvocationKind gckind) michael@0: { michael@0: JS_ASSERT(rt->currentThreadHasExclusiveAccess()); michael@0: michael@0: AutoCopyFreeListToArenasForGC copy(rt); michael@0: AutoGCSlice slice(rt); michael@0: michael@0: bool lastGC = (reason == JS::gcreason::DESTROY_RUNTIME); michael@0: michael@0: gc::State initialState = rt->gcIncrementalState; michael@0: michael@0: int zeal = 0; michael@0: #ifdef JS_GC_ZEAL michael@0: if (reason == JS::gcreason::DEBUG_GC && budget != SliceBudget::Unlimited) { michael@0: /* michael@0: * Do the incremental collection type specified by zeal mode if the michael@0: * collection was triggered by RunDebugGC() and incremental GC has not michael@0: * been cancelled by ResetIncrementalGC. michael@0: */ michael@0: zeal = rt->gcZeal(); michael@0: } michael@0: #endif michael@0: michael@0: JS_ASSERT_IF(rt->gcIncrementalState != NO_INCREMENTAL, rt->gcIsIncremental); michael@0: rt->gcIsIncremental = budget != SliceBudget::Unlimited; michael@0: michael@0: if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) { michael@0: /* michael@0: * Yields between slices occurs at predetermined points in these modes; michael@0: * the budget is not used. michael@0: */ michael@0: budget = SliceBudget::Unlimited; michael@0: } michael@0: michael@0: SliceBudget sliceBudget(budget); michael@0: michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) { michael@0: rt->gcIncrementalState = MARK_ROOTS; michael@0: rt->gcLastMarkSlice = false; michael@0: } michael@0: michael@0: if (rt->gcIncrementalState == MARK) michael@0: AutoGCRooter::traceAllWrappers(&rt->gcMarker); michael@0: michael@0: switch (rt->gcIncrementalState) { michael@0: michael@0: case MARK_ROOTS: michael@0: if (!BeginMarkPhase(rt)) { michael@0: rt->gcIncrementalState = NO_INCREMENTAL; michael@0: return; michael@0: } michael@0: michael@0: if (!lastGC) michael@0: PushZealSelectedObjects(rt); michael@0: michael@0: rt->gcIncrementalState = MARK; michael@0: michael@0: if (rt->gcIsIncremental && zeal == ZealIncrementalRootsThenFinish) michael@0: break; michael@0: michael@0: /* fall through */ michael@0: michael@0: case MARK: { michael@0: /* If we needed delayed marking for gray roots, then collect until done. */ michael@0: if (!rt->gcMarker.hasBufferedGrayRoots()) { michael@0: sliceBudget.reset(); michael@0: rt->gcIsIncremental = false; michael@0: } michael@0: michael@0: bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_MARK); michael@0: if (!finished) michael@0: break; michael@0: michael@0: JS_ASSERT(rt->gcMarker.isDrained()); michael@0: michael@0: if (!rt->gcLastMarkSlice && rt->gcIsIncremental && michael@0: ((initialState == MARK && zeal != ZealIncrementalRootsThenFinish) || michael@0: zeal == ZealIncrementalMarkAllThenFinish)) michael@0: { michael@0: /* michael@0: * Yield with the aim of starting the sweep in the next michael@0: * slice. We will need to mark anything new on the stack michael@0: * when we resume, so we stay in MARK state. 
michael@0: */ michael@0: rt->gcLastMarkSlice = true; michael@0: break; michael@0: } michael@0: michael@0: rt->gcIncrementalState = SWEEP; michael@0: michael@0: /* michael@0: * This runs to completion, but we don't continue if the budget is michael@0: * now exhasted. michael@0: */ michael@0: BeginSweepPhase(rt, lastGC); michael@0: if (sliceBudget.isOverBudget()) michael@0: break; michael@0: michael@0: /* michael@0: * Always yield here when running in incremental multi-slice zeal michael@0: * mode, so RunDebugGC can reset the slice buget. michael@0: */ michael@0: if (rt->gcIsIncremental && zeal == ZealIncrementalMultipleSlices) michael@0: break; michael@0: michael@0: /* fall through */ michael@0: } michael@0: michael@0: case SWEEP: { michael@0: bool finished = SweepPhase(rt, sliceBudget); michael@0: if (!finished) michael@0: break; michael@0: michael@0: EndSweepPhase(rt, gckind, lastGC); michael@0: michael@0: if (rt->gcSweepOnBackgroundThread) michael@0: rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK); michael@0: michael@0: rt->gcIncrementalState = NO_INCREMENTAL; michael@0: break; michael@0: } michael@0: michael@0: default: michael@0: JS_ASSERT(false); michael@0: } michael@0: } michael@0: michael@0: IncrementalSafety michael@0: gc::IsIncrementalGCSafe(JSRuntime *rt) michael@0: { michael@0: JS_ASSERT(!rt->mainThread.suppressGC); michael@0: michael@0: if (rt->keepAtoms()) michael@0: return IncrementalSafety::Unsafe("keepAtoms set"); michael@0: michael@0: if (!rt->gcIncrementalEnabled) michael@0: return IncrementalSafety::Unsafe("incremental permanently disabled"); michael@0: michael@0: return IncrementalSafety::Safe(); michael@0: } michael@0: michael@0: static void michael@0: BudgetIncrementalGC(JSRuntime *rt, int64_t *budget) michael@0: { michael@0: IncrementalSafety safe = IsIncrementalGCSafe(rt); michael@0: if (!safe) { michael@0: ResetIncrementalGC(rt, safe.reason()); michael@0: *budget = SliceBudget::Unlimited; michael@0: rt->gcStats.nonincremental(safe.reason()); michael@0: return; michael@0: } michael@0: michael@0: if (rt->gcMode() != JSGC_MODE_INCREMENTAL) { michael@0: ResetIncrementalGC(rt, "GC mode change"); michael@0: *budget = SliceBudget::Unlimited; michael@0: rt->gcStats.nonincremental("GC mode"); michael@0: return; michael@0: } michael@0: michael@0: if (rt->isTooMuchMalloc()) { michael@0: *budget = SliceBudget::Unlimited; michael@0: rt->gcStats.nonincremental("malloc bytes trigger"); michael@0: } michael@0: michael@0: bool reset = false; michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: if (zone->gcBytes >= zone->gcTriggerBytes) { michael@0: *budget = SliceBudget::Unlimited; michael@0: rt->gcStats.nonincremental("allocation trigger"); michael@0: } michael@0: michael@0: if (rt->gcIncrementalState != NO_INCREMENTAL && michael@0: zone->isGCScheduled() != zone->wasGCStarted()) michael@0: { michael@0: reset = true; michael@0: } michael@0: michael@0: if (zone->isTooMuchMalloc()) { michael@0: *budget = SliceBudget::Unlimited; michael@0: rt->gcStats.nonincremental("malloc bytes trigger"); michael@0: } michael@0: } michael@0: michael@0: if (reset) michael@0: ResetIncrementalGC(rt, "zone change"); michael@0: } michael@0: michael@0: /* michael@0: * Run one GC "cycle" (either a slice of incremental GC or an entire michael@0: * non-incremental GC. We disable inlining to ensure that the bottom of the michael@0: * stack with possible GC roots recorded in MarkRuntime excludes any pointers we michael@0: * use during the marking implementation. 
michael@0: * michael@0: * Returns true if we "reset" an existing incremental GC, which would force us michael@0: * to run another cycle. michael@0: */ michael@0: static MOZ_NEVER_INLINE bool michael@0: GCCycle(JSRuntime *rt, bool incremental, int64_t budget, michael@0: JSGCInvocationKind gckind, JS::gcreason::Reason reason) michael@0: { michael@0: AutoGCSession gcsession(rt); michael@0: michael@0: /* michael@0: * As we about to purge caches and clear the mark bits we must wait for michael@0: * any background finalization to finish. We must also wait for the michael@0: * background allocation to finish so we can avoid taking the GC lock michael@0: * when manipulating the chunks during the GC. michael@0: */ michael@0: { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD); michael@0: rt->gcHelperThread.waitBackgroundSweepOrAllocEnd(); michael@0: } michael@0: michael@0: State prevState = rt->gcIncrementalState; michael@0: michael@0: if (!incremental) { michael@0: /* If non-incremental GC was requested, reset incremental GC. */ michael@0: ResetIncrementalGC(rt, "requested"); michael@0: rt->gcStats.nonincremental("requested"); michael@0: budget = SliceBudget::Unlimited; michael@0: } else { michael@0: BudgetIncrementalGC(rt, &budget); michael@0: } michael@0: michael@0: /* The GC was reset, so we need a do-over. */ michael@0: if (prevState != NO_INCREMENTAL && rt->gcIncrementalState == NO_INCREMENTAL) { michael@0: gcsession.cancel(); michael@0: return true; michael@0: } michael@0: michael@0: IncrementalCollectSlice(rt, budget, reason, gckind); michael@0: return false; michael@0: } michael@0: michael@0: #ifdef JS_GC_ZEAL michael@0: static bool michael@0: IsDeterministicGCReason(JS::gcreason::Reason reason) michael@0: { michael@0: if (reason > JS::gcreason::DEBUG_GC && michael@0: reason != JS::gcreason::CC_FORCED && reason != JS::gcreason::SHUTDOWN_CC) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: if (reason == JS::gcreason::MAYBEGC) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: #endif michael@0: michael@0: static bool michael@0: ShouldCleanUpEverything(JSRuntime *rt, JS::gcreason::Reason reason, JSGCInvocationKind gckind) michael@0: { michael@0: // During shutdown, we must clean everything up, for the sake of leak michael@0: // detection. When a runtime has no contexts, or we're doing a GC before a michael@0: // shutdown CC, those are strong indications that we're shutting down. 
michael@0: return reason == JS::gcreason::DESTROY_RUNTIME || michael@0: reason == JS::gcreason::SHUTDOWN_CC || michael@0: gckind == GC_SHRINK; michael@0: } michael@0: michael@0: namespace { michael@0: michael@0: #ifdef JSGC_GENERATIONAL michael@0: class AutoDisableStoreBuffer michael@0: { michael@0: JSRuntime *runtime; michael@0: bool prior; michael@0: michael@0: public: michael@0: AutoDisableStoreBuffer(JSRuntime *rt) : runtime(rt) { michael@0: prior = rt->gcStoreBuffer.isEnabled(); michael@0: rt->gcStoreBuffer.disable(); michael@0: } michael@0: ~AutoDisableStoreBuffer() { michael@0: if (prior) michael@0: runtime->gcStoreBuffer.enable(); michael@0: } michael@0: }; michael@0: #else michael@0: struct AutoDisableStoreBuffer michael@0: { michael@0: AutoDisableStoreBuffer(JSRuntime *) {} michael@0: }; michael@0: #endif michael@0: michael@0: } /* anonymous namespace */ michael@0: michael@0: static void michael@0: Collect(JSRuntime *rt, bool incremental, int64_t budget, michael@0: JSGCInvocationKind gckind, JS::gcreason::Reason reason) michael@0: { michael@0: /* GC shouldn't be running in parallel execution mode */ michael@0: JS_ASSERT(!InParallelSection()); michael@0: michael@0: JS_AbortIfWrongThread(rt); michael@0: michael@0: /* If we attempt to invoke the GC while we are running in the GC, assert. */ michael@0: JS_ASSERT(!rt->isHeapBusy()); michael@0: michael@0: if (rt->mainThread.suppressGC) michael@0: return; michael@0: michael@0: TraceLogger *logger = TraceLoggerForMainThread(rt); michael@0: AutoTraceLog logGC(logger, TraceLogger::GC); michael@0: michael@0: #ifdef JS_GC_ZEAL michael@0: if (rt->gcDeterministicOnly && !IsDeterministicGCReason(reason)) michael@0: return; michael@0: #endif michael@0: michael@0: JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL); michael@0: michael@0: AutoStopVerifyingBarriers av(rt, reason == JS::gcreason::SHUTDOWN_CC || michael@0: reason == JS::gcreason::DESTROY_RUNTIME); michael@0: michael@0: RecordNativeStackTopForGC(rt); michael@0: michael@0: int zoneCount = 0; michael@0: int compartmentCount = 0; michael@0: int collectedCount = 0; michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: if (rt->gcMode() == JSGC_MODE_GLOBAL) michael@0: zone->scheduleGC(); michael@0: michael@0: /* This is a heuristic to avoid resets. */ michael@0: if (rt->gcIncrementalState != NO_INCREMENTAL && zone->needsBarrier()) michael@0: zone->scheduleGC(); michael@0: michael@0: zoneCount++; michael@0: if (zone->isGCScheduled()) michael@0: collectedCount++; michael@0: } michael@0: michael@0: for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) michael@0: compartmentCount++; michael@0: michael@0: rt->gcShouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind); michael@0: michael@0: bool repeat = false; michael@0: do { michael@0: MinorGC(rt, reason); michael@0: michael@0: /* michael@0: * Marking can trigger many incidental post barriers, some of them for michael@0: * objects which are not going to be live after the GC. michael@0: */ michael@0: AutoDisableStoreBuffer adsb(rt); michael@0: michael@0: gcstats::AutoGCSlice agc(rt->gcStats, collectedCount, zoneCount, compartmentCount, reason); michael@0: michael@0: /* michael@0: * Let the API user decide to defer a GC if it wants to (unless this michael@0: * is the last context). Invoke the callback regardless. 
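         *
         * For reference, a callback compatible with the invocations just
         * below (MyGCCallback is illustrative; the registration entry point
         * is assumed to be JS_SetGCCallback(rt, cb, data), and 'data' is
         * whatever was stored as gcCallbackData):
         *
         *   static void
         *   MyGCCallback(JSRuntime *rt, JSGCStatus status, void *data)
         *   {
         *       if (status == JSGC_BEGIN) {
         *           // e.g. drop caches that must not hold pointers across a GC
         *       } else if (status == JSGC_END) {
         *           // e.g. record that a full collection cycle has completed
         *       }
         *   }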
michael@0: */ michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_BEGIN); michael@0: if (JSGCCallback callback = rt->gcCallback) michael@0: callback(rt, JSGC_BEGIN, rt->gcCallbackData); michael@0: } michael@0: michael@0: rt->gcPoke = false; michael@0: bool wasReset = GCCycle(rt, incremental, budget, gckind, reason); michael@0: michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) { michael@0: gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_END); michael@0: if (JSGCCallback callback = rt->gcCallback) michael@0: callback(rt, JSGC_END, rt->gcCallbackData); michael@0: } michael@0: michael@0: /* Need to re-schedule all zones for GC. */ michael@0: if (rt->gcPoke && rt->gcShouldCleanUpEverything) michael@0: JS::PrepareForFullGC(rt); michael@0: michael@0: /* michael@0: * If we reset an existing GC, we need to start a new one. Also, we michael@0: * repeat GCs that happen during shutdown (the gcShouldCleanUpEverything michael@0: * case) until we can be sure that no additional garbage is created michael@0: * (which typically happens if roots are dropped during finalizers). michael@0: */ michael@0: repeat = (rt->gcPoke && rt->gcShouldCleanUpEverything) || wasReset; michael@0: } while (repeat); michael@0: michael@0: if (rt->gcIncrementalState == NO_INCREMENTAL) { michael@0: #ifdef JS_THREADSAFE michael@0: EnqueuePendingParseTasksAfterGC(rt); michael@0: #endif michael@0: } michael@0: } michael@0: michael@0: void michael@0: js::GC(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason) michael@0: { michael@0: Collect(rt, false, SliceBudget::Unlimited, gckind, reason); michael@0: } michael@0: michael@0: void michael@0: js::GCSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis) michael@0: { michael@0: int64_t sliceBudget; michael@0: if (millis) michael@0: sliceBudget = SliceBudget::TimeBudget(millis); michael@0: else if (rt->gcHighFrequencyGC && rt->gcDynamicMarkSlice) michael@0: sliceBudget = rt->gcSliceBudget * IGC_MARK_SLICE_MULTIPLIER; michael@0: else michael@0: sliceBudget = rt->gcSliceBudget; michael@0: michael@0: Collect(rt, true, sliceBudget, gckind, reason); michael@0: } michael@0: michael@0: void michael@0: js::GCFinalSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason) michael@0: { michael@0: Collect(rt, true, SliceBudget::Unlimited, gckind, reason); michael@0: } michael@0: michael@0: static bool michael@0: ZonesSelected(JSRuntime *rt) michael@0: { michael@0: for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) { michael@0: if (zone->isGCScheduled()) michael@0: return true; michael@0: } michael@0: return false; michael@0: } michael@0: michael@0: void michael@0: js::GCDebugSlice(JSRuntime *rt, bool limit, int64_t objCount) michael@0: { michael@0: int64_t budget = limit ? SliceBudget::WorkBudget(objCount) : SliceBudget::Unlimited; michael@0: if (!ZonesSelected(rt)) { michael@0: if (JS::IsIncrementalGCInProgress(rt)) michael@0: JS::PrepareForIncrementalGC(rt); michael@0: else michael@0: JS::PrepareForFullGC(rt); michael@0: } michael@0: Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC); michael@0: } michael@0: michael@0: /* Schedule a full GC unless a zone will already be collected. 
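michael@0: * (A zone is "selected" by marking it with Zone::scheduleGC(); this is what
michael@0: * JS::PrepareForFullGC() does for every zone, and ZonesSelected() above just
michael@0: * checks Zone::isGCScheduled() on each zone.)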
*/ michael@0: void michael@0: js::PrepareForDebugGC(JSRuntime *rt) michael@0: { michael@0: if (!ZonesSelected(rt)) michael@0: JS::PrepareForFullGC(rt); michael@0: } michael@0: michael@0: JS_FRIEND_API(void) michael@0: JS::ShrinkGCBuffers(JSRuntime *rt) michael@0: { michael@0: AutoLockGC lock(rt); michael@0: JS_ASSERT(!rt->isHeapBusy()); michael@0: michael@0: if (!rt->useHelperThreads()) michael@0: ExpireChunksAndArenas(rt, true); michael@0: else michael@0: rt->gcHelperThread.startBackgroundShrink(); michael@0: } michael@0: michael@0: void michael@0: js::MinorGC(JSRuntime *rt, JS::gcreason::Reason reason) michael@0: { michael@0: #ifdef JSGC_GENERATIONAL michael@0: TraceLogger *logger = TraceLoggerForMainThread(rt); michael@0: AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC); michael@0: rt->gcNursery.collect(rt, reason, nullptr); michael@0: JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty()); michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: js::MinorGC(JSContext *cx, JS::gcreason::Reason reason) michael@0: { michael@0: // Alternate to the runtime-taking form above which allows marking type michael@0: // objects as needing pretenuring. michael@0: #ifdef JSGC_GENERATIONAL michael@0: TraceLogger *logger = TraceLoggerForMainThread(cx->runtime()); michael@0: AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC); michael@0: michael@0: Nursery::TypeObjectList pretenureTypes; michael@0: JSRuntime *rt = cx->runtime(); michael@0: rt->gcNursery.collect(cx->runtime(), reason, &pretenureTypes); michael@0: for (size_t i = 0; i < pretenureTypes.length(); i++) { michael@0: if (pretenureTypes[i]->canPreTenure()) michael@0: pretenureTypes[i]->setShouldPreTenure(cx); michael@0: } michael@0: JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty()); michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: js::gc::GCIfNeeded(JSContext *cx) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: michael@0: #ifdef JSGC_GENERATIONAL michael@0: /* michael@0: * In case of store buffer overflow perform minor GC first so that the michael@0: * correct reason is seen in the logs. 
michael@0: */ michael@0: if (rt->gcStoreBuffer.isAboutToOverflow()) michael@0: MinorGC(cx, JS::gcreason::FULL_STORE_BUFFER); michael@0: #endif michael@0: michael@0: if (rt->gcIsNeeded) michael@0: GCSlice(rt, GC_NORMAL, rt->gcTriggerReason); michael@0: } michael@0: michael@0: void michael@0: js::gc::FinishBackgroundFinalize(JSRuntime *rt) michael@0: { michael@0: rt->gcHelperThread.waitBackgroundSweepEnd(); michael@0: } michael@0: michael@0: AutoFinishGC::AutoFinishGC(JSRuntime *rt) michael@0: { michael@0: if (JS::IsIncrementalGCInProgress(rt)) { michael@0: JS::PrepareForIncrementalGC(rt); michael@0: JS::FinishIncrementalGC(rt, JS::gcreason::API); michael@0: } michael@0: michael@0: gc::FinishBackgroundFinalize(rt); michael@0: } michael@0: michael@0: AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime *rt, ZoneSelector selector) michael@0: : finish(rt), michael@0: session(rt), michael@0: copy(rt, selector) michael@0: { michael@0: RecordNativeStackTopForGC(rt); michael@0: } michael@0: michael@0: JSCompartment * michael@0: js::NewCompartment(JSContext *cx, Zone *zone, JSPrincipals *principals, michael@0: const JS::CompartmentOptions &options) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: JS_AbortIfWrongThread(rt); michael@0: michael@0: ScopedJSDeletePtr<Zone> zoneHolder; michael@0: if (!zone) { michael@0: zone = cx->new_<Zone>(rt); michael@0: if (!zone) michael@0: return nullptr; michael@0: michael@0: zoneHolder.reset(zone); michael@0: michael@0: zone->setGCLastBytes(8192, GC_NORMAL); michael@0: michael@0: const JSPrincipals *trusted = rt->trustedPrincipals(); michael@0: zone->isSystem = principals && principals == trusted; michael@0: } michael@0: michael@0: ScopedJSDeletePtr<JSCompartment> compartment(cx->new_<JSCompartment>(zone, options)); michael@0: if (!compartment || !compartment->init(cx)) michael@0: return nullptr; michael@0: michael@0: // Set up the principals. michael@0: JS_SetCompartmentPrincipals(compartment, principals); michael@0: michael@0: AutoLockGC lock(rt); michael@0: michael@0: if (!zone->compartments.append(compartment.get())) { michael@0: js_ReportOutOfMemory(cx); michael@0: return nullptr; michael@0: } michael@0: michael@0: if (zoneHolder && !rt->zones.append(zone)) { michael@0: js_ReportOutOfMemory(cx); michael@0: return nullptr; michael@0: } michael@0: michael@0: zoneHolder.forget(); michael@0: return compartment.forget(); michael@0: } michael@0: michael@0: void michael@0: gc::MergeCompartments(JSCompartment *source, JSCompartment *target) michael@0: { michael@0: // The source compartment must be specifically flagged as mergeable. This michael@0: // also implies that the compartment is not visible to the debugger. michael@0: JS_ASSERT(source->options_.mergeable()); michael@0: michael@0: JSRuntime *rt = source->runtimeFromMainThread(); michael@0: michael@0: AutoPrepareForTracing prepare(rt, SkipAtoms); michael@0: michael@0: // Clean up tables and other state in the source compartment that will be michael@0: // meaningless after merging into the target compartment. michael@0: michael@0: source->clearTables(); michael@0: michael@0: // Fix up compartment pointers in source to refer to target.
michael@0: michael@0: for (CellIter iter(source->zone(), FINALIZE_SCRIPT); !iter.done(); iter.next()) { michael@0: JSScript *script = iter.get<JSScript>(); michael@0: JS_ASSERT(script->compartment() == source); michael@0: script->compartment_ = target; michael@0: } michael@0: michael@0: for (CellIter iter(source->zone(), FINALIZE_BASE_SHAPE); !iter.done(); iter.next()) { michael@0: BaseShape *base = iter.get<BaseShape>(); michael@0: JS_ASSERT(base->compartment() == source); michael@0: base->compartment_ = target; michael@0: } michael@0: michael@0: // Fix up zone pointers in source's zone to refer to target's zone. michael@0: michael@0: for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) { michael@0: for (ArenaIter aiter(source->zone(), AllocKind(thingKind)); !aiter.done(); aiter.next()) { michael@0: ArenaHeader *aheader = aiter.get(); michael@0: aheader->zone = target->zone(); michael@0: } michael@0: } michael@0: michael@0: // The source should be the only compartment in its zone. michael@0: for (CompartmentsInZoneIter c(source->zone()); !c.done(); c.next()) michael@0: JS_ASSERT(c.get() == source); michael@0: michael@0: // Merge the allocator in source's zone into target's zone. michael@0: target->zone()->allocator.arenas.adoptArenas(rt, &source->zone()->allocator.arenas); michael@0: target->zone()->gcBytes += source->zone()->gcBytes; michael@0: source->zone()->gcBytes = 0; michael@0: michael@0: // Merge other info in source's zone into target's zone. michael@0: target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc); michael@0: } michael@0: michael@0: void michael@0: gc::RunDebugGC(JSContext *cx) michael@0: { michael@0: #ifdef JS_GC_ZEAL michael@0: JSRuntime *rt = cx->runtime(); michael@0: int type = rt->gcZeal(); michael@0: michael@0: if (rt->mainThread.suppressGC) michael@0: return; michael@0: michael@0: if (type == js::gc::ZealGenerationalGCValue) michael@0: return MinorGC(rt, JS::gcreason::DEBUG_GC); michael@0: michael@0: PrepareForDebugGC(cx->runtime()); michael@0: michael@0: if (type == ZealIncrementalRootsThenFinish || michael@0: type == ZealIncrementalMarkAllThenFinish || michael@0: type == ZealIncrementalMultipleSlices) michael@0: { michael@0: js::gc::State initialState = rt->gcIncrementalState; michael@0: int64_t budget; michael@0: if (type == ZealIncrementalMultipleSlices) { michael@0: /* michael@0: * Start with a small slice limit and double it every slice. This michael@0: * ensures that we get multiple slices, and that the collection runs to michael@0: * completion. michael@0: */ michael@0: if (initialState == NO_INCREMENTAL) michael@0: rt->gcIncrementalLimit = rt->gcZealFrequency / 2; michael@0: else michael@0: rt->gcIncrementalLimit *= 2; michael@0: budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit); michael@0: } else { michael@0: // This triggers incremental GC but is actually ignored by IncrementalCollectSlice. michael@0: budget = SliceBudget::WorkBudget(1); michael@0: } michael@0: michael@0: Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC); michael@0: michael@0: /* michael@0: * For multi-slice zeal, reset the slice size when we get to the sweep michael@0: * phase.
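michael@0: * (As a concrete illustration of the doubling above: with gcZealFrequency set
michael@0: * to 100, successive zeal slices run with work budgets of 50, 100, 200, 400,
michael@0: * and so on, until the collection reaches the sweep phase, at which point the
michael@0: * code below resets the limit to 50 again.)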
michael@0: */ michael@0: if (type == ZealIncrementalMultipleSlices && michael@0: initialState == MARK && rt->gcIncrementalState == SWEEP) michael@0: { michael@0: rt->gcIncrementalLimit = rt->gcZealFrequency / 2; michael@0: } michael@0: } else { michael@0: Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC); michael@0: } michael@0: michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: gc::SetDeterministicGC(JSContext *cx, bool enabled) michael@0: { michael@0: #ifdef JS_GC_ZEAL michael@0: JSRuntime *rt = cx->runtime(); michael@0: rt->gcDeterministicOnly = enabled; michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: gc::SetValidateGC(JSContext *cx, bool enabled) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: rt->gcValidate = enabled; michael@0: } michael@0: michael@0: void michael@0: gc::SetFullCompartmentChecks(JSContext *cx, bool enabled) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: rt->gcFullCompartmentChecks = enabled; michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: michael@0: /* Should only be called manually under gdb */ michael@0: void PreventGCDuringInteractiveDebug() michael@0: { michael@0: TlsPerThreadData.get()->suppressGC++; michael@0: } michael@0: michael@0: #endif michael@0: michael@0: void michael@0: js::ReleaseAllJITCode(FreeOp *fop) michael@0: { michael@0: #ifdef JS_ION michael@0: michael@0: # ifdef JSGC_GENERATIONAL michael@0: /* michael@0: * Scripts can entrain nursery things, inserting references to the script michael@0: * into the store buffer. Clear the store buffer before discarding scripts. michael@0: */ michael@0: MinorGC(fop->runtime(), JS::gcreason::EVICT_NURSERY); michael@0: # endif michael@0: michael@0: for (ZonesIter zone(fop->runtime(), SkipAtoms); !zone.done(); zone.next()) { michael@0: if (!zone->jitZone()) michael@0: continue; michael@0: michael@0: # ifdef DEBUG michael@0: /* Assert no baseline scripts are marked as active. */ michael@0: for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) { michael@0: JSScript *script = i.get<JSScript>(); michael@0: JS_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active()); michael@0: } michael@0: # endif michael@0: michael@0: /* Mark baseline scripts on the stack as active. */ michael@0: jit::MarkActiveBaselineScripts(zone); michael@0: michael@0: jit::InvalidateAll(fop, zone); michael@0: michael@0: for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) { michael@0: JSScript *script = i.get<JSScript>(); michael@0: jit::FinishInvalidation<SequentialExecution>(fop, script); michael@0: jit::FinishInvalidation<ParallelExecution>(fop, script); michael@0: michael@0: /* michael@0: * Discard baseline script if it's not marked as active. Note that michael@0: * this also resets the active flag. michael@0: */ michael@0: jit::FinishDiscardBaselineScript(fop, script); michael@0: } michael@0: michael@0: zone->jitZone()->optimizedStubSpace()->free(); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: /* michael@0: * There are three possible PCCount profiling states: michael@0: * michael@0: * 1. None: Neither scripts nor the runtime have count information. michael@0: * 2. Profile: Active scripts have count information, the runtime does not. michael@0: * 3. Query: Scripts do not have count information, the runtime does. michael@0: * michael@0: * When starting to profile scripts, counting begins immediately, with all JIT michael@0: * code discarded and recompiled with counts as necessary.
Active interpreter michael@0: * frames will not begin profiling until they begin executing another script michael@0: * (via a call or return). michael@0: * michael@0: * The API functions below manage transitions to new states, according michael@0: * to the following table. michael@0: * michael@0: * Old State michael@0: * ------------------------- michael@0: * Function None Profile Query michael@0: * -------- michael@0: * StartPCCountProfiling Profile Profile Profile michael@0: * StopPCCountProfiling None Query Query michael@0: * PurgePCCounts None None None michael@0: */ michael@0: michael@0: static void michael@0: ReleaseScriptCounts(FreeOp *fop) michael@0: { michael@0: JSRuntime *rt = fop->runtime(); michael@0: JS_ASSERT(rt->scriptAndCountsVector); michael@0: michael@0: ScriptAndCountsVector &vec = *rt->scriptAndCountsVector; michael@0: michael@0: for (size_t i = 0; i < vec.length(); i++) michael@0: vec[i].scriptCounts.destroy(fop); michael@0: michael@0: fop->delete_(rt->scriptAndCountsVector); michael@0: rt->scriptAndCountsVector = nullptr; michael@0: } michael@0: michael@0: JS_FRIEND_API(void) michael@0: js::StartPCCountProfiling(JSContext *cx) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: michael@0: if (rt->profilingScripts) michael@0: return; michael@0: michael@0: if (rt->scriptAndCountsVector) michael@0: ReleaseScriptCounts(rt->defaultFreeOp()); michael@0: michael@0: ReleaseAllJITCode(rt->defaultFreeOp()); michael@0: michael@0: rt->profilingScripts = true; michael@0: } michael@0: michael@0: JS_FRIEND_API(void) michael@0: js::StopPCCountProfiling(JSContext *cx) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: michael@0: if (!rt->profilingScripts) michael@0: return; michael@0: JS_ASSERT(!rt->scriptAndCountsVector); michael@0: michael@0: ReleaseAllJITCode(rt->defaultFreeOp()); michael@0: michael@0: ScriptAndCountsVector *vec = cx->new_<ScriptAndCountsVector>(SystemAllocPolicy()); michael@0: if (!vec) michael@0: return; michael@0: michael@0: for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) { michael@0: for (CellIter i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) { michael@0: JSScript *script = i.get<JSScript>(); michael@0: if (script->hasScriptCounts() && script->types) { michael@0: ScriptAndCounts sac; michael@0: sac.script = script; michael@0: sac.scriptCounts.set(script->releaseScriptCounts()); michael@0: if (!vec->append(sac)) michael@0: sac.scriptCounts.destroy(rt->defaultFreeOp()); michael@0: } michael@0: } michael@0: } michael@0: michael@0: rt->profilingScripts = false; michael@0: rt->scriptAndCountsVector = vec; michael@0: } michael@0: michael@0: JS_FRIEND_API(void) michael@0: js::PurgePCCounts(JSContext *cx) michael@0: { michael@0: JSRuntime *rt = cx->runtime(); michael@0: michael@0: if (!rt->scriptAndCountsVector) michael@0: return; michael@0: JS_ASSERT(!rt->profilingScripts); michael@0: michael@0: ReleaseScriptCounts(rt->defaultFreeOp()); michael@0: } michael@0: michael@0: void michael@0: js::PurgeJITCaches(Zone *zone) michael@0: { michael@0: #ifdef JS_ION michael@0: for (CellIterUnderGC i(zone, FINALIZE_SCRIPT); !i.done(); i.next()) { michael@0: JSScript *script = i.get<JSScript>(); michael@0: michael@0: /* Discard Ion caches.
*/ michael@0: jit::PurgeCaches(script); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: ArenaLists::normalizeBackgroundFinalizeState(AllocKind thingKind) michael@0: { michael@0: volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind]; michael@0: switch (*bfs) { michael@0: case BFS_DONE: michael@0: break; michael@0: case BFS_JUST_FINISHED: michael@0: // No allocations between end of last sweep and now. michael@0: // Transferring over arenas is a kind of allocation. michael@0: *bfs = BFS_DONE; michael@0: break; michael@0: default: michael@0: JS_ASSERT(!"Background finalization in progress, but it should not be."); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: void michael@0: ArenaLists::adoptArenas(JSRuntime *rt, ArenaLists *fromArenaLists) michael@0: { michael@0: // The other parallel threads have all completed now, and GC michael@0: // should be inactive, but still take the lock as a kind of read michael@0: // fence. michael@0: AutoLockGC lock(rt); michael@0: michael@0: fromArenaLists->purge(); michael@0: michael@0: for (size_t thingKind = 0; thingKind != FINALIZE_LIMIT; thingKind++) { michael@0: #ifdef JS_THREADSAFE michael@0: // When we enter a parallel section, we join the background michael@0: // thread, and we do not run GC while in the parallel section, michael@0: // so no finalizer should be active! michael@0: normalizeBackgroundFinalizeState(AllocKind(thingKind)); michael@0: fromArenaLists->normalizeBackgroundFinalizeState(AllocKind(thingKind)); michael@0: #endif michael@0: ArenaList *fromList = &fromArenaLists->arenaLists[thingKind]; michael@0: ArenaList *toList = &arenaLists[thingKind]; michael@0: while (fromList->head != nullptr) { michael@0: // Remove entry from |fromList| michael@0: ArenaHeader *fromHeader = fromList->head; michael@0: fromList->head = fromHeader->next; michael@0: fromHeader->next = nullptr; michael@0: michael@0: // During parallel execution, we sometimes keep empty arenas michael@0: // on the lists rather than sending them back to the chunk. michael@0: // Therefore, if fromHeader is empty, send it back to the michael@0: // chunk now. Otherwise, attach to |toList|.
michael@0: if (fromHeader->isEmpty()) michael@0: fromHeader->chunk()->releaseArena(fromHeader); michael@0: else michael@0: toList->insert(fromHeader); michael@0: } michael@0: fromList->cursor = &fromList->head; michael@0: } michael@0: } michael@0: michael@0: bool michael@0: ArenaLists::containsArena(JSRuntime *rt, ArenaHeader *needle) michael@0: { michael@0: AutoLockGC lock(rt); michael@0: size_t allocKind = needle->getAllocKind(); michael@0: for (ArenaHeader *aheader = arenaLists[allocKind].head; michael@0: aheader != nullptr; michael@0: aheader = aheader->next) michael@0: { michael@0: if (aheader == needle) michael@0: return true; michael@0: } michael@0: return false; michael@0: } michael@0: michael@0: michael@0: AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSContext *cx) michael@0: : runtime(cx->runtime()), michael@0: markCount(runtime->gcObjectsMarkedInDeadZones), michael@0: inIncremental(JS::IsIncrementalGCInProgress(runtime)), michael@0: manipulatingDeadZones(runtime->gcManipulatingDeadZones) michael@0: { michael@0: runtime->gcManipulatingDeadZones = true; michael@0: } michael@0: michael@0: AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSObject *obj) michael@0: : runtime(obj->compartment()->runtimeFromMainThread()), michael@0: markCount(runtime->gcObjectsMarkedInDeadZones), michael@0: inIncremental(JS::IsIncrementalGCInProgress(runtime)), michael@0: manipulatingDeadZones(runtime->gcManipulatingDeadZones) michael@0: { michael@0: runtime->gcManipulatingDeadZones = true; michael@0: } michael@0: michael@0: AutoMaybeTouchDeadZones::~AutoMaybeTouchDeadZones() michael@0: { michael@0: runtime->gcManipulatingDeadZones = manipulatingDeadZones; michael@0: michael@0: if (inIncremental && runtime->gcObjectsMarkedInDeadZones != markCount) { michael@0: JS::PrepareForFullGC(runtime); michael@0: js::GC(runtime, GC_NORMAL, JS::gcreason::TRANSPLANT); michael@0: } michael@0: } michael@0: michael@0: AutoSuppressGC::AutoSuppressGC(ExclusiveContext *cx) michael@0: : suppressGC_(cx->perThreadData->suppressGC) michael@0: { michael@0: suppressGC_++; michael@0: } michael@0: michael@0: AutoSuppressGC::AutoSuppressGC(JSCompartment *comp) michael@0: : suppressGC_(comp->runtimeFromMainThread()->mainThread.suppressGC) michael@0: { michael@0: suppressGC_++; michael@0: } michael@0: michael@0: AutoSuppressGC::AutoSuppressGC(JSRuntime *rt) michael@0: : suppressGC_(rt->mainThread.suppressGC) michael@0: { michael@0: suppressGC_++; michael@0: } michael@0: michael@0: bool michael@0: js::UninlinedIsInsideNursery(JSRuntime *rt, const void *thing) michael@0: { michael@0: return IsInsideNursery(rt, thing); michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime *rt michael@0: MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL) michael@0: : count(rt->gcDisableStrictProxyCheckingCount) michael@0: { michael@0: MOZ_GUARD_OBJECT_NOTIFIER_INIT; michael@0: count++; michael@0: } michael@0: michael@0: JS_FRIEND_API(void) michael@0: JS::AssertGCThingMustBeTenured(JSObject *obj) michael@0: { michael@0: JS_ASSERT((!IsNurseryAllocable(obj->tenuredGetAllocKind()) || obj->getClass()->finalize) && michael@0: obj->isTenured()); michael@0: } michael@0: michael@0: JS_FRIEND_API(size_t) michael@0: JS::GetGCNumber() michael@0: { michael@0: JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread(); michael@0: if (!rt) michael@0: return 0; michael@0: return rt->gcNumber; michael@0: } michael@0: michael@0: JS::AutoAssertNoGC::AutoAssertNoGC() michael@0: : runtime(nullptr), gcNumber(0) 
michael@0: { michael@0: js::PerThreadData *data = js::TlsPerThreadData.get(); michael@0: if (data) { michael@0: /* michael@0: * GCs from off-thread will always assert, so off-thread is implicitly michael@0: * AutoAssertNoGC. We still need to allow AutoAssertNoGC to be used in michael@0: * code that works from both threads, however. We also use this to michael@0: * annotate the off-thread run loops. michael@0: */ michael@0: runtime = data->runtimeIfOnOwnerThread(); michael@0: if (runtime) michael@0: gcNumber = runtime->gcNumber; michael@0: } michael@0: } michael@0: michael@0: JS::AutoAssertNoGC::AutoAssertNoGC(JSRuntime *rt) michael@0: : runtime(rt), gcNumber(rt->gcNumber) michael@0: { michael@0: } michael@0: michael@0: JS::AutoAssertNoGC::~AutoAssertNoGC() michael@0: { michael@0: if (runtime) michael@0: MOZ_ASSERT(gcNumber == runtime->gcNumber, "GC ran inside an AutoAssertNoGC scope."); michael@0: } michael@0: #endif
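michael@0:
michael@0: /*
michael@0: * A minimal usage sketch for JS::AutoAssertNoGC above (the function name is
michael@0: * illustrative, not part of the engine): code that holds raw GC-thing
michael@0: * pointers across a region that must not collect can write
michael@0: *
michael@0: *   static void
michael@0: *   UseObjectWithoutGC(JSRuntime *rt, JSObject *obj)
michael@0: *   {
michael@0: *       JS::AutoAssertNoGC nogc(rt);
michael@0: *       // ... use |obj| freely here; if any collection runs before |nogc|
michael@0: *       // goes out of scope, rt->gcNumber changes and the destructor above
michael@0: *       // asserts (in DEBUG builds) ...
michael@0: *   }
michael@0: *
michael@0: * The default constructor can also be used from code that may run off the
michael@0: * main thread, since it only records a runtime when called on that runtime's
michael@0: * owner thread.
michael@0: */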