diff -r 000000000000 -r 6474c204b198 js/src/gc/Nursery.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/gc/Nursery.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,950 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JSGC_GENERATIONAL
+
+#include "gc/Nursery-inl.h"
+
+#include "jscompartment.h"
+#include "jsgc.h"
+#include "jsinfer.h"
+#include "jsutil.h"
+#include "prmjtime.h"
+
+#include "gc/GCInternals.h"
+#include "gc/Memory.h"
+#ifdef JS_ION
+#include "jit/IonFrames.h"
+#endif
+#include "mozilla/IntegerPrintfMacros.h"
+#include "vm/ArrayObject.h"
+#include "vm/Debugger.h"
+#if defined(DEBUG)
+#include "vm/ScopeObject.h"
+#endif
+#include "vm/TypedArrayObject.h"
+
+#include "jsgcinlines.h"
+
+#include "vm/ObjectImpl-inl.h"
+
+using namespace js;
+using namespace gc;
+using namespace mozilla;
+
+//#define PROFILE_NURSERY
+
+#ifdef PROFILE_NURSERY
+/*
+ * Print timing information for minor GCs that take longer than this time
+ * in microseconds.
+ */
+static int64_t GCReportThreshold = INT64_MAX;
+#endif
+
+bool
+js::Nursery::init()
+{
+    JS_ASSERT(start() == 0);
+
+    if (!hugeSlots.init())
+        return false;
+
+    void *heap = MapAlignedPages(runtime(), NurserySize, Alignment);
+    if (!heap)
+        return false;
+
+    JSRuntime *rt = runtime();
+    rt->gcNurseryStart_ = uintptr_t(heap);
+    currentStart_ = start();
+    rt->gcNurseryEnd_ = chunk(LastNurseryChunk).end();
+    numActiveChunks_ = 1;
+    JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, NurserySize);
+    setCurrentChunk(0);
+    updateDecommittedRegion();
+
+#ifdef PROFILE_NURSERY
+    char *env = getenv("JS_MINORGC_TIME");
+    if (env)
+        GCReportThreshold = atoi(env);
+#endif
+
+    JS_ASSERT(isEnabled());
+    return true;
+}
+
+js::Nursery::~Nursery()
+{
+    if (start())
+        UnmapPages(runtime(), (void *)start(), NurserySize);
+}
+
+void
+js::Nursery::enable()
+{
+    JS_ASSERT(isEmpty());
+    if (isEnabled())
+        return;
+    numActiveChunks_ = 1;
+    setCurrentChunk(0);
+    currentStart_ = position();
+#ifdef JS_GC_ZEAL
+    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
+        enterZealMode();
+#endif
+}
+
+void
+js::Nursery::disable()
+{
+    JS_ASSERT(isEmpty());
+    if (!isEnabled())
+        return;
+    numActiveChunks_ = 0;
+    currentEnd_ = 0;
+    updateDecommittedRegion();
+}
+
+bool
+js::Nursery::isEmpty() const
+{
+    JS_ASSERT(runtime_);
+    if (!isEnabled())
+        return true;
+    JS_ASSERT_IF(runtime_->gcZeal_ != ZealGenerationalGCValue, currentStart_ == start());
+    return position() == currentStart_;
+}
+
+JSObject *
+js::Nursery::allocateObject(JSContext *cx, size_t size, size_t numDynamic)
+{
+    /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
+    JS_ASSERT(size >= sizeof(RelocationOverlay));
+
+    /* Attempt to allocate slots contiguously after object, if possible. */
+    if (numDynamic && numDynamic <= MaxNurserySlots) {
+        size_t totalSize = size + sizeof(HeapSlot) * numDynamic;
+        JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
+        if (obj) {
+            obj->setInitialSlots(reinterpret_cast<HeapSlot *>(size_t(obj) + size));
+            return obj;
+        }
+        /* If we failed to allocate as a block, retry with out-of-line slots. */
+    }
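+
+    /*
+     * Illustrative note (sizes hypothetical): for a 32-byte object with 4
+     * dynamic slots, the fast path above makes one nursery allocation of
+     * 32 + 4 * sizeof(HeapSlot) bytes and points the slots just past the
+     * object; the fallback below instead mallocs the slots outside the
+     * nursery and tracks them in the hugeSlots set.
+     */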
+
+    HeapSlot *slots = nullptr;
+    if (numDynamic) {
+        slots = allocateHugeSlots(cx, numDynamic);
+        if (MOZ_UNLIKELY(!slots))
+            return nullptr;
+    }
+
+    JSObject *obj = static_cast<JSObject *>(allocate(size));
+
+    if (obj)
+        obj->setInitialSlots(slots);
+    else
+        freeSlots(cx, slots);
+
+    return obj;
+}
+
+void *
+js::Nursery::allocate(size_t size)
+{
+    JS_ASSERT(isEnabled());
+    JS_ASSERT(!runtime()->isHeapBusy());
+    JS_ASSERT(position() >= currentStart_);
+
+    if (position() + size > currentEnd()) {
+        if (currentChunk_ + 1 == numActiveChunks_)
+            return nullptr;
+        setCurrentChunk(currentChunk_ + 1);
+    }
+
+    void *thing = (void *)position();
+    position_ = position() + size;
+
+    JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
+    return thing;
+}
+
+/* Internally, this function is used to allocate elements as well as slots. */
+HeapSlot *
+js::Nursery::allocateSlots(JSContext *cx, JSObject *obj, uint32_t nslots)
+{
+    JS_ASSERT(obj);
+    JS_ASSERT(nslots > 0);
+
+    if (!isInside(obj))
+        return cx->pod_malloc<HeapSlot>(nslots);
+
+    if (nslots > MaxNurserySlots)
+        return allocateHugeSlots(cx, nslots);
+
+    size_t size = sizeof(HeapSlot) * nslots;
+    HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
+    if (slots)
+        return slots;
+
+    return allocateHugeSlots(cx, nslots);
+}
+
+ObjectElements *
+js::Nursery::allocateElements(JSContext *cx, JSObject *obj, uint32_t nelems)
+{
+    JS_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
+    return reinterpret_cast<ObjectElements *>(allocateSlots(cx, obj, nelems));
+}
+
+HeapSlot *
+js::Nursery::reallocateSlots(JSContext *cx, JSObject *obj, HeapSlot *oldSlots,
+                             uint32_t oldCount, uint32_t newCount)
+{
+    size_t oldSize = oldCount * sizeof(HeapSlot);
+    size_t newSize = newCount * sizeof(HeapSlot);
+
+    if (!isInside(obj))
+        return static_cast<HeapSlot *>(cx->realloc_(oldSlots, oldSize, newSize));
+
+    if (!isInside(oldSlots)) {
+        HeapSlot *newSlots = static_cast<HeapSlot *>(cx->realloc_(oldSlots, oldSize, newSize));
+        if (oldSlots != newSlots) {
+            hugeSlots.remove(oldSlots);
+            /* If this put fails, we will only leak the slots. */
+            (void)hugeSlots.put(newSlots);
+        }
+        return newSlots;
+    }
+
+    /* The nursery cannot make use of the returned slots data. */
+    if (newCount < oldCount)
+        return oldSlots;
+
+    HeapSlot *newSlots = allocateSlots(cx, obj, newCount);
+    PodCopy(newSlots, oldSlots, oldCount);
+    return newSlots;
+}
+
+ObjectElements *
+js::Nursery::reallocateElements(JSContext *cx, JSObject *obj, ObjectElements *oldHeader,
+                                uint32_t oldCount, uint32_t newCount)
+{
+    HeapSlot *slots = reallocateSlots(cx, obj, reinterpret_cast<HeapSlot *>(oldHeader),
+                                      oldCount, newCount);
+    return reinterpret_cast<ObjectElements *>(slots);
+}
+
+void
+js::Nursery::freeSlots(JSContext *cx, HeapSlot *slots)
+{
+    if (!isInside(slots)) {
+        hugeSlots.remove(slots);
+        js_free(slots);
+    }
+}
+
+HeapSlot *
+js::Nursery::allocateHugeSlots(JSContext *cx, size_t nslots)
+{
+    HeapSlot *slots = cx->pod_malloc<HeapSlot>(nslots);
+    /* If this put fails, we will only leak the slots. */
+    (void)hugeSlots.put(slots);
+    return slots;
+}
+
+void
+js::Nursery::notifyInitialSlots(Cell *cell, HeapSlot *slots)
+{
+    if (isInside(cell) && !isInside(slots)) {
+        /* If this put fails, we will only leak the slots. */
+        (void)hugeSlots.put(slots);
+    }
+}
+
+namespace js {
+namespace gc {
+
+class MinorCollectionTracer : public JSTracer
+{
+  public:
+    Nursery *nursery;
+    AutoTraceSession session;
+
+    /* Amount of data moved to the tenured generation during collection. */
+    size_t tenuredSize;
+
+    /*
+     * This list is threaded through the Nursery using the space from already
+     * moved things. The list is used to fix up the moved things and to find
+     * things held live by intra-Nursery pointers.
+     */
+    RelocationOverlay *head;
+    RelocationOverlay **tail;
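+
+    /*
+     * Illustration (describing the two fields above, not adding machinery):
+     * after overlays A and then B are inserted, the list is
+     *
+     *   head -> A -> B -> nullptr, with tail == &B->next_,
+     *
+     * so insertIntoFixupList() appends in constant time while
+     * collectToFixedPoint() consumes the list from head, letting the
+     * worklist grow as it is traversed.
+     */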
+
+    /* Save and restore all of the runtime state we use during MinorGC. */
+    bool savedRuntimeNeedBarrier;
+    AutoDisableProxyCheck disableStrictProxyChecking;
+    AutoEnterOOMUnsafeRegion oomUnsafeRegion;
+    ArrayBufferVector liveArrayBuffers;
+
+    /* Insert the given relocation entry into the list of things to visit. */
+    MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry) {
+        *tail = entry;
+        tail = &entry->next_;
+        *tail = nullptr;
+    }
+
+    MinorCollectionTracer(JSRuntime *rt, Nursery *nursery)
+      : JSTracer(rt, Nursery::MinorGCCallback, TraceWeakMapKeysValues),
+        nursery(nursery),
+        session(rt, MinorCollecting),
+        tenuredSize(0),
+        head(nullptr),
+        tail(&head),
+        savedRuntimeNeedBarrier(rt->needsBarrier()),
+        disableStrictProxyChecking(rt)
+    {
+        rt->gcNumber++;
+
+        /*
+         * We disable the runtime needsBarrier() check so that pre-barriers do
+         * not fire on objects that have been relocated. The pre-barrier's
+         * call to obj->zone() will try to look through shape_, which is now
+         * the relocation magic and will crash. However, zone->needsBarrier()
+         * must still be set correctly so that allocations we make in minor
+         * GCs between incremental slices will allocate their objects marked.
+         */
+        rt->setNeedsBarrier(false);
+
+        /*
+         * We use the live array buffer lists to track traced buffers so we
+         * can sweep their dead views. Incremental collection also uses these
+         * lists, so we may need to save and restore their contents here.
+         */
+        if (rt->gcIncrementalState != NO_INCREMENTAL) {
+            for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
+                if (!ArrayBufferObject::saveArrayBufferList(c, liveArrayBuffers))
+                    CrashAtUnhandlableOOM("OOM while saving live array buffers");
+                ArrayBufferObject::resetArrayBufferList(c);
+            }
+        }
+    }
+
+    ~MinorCollectionTracer() {
+        runtime()->setNeedsBarrier(savedRuntimeNeedBarrier);
+        if (runtime()->gcIncrementalState != NO_INCREMENTAL)
+            ArrayBufferObject::restoreArrayBufferLists(liveArrayBuffers);
+    }
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+static AllocKind
+GetObjectAllocKindForCopy(JSRuntime *rt, JSObject *obj)
+{
+    if (obj->is<ArrayObject>()) {
+        JS_ASSERT(obj->numFixedSlots() == 0);
+
+        /* Use minimal size object if we are just going to copy the pointer. */
+        if (!IsInsideNursery(rt, (void *)obj->getElementsHeader()))
+            return FINALIZE_OBJECT0_BACKGROUND;
+
+        size_t nelements = obj->getDenseCapacity();
+        return GetBackgroundAllocKind(GetGCArrayKind(nelements));
+    }
+
+    if (obj->is<JSFunction>())
+        return obj->as<JSFunction>().getAllocKind();
+
+    /*
+     * Typed arrays in the nursery may have a lazily allocated buffer; make
+     * sure there is room for the array's fixed data when moving the array.
+     */
+    if (obj->is<TypedArrayObject>() && !obj->as<TypedArrayObject>().buffer()) {
+        size_t nbytes = obj->as<TypedArrayObject>().byteLength();
+        return GetBackgroundAllocKind(TypedArrayObject::AllocKindForLazyBuffer(nbytes));
+    }
+
+    AllocKind kind = GetGCObjectFixedSlotsKind(obj->numFixedSlots());
+    JS_ASSERT(!IsBackgroundFinalized(kind));
+    JS_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
+    return GetBackgroundAllocKind(kind);
+}
+
+void *
+js::Nursery::allocateFromTenured(Zone *zone, AllocKind thingKind)
+{
+    void *t = zone->allocator.arenas.allocateFromFreeList(thingKind, Arena::thingSize(thingKind));
+    if (t)
+        return t;
+    zone->allocator.arenas.checkEmptyFreeList(thingKind);
+    return zone->allocator.arenas.allocateFromArena(zone, thingKind);
+}
+
+void
+js::Nursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
+{
+    JS_ASSERT(nslots > 0);
+    JS_ASSERT(isInside(oldSlots));
+    JS_ASSERT(!isInside(newSlots));
+    *reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
+}
+
+void
+js::Nursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
+                                          uint32_t nelems)
+{
+    /*
+     * If the JIT has hoisted a zero length pointer, then we do not need to
+     * relocate it because reads and writes to/from this pointer are invalid.
+     */
+    if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
+        return;
+    JS_ASSERT(isInside(oldHeader));
+    JS_ASSERT(!isInside(newHeader));
+    *reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
+}
+
+#ifdef DEBUG
+static bool IsWriteableAddress(void *ptr)
+{
+    volatile uint64_t *vPtr = reinterpret_cast<volatile uint64_t *>(ptr);
+    *vPtr = *vPtr;
+    return true;
+}
+#endif
+
+void
+js::Nursery::forwardBufferPointer(HeapSlot **pSlotsElems)
+{
+    HeapSlot *old = *pSlotsElems;
+
+    if (!isInside(old))
+        return;
+
+    /*
+     * If the elements buffer is zero length, the "first" item could be inside
+     * of the next object or past the end of the allocable area. However,
+     * since we always store the runtime as the last word in the nursery,
+     * isInside will still be true, even if this zero-size allocation abuts the
+     * end of the allocable area. Thus, it is always safe to read the first
+     * word of |old| here.
+     */
+    *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
+    JS_ASSERT(!isInside(*pSlotsElems));
+    JS_ASSERT(IsWriteableAddress(*pSlotsElems));
+}
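+
+/*
+ * Sketch of the forwarding scheme implemented above (addresses are made up):
+ * when a nursery slots buffer at 0x1000 moves to a malloc'd buffer at 0x5000,
+ * setSlotsForwardingPointer() overwrites the first word of the dead buffer:
+ *
+ *   0x1000: [ 0x5000 ][ stale slot ][ stale slot ] ...
+ *
+ * so forwardBufferPointer() can later redirect any leftover pointer to
+ * 0x1000 by reading back that first word.
+ */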
+
+// Structure for counting how many times objects of a particular type have
+// been tenured during a minor collection.
+struct TenureCount
+{
+    types::TypeObject *type;
+    int count;
+};
+
+// Keep rough track of how many times we tenure objects of particular types
+// during minor collections, using a fixed size hash for efficiency at the
+// cost of potential collisions.
+struct Nursery::TenureCountCache
+{
+    TenureCount entries[16];
+
+    TenureCountCache() { PodZero(this); }
+
+    TenureCount &findEntry(types::TypeObject *type) {
+        return entries[PointerHasher<types::TypeObject *, 3>::hash(type) % ArrayLength(entries)];
+    }
+};
+
+void
+js::Nursery::collectToFixedPoint(MinorCollectionTracer *trc, TenureCountCache &tenureCounts)
+{
+    for (RelocationOverlay *p = trc->head; p; p = p->next()) {
+        JSObject *obj = static_cast<JSObject *>(p->forwardingAddress());
+        traceObject(trc, obj);
+
+        TenureCount &entry = tenureCounts.findEntry(obj->type());
+        if (entry.type == obj->type()) {
+            entry.count++;
+        } else if (!entry.type) {
+            entry.type = obj->type();
+            entry.count = 1;
+        }
+    }
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::traceObject(MinorCollectionTracer *trc, JSObject *obj)
+{
+    const Class *clasp = obj->getClass();
+    if (clasp->trace)
+        clasp->trace(trc, obj);
+
+    if (!obj->isNative())
+        return;
+
+    if (!obj->hasEmptyElements())
+        markSlots(trc, obj->getDenseElements(), obj->getDenseInitializedLength());
+
+    HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
+    obj->getSlotRange(0, obj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
+    markSlots(trc, fixedStart, fixedEnd);
+    markSlots(trc, dynStart, dynEnd);
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::markSlots(MinorCollectionTracer *trc, HeapSlot *vp, uint32_t nslots)
+{
+    markSlots(trc, vp, vp + nslots);
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::markSlots(MinorCollectionTracer *trc, HeapSlot *vp, HeapSlot *end)
+{
+    for (; vp != end; ++vp)
+        markSlot(trc, vp);
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::markSlot(MinorCollectionTracer *trc, HeapSlot *slotp)
+{
+    if (!slotp->isObject())
+        return;
+
+    JSObject *obj = &slotp->toObject();
+    if (!isInside(obj))
+        return;
+
+    if (getForwardedPointer(&obj)) {
+        slotp->unsafeGet()->setObject(*obj);
+        return;
+    }
+
+    JSObject *tenured = static_cast<JSObject *>(moveToTenured(trc, obj));
+    slotp->unsafeGet()->setObject(*tenured);
+}
+
+void *
+js::Nursery::moveToTenured(MinorCollectionTracer *trc, JSObject *src)
+{
+    Zone *zone = src->zone();
+    AllocKind dstKind = GetObjectAllocKindForCopy(trc->runtime(), src);
+    JSObject *dst = static_cast<JSObject *>(allocateFromTenured(zone, dstKind));
+    if (!dst)
+        CrashAtUnhandlableOOM("Failed to allocate object while tenuring.");
+
+    trc->tenuredSize += moveObjectToTenured(dst, src, dstKind);
+
+    RelocationOverlay *overlay = reinterpret_cast<RelocationOverlay *>(src);
+    overlay->forwardTo(dst);
+    trc->insertIntoFixupList(overlay);
+
+    return static_cast<void *>(dst);
+}
+
+size_t
+js::Nursery::moveObjectToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
+{
+    size_t srcSize = Arena::thingSize(dstKind);
+    size_t tenuredSize = srcSize;
+
+    /*
+     * Arrays do not necessarily have the same AllocKind between src and dst.
+     * We deal with this by copying elements manually, possibly re-inlining
+     * them if there is adequate room inline in dst.
+     */
+    if (src->is<ArrayObject>())
+        srcSize = sizeof(ObjectImpl);
+
+    js_memcpy(dst, src, srcSize);
+    tenuredSize += moveSlotsToTenured(dst, src, dstKind);
+    tenuredSize += moveElementsToTenured(dst, src, dstKind);
+
+    if (src->is<TypedArrayObject>())
+        forwardTypedArrayPointers(dst, src);
+
+    /* The shape's list head may point into the old object. */
+    if (&src->shape_ == dst->shape_->listp)
+        dst->shape_->listp = &dst->shape_;
+
+    return tenuredSize;
+}
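+
+/*
+ * Worked example for the array special case above (sizes hypothetical): a
+ * dense array's elements live behind obj->elements, so only the ObjectImpl
+ * header is memcpy'd; if its 8 nursery-allocated elements fit the tenured
+ * AllocKind, moveElementsToTenured() re-inlines them in dst, which is why
+ * src and dst need not share an AllocKind.
+ */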
+
+void
+js::Nursery::forwardTypedArrayPointers(JSObject *dst, JSObject *src)
+{
+    /*
+     * Typed array data may be stored inline inside the object's fixed slots.
+     * If so, we need to update the private pointer and leave a forwarding
+     * pointer at the start of the data.
+     */
+    TypedArrayObject &typedArray = src->as<TypedArrayObject>();
+    JS_ASSERT_IF(typedArray.buffer(), !isInside(src->getPrivate()));
+    if (typedArray.buffer())
+        return;
+
+    void *srcData = src->fixedData(TypedArrayObject::FIXED_DATA_START);
+    void *dstData = dst->fixedData(TypedArrayObject::FIXED_DATA_START);
+    JS_ASSERT(src->getPrivate() == srcData);
+    dst->setPrivate(dstData);
+
+    /*
+     * We don't know the number of slots here, but
+     * TypedArrayObject::AllocKindForLazyBuffer ensures that it's always at
+     * least one.
+     */
+    size_t nslots = 1;
+    setSlotsForwardingPointer(reinterpret_cast<HeapSlot *>(srcData),
+                              reinterpret_cast<HeapSlot *>(dstData),
+                              nslots);
+}
+
+size_t
+js::Nursery::moveSlotsToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
+{
+    /* Fixed slots have already been copied over. */
+    if (!src->hasDynamicSlots())
+        return 0;
+
+    if (!isInside(src->slots)) {
+        hugeSlots.remove(src->slots);
+        return 0;
+    }
+
+    Zone *zone = src->zone();
+    size_t count = src->numDynamicSlots();
+    dst->slots = zone->pod_malloc<HeapSlot>(count);
+    if (!dst->slots)
+        CrashAtUnhandlableOOM("Failed to allocate slots while tenuring.");
+    PodCopy(dst->slots, src->slots, count);
+    setSlotsForwardingPointer(src->slots, dst->slots, count);
+    return count * sizeof(HeapSlot);
+}
+
+size_t
+js::Nursery::moveElementsToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
+{
+    if (src->hasEmptyElements())
+        return 0;
+
+    Zone *zone = src->zone();
+    ObjectElements *srcHeader = src->getElementsHeader();
+    ObjectElements *dstHeader;
+
+    /* TODO Bug 874151: Prefer to put element data inline if we have space. */
+    if (!isInside(srcHeader)) {
+        JS_ASSERT(src->elements == dst->elements);
+        hugeSlots.remove(reinterpret_cast<HeapSlot *>(srcHeader));
+        return 0;
+    }
+
+    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;
+
+    /* Unlike other objects, Arrays can have fixed elements. */
+    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
+        dst->setFixedElements();
+        dstHeader = dst->getElementsHeader();
+        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
+        setElementsForwardingPointer(srcHeader, dstHeader, nslots);
+        return nslots * sizeof(HeapSlot);
+    }
+
+    JS_ASSERT(nslots >= 2);
+    size_t nbytes = nslots * sizeof(HeapValue);
+    dstHeader = static_cast<ObjectElements *>(zone->malloc_(nbytes));
+    if (!dstHeader)
+        CrashAtUnhandlableOOM("Failed to allocate elements while tenuring.");
+    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
+    setElementsForwardingPointer(srcHeader, dstHeader, nslots);
+    dst->elements = dstHeader->elements();
+    return nslots * sizeof(HeapSlot);
+}
+
+static bool
+ShouldMoveToTenured(MinorCollectionTracer *trc, void **thingp)
+{
+    Cell *cell = static_cast<Cell *>(*thingp);
+    Nursery &nursery = *trc->nursery;
+    return !nursery.isInside(thingp) && nursery.isInside(cell) &&
+           !nursery.getForwardedPointer(thingp);
+}
+
+/* static */ void
+js::Nursery::MinorGCCallback(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
+{
+    MinorCollectionTracer *trc = static_cast<MinorCollectionTracer *>(jstrc);
+    if (ShouldMoveToTenured(trc, thingp))
+        *thingp = trc->nursery->moveToTenured(trc, static_cast<JSObject *>(*thingp));
+}
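+
+/*
+ * A note on the predicate above (explanatory only): an edge is moved iff the
+ * slot holding it lies outside the nursery (intra-nursery edges are traced
+ * when their holder is itself moved), the referent lies inside the nursery,
+ * and the referent has not already been forwarded; for already-forwarded
+ * referents, getForwardedPointer() updates the edge in place.
+ */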
+
+static void
+CheckHashTablesAfterMovingGC(JSRuntime *rt)
+{
+#ifdef JS_GC_ZEAL
+    if (rt->gcZeal() == ZealCheckHashTablesOnMinorGC) {
+        /* Check that internal hash tables no longer have any pointers into the nursery. */
+        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
+            c->checkNewTypeObjectTableAfterMovingGC();
+            c->checkInitialShapesTableAfterMovingGC();
+            c->checkWrapperMapAfterMovingGC();
+            if (c->debugScopes)
+                c->debugScopes->checkHashTablesAfterMovingGC(rt);
+        }
+    }
+#endif
+}
+
+#ifdef PROFILE_NURSERY
+#define TIME_START(name) int64_t timestampStart_##name = PRMJ_Now()
+#define TIME_END(name) int64_t timestampEnd_##name = PRMJ_Now()
+#define TIME_TOTAL(name) (timestampEnd_##name - timestampStart_##name)
+#else
+#define TIME_START(name)
+#define TIME_END(name)
+#define TIME_TOTAL(name)
+#endif
+
+void
+js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList *pretenureTypes)
+{
+    JS_AbortIfWrongThread(rt);
+
+    if (rt->mainThread.suppressGC)
+        return;
+
+    if (!isEnabled())
+        return;
+
+    if (isEmpty())
+        return;
+
+    rt->gcStats.count(gcstats::STAT_MINOR_GC);
+
+    TIME_START(total);
+
+    AutoStopVerifyingBarriers av(rt, false);
+
+    // Move objects pointed to by roots from the nursery to the major heap.
+    MinorCollectionTracer trc(rt, this);
+
+    // Mark the store buffer. This must happen first.
+    StoreBuffer &sb = rt->gcStoreBuffer;
+    TIME_START(markValues);
+    sb.markValues(&trc);
+    TIME_END(markValues);
+
+    TIME_START(markCells);
+    sb.markCells(&trc);
+    TIME_END(markCells);
+
+    TIME_START(markSlots);
+    sb.markSlots(&trc);
+    TIME_END(markSlots);
+
+    TIME_START(markWholeCells);
+    sb.markWholeCells(&trc);
+    TIME_END(markWholeCells);
+
+    TIME_START(markRelocatableValues);
+    sb.markRelocatableValues(&trc);
+    TIME_END(markRelocatableValues);
+
+    TIME_START(markRelocatableCells);
+    sb.markRelocatableCells(&trc);
+    TIME_END(markRelocatableCells);
+
+    TIME_START(markGenericEntries);
+    sb.markGenericEntries(&trc);
+    TIME_END(markGenericEntries);
+
+    TIME_START(checkHashTables);
+    CheckHashTablesAfterMovingGC(rt);
+    TIME_END(checkHashTables);
+
+    TIME_START(markRuntime);
+    MarkRuntime(&trc);
+    TIME_END(markRuntime);
+
+    TIME_START(markDebugger);
+    Debugger::markAll(&trc);
+    TIME_END(markDebugger);
+
+    TIME_START(clearNewObjectCache);
+    rt->newObjectCache.clearNurseryObjects(rt);
+    TIME_END(clearNewObjectCache);
+
+    // Most of the work is done here. This loop iterates over objects that
+    // have been moved to the major heap. If these objects have any outgoing
+    // pointers to the nursery, then those nursery objects get moved as well,
+    // until no objects are left to move. That is, we iterate to a fixed point.
+    TIME_START(collectToFP);
+    TenureCountCache tenureCounts;
+    collectToFixedPoint(&trc, tenureCounts);
+    TIME_END(collectToFP);
+
+    // Update the array buffer objects' view lists.
+    TIME_START(sweepArrayBufferViewList);
+    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
+        if (!c->gcLiveArrayBuffers.empty())
+            ArrayBufferObject::sweep(c);
+    }
+    TIME_END(sweepArrayBufferViewList);
+
+    // Update any slot or element pointers whose destination has been tenured.
+    TIME_START(updateJitActivations);
+#ifdef JS_ION
+    js::jit::UpdateJitActivationsForMinorGC(rt, &trc);
+#endif
+    TIME_END(updateJitActivations);
+
+    // Resize the nursery.
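+    // Illustration of the policy below (numbers hypothetical): if 0.5 MiB
+    // out of 16 MiB of allocated nursery space survived, the promotion rate
+    // is ~3% and the chunk count stays put; above 5%, growAllocableSpace()
+    // doubles the active chunk count (capped at NumNurseryChunks), and below
+    // 1%, shrinkAllocableSpace() gives one chunk back.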
+    TIME_START(resize);
+    double promotionRate = trc.tenuredSize / double(allocationEnd() - start());
+    if (promotionRate > 0.05)
+        growAllocableSpace();
+    else if (promotionRate < 0.01)
+        shrinkAllocableSpace();
+    TIME_END(resize);
+
+    // If we are promoting the nursery, or exhausted the store buffer with
+    // pointers to nursery things, which will force a collection well before
+    // the nursery is full, look for object types that are getting promoted
+    // excessively and try to pretenure them.
+    TIME_START(pretenure);
+    if (pretenureTypes && (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER)) {
+        for (size_t i = 0; i < ArrayLength(tenureCounts.entries); i++) {
+            const TenureCount &entry = tenureCounts.entries[i];
+            if (entry.count >= 3000)
+                pretenureTypes->append(entry.type); // ignore alloc failure
+        }
+    }
+    TIME_END(pretenure);
+
+    // Sweep.
+    TIME_START(freeHugeSlots);
+    freeHugeSlots(rt);
+    TIME_END(freeHugeSlots);
+
+    TIME_START(sweep);
+    sweep(rt);
+    TIME_END(sweep);
+
+    TIME_START(clearStoreBuffer);
+    rt->gcStoreBuffer.clear();
+    TIME_END(clearStoreBuffer);
+
+    // We ignore gcMaxBytes when allocating for minor collection. However, if
+    // we overflowed, we disable the nursery. The next time we allocate, we'll
+    // fail because gcBytes >= gcMaxBytes.
+    if (rt->gcBytes >= rt->gcMaxBytes)
+        disable();
+
+    TIME_END(total);
+
+#ifdef PROFILE_NURSERY
+    int64_t totalTime = TIME_TOTAL(total);
+
+    if (totalTime >= GCReportThreshold) {
+        static bool printedHeader = false;
+        if (!printedHeader) {
+            fprintf(stderr,
+                    "MinorGC: Reason PRate Size Time mkVals mkClls mkSlts mkWCll mkRVal mkRCll mkGnrc ckTbls mkRntm mkDbgr clrNOC collct swpABO updtIn resize pretnr frSlts clrSB sweep\n");
+            printedHeader = true;
+        }
+
+#define FMT " %6" PRIu64
+        fprintf(stderr,
+                "MinorGC: %20s %5.1f%% %4d" FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT "\n",
+                js::gcstats::ExplainReason(reason),
+                promotionRate * 100,
+                numActiveChunks_,
+                totalTime,
+                TIME_TOTAL(markValues),
+                TIME_TOTAL(markCells),
+                TIME_TOTAL(markSlots),
+                TIME_TOTAL(markWholeCells),
+                TIME_TOTAL(markRelocatableValues),
+                TIME_TOTAL(markRelocatableCells),
+                TIME_TOTAL(markGenericEntries),
+                TIME_TOTAL(checkHashTables),
+                TIME_TOTAL(markRuntime),
+                TIME_TOTAL(markDebugger),
+                TIME_TOTAL(clearNewObjectCache),
+                TIME_TOTAL(collectToFP),
+                TIME_TOTAL(sweepArrayBufferViewList),
+                TIME_TOTAL(updateJitActivations),
+                TIME_TOTAL(resize),
+                TIME_TOTAL(pretenure),
+                TIME_TOTAL(freeHugeSlots),
+                TIME_TOTAL(clearStoreBuffer),
+                TIME_TOTAL(sweep));
+#undef FMT
+    }
+#endif
+}
+
+void
+js::Nursery::freeHugeSlots(JSRuntime *rt)
+{
+    for (HugeSlotsSet::Range r = hugeSlots.all(); !r.empty(); r.popFront())
+        rt->defaultFreeOp()->free_(r.front());
+    hugeSlots.clear();
+}
+
+void
+js::Nursery::sweep(JSRuntime *rt)
+{
+#ifdef JS_GC_ZEAL
+    /* Poison the nursery contents so touching a freed object will crash. */
+    JS_POISON((void *)start(), JS_SWEPT_NURSERY_PATTERN, NurserySize);
+    for (int i = 0; i < NumNurseryChunks; ++i)
+        initChunk(i);
+
+    if (rt->gcZeal_ == ZealGenerationalGCValue) {
+        MOZ_ASSERT(numActiveChunks_ == NumNurseryChunks);
+
+        /* Only reset the alloc point when we are close to the end. */
+        if (currentChunk_ + 1 == NumNurseryChunks)
+            setCurrentChunk(0);
+    } else
+#endif
+    {
+#ifdef JS_CRASH_DIAGNOSTICS
+        JS_POISON((void *)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
+        for (int i = 0; i < numActiveChunks_; ++i)
+            chunk(i).trailer.runtime = runtime();
+#endif
+        setCurrentChunk(0);
+    }
+
+    /* Set current start position for isEmpty checks. */
+    currentStart_ = position();
+}
+
+void
+js::Nursery::growAllocableSpace()
+{
+#ifdef JS_GC_ZEAL
+    MOZ_ASSERT_IF(runtime()->gcZeal_ == ZealGenerationalGCValue,
+                  numActiveChunks_ == NumNurseryChunks);
+#endif
+    numActiveChunks_ = Min(numActiveChunks_ * 2, NumNurseryChunks);
+}
+
+void
+js::Nursery::shrinkAllocableSpace()
+{
+#ifdef JS_GC_ZEAL
+    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
+        return;
+#endif
+    numActiveChunks_ = Max(numActiveChunks_ - 1, 1);
+    updateDecommittedRegion();
+}
+
+#endif /* JSGC_GENERATIONAL */