Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifdef JSGC_GENERATIONAL

#include "gc/Nursery-inl.h"

#include "jscompartment.h"
#include "jsgc.h"
#include "jsinfer.h"
#include "jsutil.h"
#include "prmjtime.h"

#include "gc/GCInternals.h"
#include "gc/Memory.h"
#ifdef JS_ION
#include "jit/IonFrames.h"
#endif
#include "mozilla/IntegerPrintfMacros.h"
#include "vm/ArrayObject.h"
#include "vm/Debugger.h"
#if defined(DEBUG)
#include "vm/ScopeObject.h"
#endif
#include "vm/TypedArrayObject.h"

#include "jsgcinlines.h"

#include "vm/ObjectImpl-inl.h"

using namespace js;
using namespace gc;
using namespace mozilla;

//#define PROFILE_NURSERY

#ifdef PROFILE_NURSERY
/*
 * Print timing information for minor GCs that take longer than this time in microseconds.
 */
static int64_t GCReportThreshold = INT64_MAX;
#endif
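
/*
 * Map the nursery's backing store and activate its first chunk. Returns
 * false if either the hugeSlots set or the nursery pages cannot be
 * allocated.
 */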
bool
js::Nursery::init()
{
    JS_ASSERT(start() == 0);

    if (!hugeSlots.init())
        return false;

    void *heap = MapAlignedPages(runtime(), NurserySize, Alignment);
    if (!heap)
        return false;

    JSRuntime *rt = runtime();
    rt->gcNurseryStart_ = uintptr_t(heap);
    currentStart_ = start();
    rt->gcNurseryEnd_ = chunk(LastNurseryChunk).end();
    numActiveChunks_ = 1;
    JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, NurserySize);
    setCurrentChunk(0);
    updateDecommittedRegion();

#ifdef PROFILE_NURSERY
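    /* For example, JS_MINORGC_TIME=500 reports minor GCs of 500us or longer. */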
    char *env = getenv("JS_MINORGC_TIME");
    if (env)
        GCReportThreshold = atoi(env);
#endif

    JS_ASSERT(isEnabled());
    return true;
}

js::Nursery::~Nursery()
{
    if (start())
        UnmapPages(runtime(), (void *)start(), NurserySize);
}

void
js::Nursery::enable()
{
    JS_ASSERT(isEmpty());
    if (isEnabled())
        return;
    numActiveChunks_ = 1;
    setCurrentChunk(0);
    currentStart_ = position();
#ifdef JS_GC_ZEAL
    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
        enterZealMode();
#endif
}

void
js::Nursery::disable()
{
    JS_ASSERT(isEmpty());
    if (!isEnabled())
        return;
    numActiveChunks_ = 0;
    currentEnd_ = 0;
    updateDecommittedRegion();
}

bool
js::Nursery::isEmpty() const
{
    JS_ASSERT(runtime_);
    if (!isEnabled())
        return true;
    JS_ASSERT_IF(runtime_->gcZeal_ != ZealGenerationalGCValue, currentStart_ == start());
    return position() == currentStart_;
}
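
/*
 * Allocate an object in the nursery, together with |numDynamic| dynamic
 * slots. Small slot arrays are placed contiguously after the object; larger
 * ones fall back to a malloced "huge slots" buffer. Returns nullptr on OOM
 * or when the nursery is full.
 */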
JSObject *
js::Nursery::allocateObject(JSContext *cx, size_t size, size_t numDynamic)
{
    /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
    JS_ASSERT(size >= sizeof(RelocationOverlay));

    /* Attempt to allocate slots contiguously after object, if possible. */
    if (numDynamic && numDynamic <= MaxNurserySlots) {
        size_t totalSize = size + sizeof(HeapSlot) * numDynamic;
        JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
        if (obj) {
            obj->setInitialSlots(reinterpret_cast<HeapSlot *>(size_t(obj) + size));
            return obj;
        }
        /* If we failed to allocate as a block, retry with out-of-line slots. */
    }

    HeapSlot *slots = nullptr;
    if (numDynamic) {
        slots = allocateHugeSlots(cx, numDynamic);
        if (MOZ_UNLIKELY(!slots))
            return nullptr;
    }

    JSObject *obj = static_cast<JSObject *>(allocate(size));

    if (obj)
        obj->setInitialSlots(slots);
    else
        freeSlots(cx, slots);

    return obj;
}
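
/*
 * Bump-allocate |size| bytes from the current chunk, advancing to the next
 * active chunk when the current one cannot satisfy the request. Returns
 * nullptr once all active chunks are exhausted.
 */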
void *
js::Nursery::allocate(size_t size)
{
    JS_ASSERT(isEnabled());
    JS_ASSERT(!runtime()->isHeapBusy());
    JS_ASSERT(position() >= currentStart_);

    if (position() + size > currentEnd()) {
        if (currentChunk_ + 1 == numActiveChunks_)
            return nullptr;
        setCurrentChunk(currentChunk_ + 1);
    }

    void *thing = (void *)position();
    position_ = position() + size;

    JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
    return thing;
}

/* Internally, this function is used to allocate elements as well as slots. */
HeapSlot *
js::Nursery::allocateSlots(JSContext *cx, JSObject *obj, uint32_t nslots)
{
    JS_ASSERT(obj);
    JS_ASSERT(nslots > 0);

    if (!isInside(obj))
        return cx->pod_malloc<HeapSlot>(nslots);

    if (nslots > MaxNurserySlots)
        return allocateHugeSlots(cx, nslots);

    size_t size = sizeof(HeapSlot) * nslots;
    HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
    if (slots)
        return slots;

    return allocateHugeSlots(cx, nslots);
}

ObjectElements *
js::Nursery::allocateElements(JSContext *cx, JSObject *obj, uint32_t nelems)
{
    JS_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
    return reinterpret_cast<ObjectElements *>(allocateSlots(cx, obj, nelems));
}
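
/*
 * Resize a slots allocation made for |obj|. Buffers outside the nursery are
 * resized with realloc_; buffers inside the nursery cannot shrink in place,
 * since the bump allocator cannot reuse the freed tail, and grow by
 * allocating a fresh buffer and copying.
 */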
HeapSlot *
js::Nursery::reallocateSlots(JSContext *cx, JSObject *obj, HeapSlot *oldSlots,
                             uint32_t oldCount, uint32_t newCount)
{
    size_t oldSize = oldCount * sizeof(HeapSlot);
    size_t newSize = newCount * sizeof(HeapSlot);

    if (!isInside(obj))
        return static_cast<HeapSlot *>(cx->realloc_(oldSlots, oldSize, newSize));

    if (!isInside(oldSlots)) {
        HeapSlot *newSlots = static_cast<HeapSlot *>(cx->realloc_(oldSlots, oldSize, newSize));
        if (oldSlots != newSlots) {
            hugeSlots.remove(oldSlots);
            /* If this put fails, we will only leak the slots. */
            (void)hugeSlots.put(newSlots);
        }
        return newSlots;
    }

    /* The nursery cannot make use of the returned slots data. */
    if (newCount < oldCount)
        return oldSlots;

    HeapSlot *newSlots = allocateSlots(cx, obj, newCount);
    if (newSlots)
        PodCopy(newSlots, oldSlots, oldCount);
    return newSlots;
}

ObjectElements *
js::Nursery::reallocateElements(JSContext *cx, JSObject *obj, ObjectElements *oldHeader,
                                uint32_t oldCount, uint32_t newCount)
{
    HeapSlot *slots = reallocateSlots(cx, obj, reinterpret_cast<HeapSlot *>(oldHeader),
                                      oldCount, newCount);
    return reinterpret_cast<ObjectElements *>(slots);
}
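
/*
 * Free a slots buffer allocated on behalf of a nursery object. Buffers that
 * live inside the nursery need no explicit free; their memory is reclaimed
 * wholesale by the next minor GC.
 */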
void
js::Nursery::freeSlots(JSContext *cx, HeapSlot *slots)
{
    if (!isInside(slots)) {
        hugeSlots.remove(slots);
        js_free(slots);
    }
}

HeapSlot *
js::Nursery::allocateHugeSlots(JSContext *cx, size_t nslots)
{
    HeapSlot *slots = cx->pod_malloc<HeapSlot>(nslots);
    if (MOZ_UNLIKELY(!slots))
        return nullptr;
    /* If this put fails, we will only leak the slots. */
    (void)hugeSlots.put(slots);
    return slots;
}
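
/*
 * Record that a nursery cell owns the given malloced slots, so that the
 * buffer is freed by freeHugeSlots if the cell dies in a minor GC.
 */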
void
js::Nursery::notifyInitialSlots(Cell *cell, HeapSlot *slots)
{
    if (isInside(cell) && !isInside(slots)) {
        /* If this put fails, we will only leak the slots. */
        (void)hugeSlots.put(slots);
    }
}

namespace js {
namespace gc {

class MinorCollectionTracer : public JSTracer
{
  public:
    Nursery *nursery;
    AutoTraceSession session;

    /* Amount of data moved to the tenured generation during collection. */
    size_t tenuredSize;

    /*
     * This list is threaded through the Nursery using the space from already
     * moved things. The list is used to fix up the moved things and to find
     * things held live by intra-Nursery pointers.
     */
    RelocationOverlay *head;
    RelocationOverlay **tail;

    /* Save and restore all of the runtime state we use during MinorGC. */
    bool savedRuntimeNeedBarrier;
    AutoDisableProxyCheck disableStrictProxyChecking;
    AutoEnterOOMUnsafeRegion oomUnsafeRegion;
    ArrayBufferVector liveArrayBuffers;

    /* Insert the given relocation entry into the list of things to visit. */
    MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry) {
        *tail = entry;
        tail = &entry->next_;
        *tail = nullptr;
    }

    MinorCollectionTracer(JSRuntime *rt, Nursery *nursery)
      : JSTracer(rt, Nursery::MinorGCCallback, TraceWeakMapKeysValues),
        nursery(nursery),
        session(rt, MinorCollecting),
        tenuredSize(0),
        head(nullptr),
        tail(&head),
        savedRuntimeNeedBarrier(rt->needsBarrier()),
        disableStrictProxyChecking(rt)
    {
        rt->gcNumber++;

        /*
         * We disable the runtime needsBarrier() check so that pre-barriers do
         * not fire on objects that have been relocated. The pre-barrier's
         * call to obj->zone() will try to look through shape_, which is now
         * the relocation magic and will crash. However, zone->needsBarrier()
         * must still be set correctly so that allocations we make in minor
         * GCs between incremental slices will allocate their objects marked.
         */
        rt->setNeedsBarrier(false);

        /*
         * We use the live array buffer lists to track traced buffers so we
         * can sweep their dead views. Incremental collection also uses these
         * lists, so we may need to save and restore their contents here.
         */
        if (rt->gcIncrementalState != NO_INCREMENTAL) {
            for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
                if (!ArrayBufferObject::saveArrayBufferList(c, liveArrayBuffers))
                    CrashAtUnhandlableOOM("OOM while saving live array buffers");
                ArrayBufferObject::resetArrayBufferList(c);
            }
        }
    }

    ~MinorCollectionTracer() {
        runtime()->setNeedsBarrier(savedRuntimeNeedBarrier);
        if (runtime()->gcIncrementalState != NO_INCREMENTAL)
            ArrayBufferObject::restoreArrayBufferLists(liveArrayBuffers);
    }
};

} /* namespace gc */
} /* namespace js */
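
/*
 * Pick the tenured AllocKind for the promoted copy of |obj|, accounting for
 * dense array elements and lazily allocated typed array buffers that may be
 * stored inline in the new object.
 */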
static AllocKind
GetObjectAllocKindForCopy(JSRuntime *rt, JSObject *obj)
{
    if (obj->is<ArrayObject>()) {
        JS_ASSERT(obj->numFixedSlots() == 0);

        /* Use minimal size object if we are just going to copy the pointer. */
        if (!IsInsideNursery(rt, (void *)obj->getElementsHeader()))
            return FINALIZE_OBJECT0_BACKGROUND;

        size_t nelements = obj->getDenseCapacity();
        return GetBackgroundAllocKind(GetGCArrayKind(nelements));
    }

    if (obj->is<JSFunction>())
        return obj->as<JSFunction>().getAllocKind();

    /*
     * Typed arrays in the nursery may have a lazily allocated buffer; make
     * sure there is room for the array's fixed data when moving the array.
     */
    if (obj->is<TypedArrayObject>() && !obj->as<TypedArrayObject>().buffer()) {
        size_t nbytes = obj->as<TypedArrayObject>().byteLength();
        return GetBackgroundAllocKind(TypedArrayObject::AllocKindForLazyBuffer(nbytes));
    }

    AllocKind kind = GetGCObjectFixedSlotsKind(obj->numFixedSlots());
    JS_ASSERT(!IsBackgroundFinalized(kind));
    JS_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
    return GetBackgroundAllocKind(kind);
}
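
/* Allocate a tenured cell of the given kind, preferring the zone's free lists. */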
void *
js::Nursery::allocateFromTenured(Zone *zone, AllocKind thingKind)
{
    void *t = zone->allocator.arenas.allocateFromFreeList(thingKind, Arena::thingSize(thingKind));
    if (t)
        return t;
    zone->allocator.arenas.checkEmptyFreeList(thingKind);
    return zone->allocator.arenas.allocateFromArena(zone, thingKind);
}
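
/*
 * Overwrite the first word of a dead nursery buffer with the address of its
 * tenured copy, so forwardBufferPointer can later redirect stale references.
 */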
void
js::Nursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
{
    JS_ASSERT(nslots > 0);
    JS_ASSERT(isInside(oldSlots));
    JS_ASSERT(!isInside(newSlots));
    *reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
}

void
js::Nursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
                                          uint32_t nelems)
{
    /*
     * If the JIT has hoisted a pointer to a zero-length elements buffer, we
     * do not need to relocate it because reads and writes through that
     * pointer are invalid anyway.
     */
    if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
        return;
    JS_ASSERT(isInside(oldHeader));
    JS_ASSERT(!isInside(newHeader));
    *reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
}

#ifdef DEBUG
static bool
IsWriteableAddress(void *ptr)
{
    volatile uint64_t *vPtr = reinterpret_cast<volatile uint64_t *>(ptr);
    *vPtr = *vPtr;
    return true;
}
#endif

void
js::Nursery::forwardBufferPointer(HeapSlot **pSlotsElems)
{
    HeapSlot *old = *pSlotsElems;

    if (!isInside(old))
        return;

    /*
     * If the elements buffer is zero length, the "first" item could be inside
     * of the next object or past the end of the allocable area. However,
     * since we always store the runtime as the last word in the nursery,
     * isInside will still be true, even if this zero-size allocation abuts
     * the end of the allocable area. Thus, it is always safe to read the
     * first word of |old| here.
     */
    *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
    JS_ASSERT(!isInside(*pSlotsElems));
    JS_ASSERT(IsWriteableAddress(*pSlotsElems));
}

// Structure for counting how many times objects of a particular type have
// been tenured during a minor collection.
struct TenureCount
{
    types::TypeObject *type;
    int count;
};

// Keep rough track of how many times we tenure objects of particular types
// during minor collections, using a fixed size hash for efficiency at the
// cost of potential collisions.
struct Nursery::TenureCountCache
{
    TenureCount entries[16];

    TenureCountCache() { PodZero(this); }

    TenureCount &findEntry(types::TypeObject *type) {
        return entries[PointerHasher<types::TypeObject *, 3>::hash(type) % ArrayLength(entries)];
    }
};
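
/*
 * Trace each object on the fixup list, moving any nursery things it reaches;
 * moveToTenured appends newly moved objects to the list, so the loop runs
 * until no unvisited object remains, i.e. to a fixed point.
 */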
void
js::Nursery::collectToFixedPoint(MinorCollectionTracer *trc, TenureCountCache &tenureCounts)
{
    for (RelocationOverlay *p = trc->head; p; p = p->next()) {
        JSObject *obj = static_cast<JSObject*>(p->forwardingAddress());
        traceObject(trc, obj);

        TenureCount &entry = tenureCounts.findEntry(obj->type());
        if (entry.type == obj->type()) {
            entry.count++;
        } else if (!entry.type) {
            entry.type = obj->type();
            entry.count = 1;
        }
    }
}

MOZ_ALWAYS_INLINE void
js::Nursery::traceObject(MinorCollectionTracer *trc, JSObject *obj)
{
    const Class *clasp = obj->getClass();
    if (clasp->trace)
        clasp->trace(trc, obj);

    if (!obj->isNative())
        return;

    if (!obj->hasEmptyElements())
        markSlots(trc, obj->getDenseElements(), obj->getDenseInitializedLength());

    HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
    obj->getSlotRange(0, obj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
    markSlots(trc, fixedStart, fixedEnd);
    markSlots(trc, dynStart, dynEnd);
}

MOZ_ALWAYS_INLINE void
js::Nursery::markSlots(MinorCollectionTracer *trc, HeapSlot *vp, uint32_t nslots)
{
    markSlots(trc, vp, vp + nslots);
}

MOZ_ALWAYS_INLINE void
js::Nursery::markSlots(MinorCollectionTracer *trc, HeapSlot *vp, HeapSlot *end)
{
    for (; vp != end; ++vp)
        markSlot(trc, vp);
}
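
/*
 * Update a slot that refers to a nursery object: rewrite it with the
 * already-forwarded address if the object has moved, otherwise tenure the
 * object now and store the new location.
 */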
MOZ_ALWAYS_INLINE void
js::Nursery::markSlot(MinorCollectionTracer *trc, HeapSlot *slotp)
{
    if (!slotp->isObject())
        return;

    JSObject *obj = &slotp->toObject();
    if (!isInside(obj))
        return;

    if (getForwardedPointer(&obj)) {
        slotp->unsafeGet()->setObject(*obj);
        return;
    }

    JSObject *tenured = static_cast<JSObject*>(moveToTenured(trc, obj));
    slotp->unsafeGet()->setObject(*tenured);
}
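
/*
 * Copy |src| to the tenured heap, overwrite the nursery original with a
 * RelocationOverlay holding the forwarding address, and queue the copy on
 * the fixup list so its own outgoing pointers get traced.
 */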
void *
js::Nursery::moveToTenured(MinorCollectionTracer *trc, JSObject *src)
{
    Zone *zone = src->zone();
    AllocKind dstKind = GetObjectAllocKindForCopy(trc->runtime(), src);
    JSObject *dst = static_cast<JSObject *>(allocateFromTenured(zone, dstKind));
    if (!dst)
        CrashAtUnhandlableOOM("Failed to allocate object while tenuring.");

    trc->tenuredSize += moveObjectToTenured(dst, src, dstKind);

    RelocationOverlay *overlay = reinterpret_cast<RelocationOverlay *>(src);
    overlay->forwardTo(dst);
    trc->insertIntoFixupList(overlay);

    return static_cast<void *>(dst);
}

size_t
js::Nursery::moveObjectToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    size_t srcSize = Arena::thingSize(dstKind);
    size_t tenuredSize = srcSize;

    /*
     * Arrays do not necessarily have the same AllocKind between src and dst.
     * We deal with this by copying elements manually, possibly re-inlining
     * them if there is adequate room inline in dst.
     */
    if (src->is<ArrayObject>())
        srcSize = sizeof(ObjectImpl);

    js_memcpy(dst, src, srcSize);
    tenuredSize += moveSlotsToTenured(dst, src, dstKind);
    tenuredSize += moveElementsToTenured(dst, src, dstKind);

    if (src->is<TypedArrayObject>())
        forwardTypedArrayPointers(dst, src);

    /* The shape's list head may point into the old object. */
    if (&src->shape_ == dst->shape_->listp)
        dst->shape_->listp = &dst->shape_;

    return tenuredSize;
}

void
js::Nursery::forwardTypedArrayPointers(JSObject *dst, JSObject *src)
{
    /*
     * Typed array data may be stored inline inside the object's fixed slots.
     * If so, we need to update the private pointer and leave a forwarding
     * pointer at the start of the data.
     */
    TypedArrayObject &typedArray = src->as<TypedArrayObject>();
    JS_ASSERT_IF(typedArray.buffer(), !isInside(src->getPrivate()));
    if (typedArray.buffer())
        return;

    void *srcData = src->fixedData(TypedArrayObject::FIXED_DATA_START);
    void *dstData = dst->fixedData(TypedArrayObject::FIXED_DATA_START);
    JS_ASSERT(src->getPrivate() == srcData);
    dst->setPrivate(dstData);

    /*
     * We don't know the number of slots here, but
     * TypedArrayObject::AllocKindForLazyBuffer ensures that it's always at
     * least one.
     */
    size_t nslots = 1;
    setSlotsForwardingPointer(reinterpret_cast<HeapSlot*>(srcData),
                              reinterpret_cast<HeapSlot*>(dstData),
                              nslots);
}

size_t
js::Nursery::moveSlotsToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    /* Fixed slots have already been copied over. */
    if (!src->hasDynamicSlots())
        return 0;

    if (!isInside(src->slots)) {
        hugeSlots.remove(src->slots);
        return 0;
    }

    Zone *zone = src->zone();
    size_t count = src->numDynamicSlots();
    dst->slots = zone->pod_malloc<HeapSlot>(count);
    if (!dst->slots)
        CrashAtUnhandlableOOM("Failed to allocate slots while tenuring.");
    PodCopy(dst->slots, src->slots, count);
    setSlotsForwardingPointer(src->slots, dst->slots, count);
    return count * sizeof(HeapSlot);
}

size_t
js::Nursery::moveElementsToTenured(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    if (src->hasEmptyElements())
        return 0;

    Zone *zone = src->zone();
    ObjectElements *srcHeader = src->getElementsHeader();
    ObjectElements *dstHeader;

    /* TODO Bug 874151: Prefer to put element data inline if we have space. */
    if (!isInside(srcHeader)) {
        JS_ASSERT(src->elements == dst->elements);
        hugeSlots.remove(reinterpret_cast<HeapSlot*>(srcHeader));
        return 0;
    }

    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;

    /* Unlike other objects, Arrays can have fixed elements. */
    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
        dst->setFixedElements();
        dstHeader = dst->getElementsHeader();
        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
        setElementsForwardingPointer(srcHeader, dstHeader, nslots);
        return nslots * sizeof(HeapSlot);
    }

    JS_ASSERT(nslots >= 2);
    size_t nbytes = nslots * sizeof(HeapValue);
    dstHeader = static_cast<ObjectElements *>(zone->malloc_(nbytes));
    if (!dstHeader)
        CrashAtUnhandlableOOM("Failed to allocate elements while tenuring.");
    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
    setElementsForwardingPointer(srcHeader, dstHeader, nslots);
    dst->elements = dstHeader->elements();
    return nslots * sizeof(HeapSlot);
}
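
/*
 * A thing needs moving only if it is still in the nursery and has not
 * already been forwarded. Pointers that are themselves stored in nursery
 * memory are skipped; they are fixed up when their holder is moved. Note
 * that getForwardedPointer also updates |*thingp| when the thing has
 * already been moved.
 */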
static bool
ShouldMoveToTenured(MinorCollectionTracer *trc, void **thingp)
{
    Cell *cell = static_cast<Cell *>(*thingp);
    Nursery &nursery = *trc->nursery;
    return !nursery.isInside(thingp) && nursery.isInside(cell) &&
           !nursery.getForwardedPointer(thingp);
}

/* static */ void
js::Nursery::MinorGCCallback(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
{
    MinorCollectionTracer *trc = static_cast<MinorCollectionTracer *>(jstrc);
    if (ShouldMoveToTenured(trc, thingp))
        *thingp = trc->nursery->moveToTenured(trc, static_cast<JSObject *>(*thingp));
}

static void
CheckHashTablesAfterMovingGC(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    if (rt->gcZeal() == ZealCheckHashTablesOnMinorGC) {
        /* Check that internal hash tables no longer have any pointers into the nursery. */
        for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
            c->checkNewTypeObjectTableAfterMovingGC();
            c->checkInitialShapesTableAfterMovingGC();
            c->checkWrapperMapAfterMovingGC();
            if (c->debugScopes)
                c->debugScopes->checkHashTablesAfterMovingGC(rt);
        }
    }
#endif
}

#ifdef PROFILE_NURSERY
#define TIME_START(name) int64_t timestampStart_##name = PRMJ_Now()
#define TIME_END(name) int64_t timestampEnd_##name = PRMJ_Now()
#define TIME_TOTAL(name) (timestampEnd_##name - timestampStart_##name)
#else
#define TIME_START(name)
#define TIME_END(name)
#define TIME_TOTAL(name)
#endif

void
js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList *pretenureTypes)
{
    JS_AbortIfWrongThread(rt);

    if (rt->mainThread.suppressGC)
        return;

    if (!isEnabled())
        return;

    if (isEmpty())
        return;

    rt->gcStats.count(gcstats::STAT_MINOR_GC);

    TIME_START(total);

    AutoStopVerifyingBarriers av(rt, false);

    // Move objects pointed to by roots from the nursery to the major heap.
    MinorCollectionTracer trc(rt, this);

    // Mark the store buffer. This must happen first.
    StoreBuffer &sb = rt->gcStoreBuffer;
    TIME_START(markValues);
    sb.markValues(&trc);
    TIME_END(markValues);

    TIME_START(markCells);
    sb.markCells(&trc);
    TIME_END(markCells);

    TIME_START(markSlots);
    sb.markSlots(&trc);
    TIME_END(markSlots);

    TIME_START(markWholeCells);
    sb.markWholeCells(&trc);
    TIME_END(markWholeCells);

    TIME_START(markRelocatableValues);
    sb.markRelocatableValues(&trc);
    TIME_END(markRelocatableValues);

    TIME_START(markRelocatableCells);
    sb.markRelocatableCells(&trc);
    TIME_END(markRelocatableCells);

    TIME_START(markGenericEntries);
    sb.markGenericEntries(&trc);
    TIME_END(markGenericEntries);

    TIME_START(checkHashTables);
    CheckHashTablesAfterMovingGC(rt);
    TIME_END(checkHashTables);

    TIME_START(markRuntime);
    MarkRuntime(&trc);
    TIME_END(markRuntime);

    TIME_START(markDebugger);
    Debugger::markAll(&trc);
    TIME_END(markDebugger);

    TIME_START(clearNewObjectCache);
    rt->newObjectCache.clearNurseryObjects(rt);
    TIME_END(clearNewObjectCache);

    // Most of the work is done here. This loop iterates over objects that
    // have been moved to the major heap. If these objects have any outgoing
    // pointers to the nursery, then those nursery objects get moved as well,
    // until no objects are left to move. That is, we iterate to a fixed point.
    TIME_START(collectToFP);
    TenureCountCache tenureCounts;
    collectToFixedPoint(&trc, tenureCounts);
    TIME_END(collectToFP);

    // Update the array buffer object's view lists.
    TIME_START(sweepArrayBufferViewList);
    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
        if (!c->gcLiveArrayBuffers.empty())
            ArrayBufferObject::sweep(c);
    }
    TIME_END(sweepArrayBufferViewList);

    // Update any slot or element pointers whose destination has been tenured.
    TIME_START(updateJitActivations);
#ifdef JS_ION
    js::jit::UpdateJitActivationsForMinorGC(rt, &trc);
#endif
    TIME_END(updateJitActivations);

    // Resize the nursery.
    TIME_START(resize);
    double promotionRate = trc.tenuredSize / double(allocationEnd() - start());
    if (promotionRate > 0.05)
        growAllocableSpace();
    else if (promotionRate < 0.01)
        shrinkAllocableSpace();
    TIME_END(resize);

    // If we are promoting most of the nursery, or we exhausted the store
    // buffer with pointers to nursery things (which forces a collection well
    // before the nursery is full), look for object types that are being
    // promoted excessively and try to pretenure them.
    TIME_START(pretenure);
    if (pretenureTypes && (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER)) {
        for (size_t i = 0; i < ArrayLength(tenureCounts.entries); i++) {
            const TenureCount &entry = tenureCounts.entries[i];
            if (entry.count >= 3000)
                pretenureTypes->append(entry.type); // ignore alloc failure
        }
    }
    TIME_END(pretenure);

    // Sweep.
    TIME_START(freeHugeSlots);
    freeHugeSlots(rt);
    TIME_END(freeHugeSlots);

    TIME_START(sweep);
    sweep(rt);
    TIME_END(sweep);

    TIME_START(clearStoreBuffer);
    rt->gcStoreBuffer.clear();
    TIME_END(clearStoreBuffer);

    // We ignore gcMaxBytes when allocating for minor collection. However, if
    // we overflowed, we disable the nursery. The next time we allocate, we'll
    // fail because gcBytes >= gcMaxBytes.
    if (rt->gcBytes >= rt->gcMaxBytes)
        disable();

    TIME_END(total);

#ifdef PROFILE_NURSERY
    int64_t totalTime = TIME_TOTAL(total);

    if (totalTime >= GCReportThreshold) {
        static bool printedHeader = false;
        if (!printedHeader) {
            fprintf(stderr,
                    "MinorGC: Reason               PRate  Size Time   mkVals mkClls mkSlts mkWCll mkRVal mkRCll mkGnrc ckTbls mkRntm mkDbgr clrNOC collct swpABO updtIn resize pretnr frSlts clrSB  sweep\n");
            printedHeader = true;
        }

#define FMT " %6" PRIu64
        fprintf(stderr,
                "MinorGC: %20s %5.1f%% %4d" FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT "\n",
                js::gcstats::ExplainReason(reason),
                promotionRate * 100,
                numActiveChunks_,
                totalTime,
                TIME_TOTAL(markValues),
                TIME_TOTAL(markCells),
                TIME_TOTAL(markSlots),
                TIME_TOTAL(markWholeCells),
                TIME_TOTAL(markRelocatableValues),
                TIME_TOTAL(markRelocatableCells),
                TIME_TOTAL(markGenericEntries),
                TIME_TOTAL(checkHashTables),
                TIME_TOTAL(markRuntime),
                TIME_TOTAL(markDebugger),
                TIME_TOTAL(clearNewObjectCache),
                TIME_TOTAL(collectToFP),
                TIME_TOTAL(sweepArrayBufferViewList),
                TIME_TOTAL(updateJitActivations),
                TIME_TOTAL(resize),
                TIME_TOTAL(pretenure),
                TIME_TOTAL(freeHugeSlots),
                TIME_TOTAL(clearStoreBuffer),
                TIME_TOTAL(sweep));
#undef FMT
    }
#endif
}
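
/* Free all malloced slots buffers that were not adopted by tenured objects. */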
void
js::Nursery::freeHugeSlots(JSRuntime *rt)
{
    for (HugeSlotsSet::Range r = hugeSlots.all(); !r.empty(); r.popFront())
        rt->defaultFreeOp()->free_(r.front());
    hugeSlots.clear();
}

void
js::Nursery::sweep(JSRuntime *rt)
{
#ifdef JS_GC_ZEAL
    /* Poison the nursery contents so touching a freed object will crash. */
    JS_POISON((void *)start(), JS_SWEPT_NURSERY_PATTERN, NurserySize);
    for (int i = 0; i < NumNurseryChunks; ++i)
        initChunk(i);

    if (rt->gcZeal_ == ZealGenerationalGCValue) {
        MOZ_ASSERT(numActiveChunks_ == NumNurseryChunks);

        /* Only reset the alloc point when we are close to the end. */
        if (currentChunk_ + 1 == NumNurseryChunks)
            setCurrentChunk(0);
    } else
#endif
    {
#ifdef JS_CRASH_DIAGNOSTICS
        JS_POISON((void *)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
        for (int i = 0; i < numActiveChunks_; ++i)
            chunk(i).trailer.runtime = runtime();
#endif
        setCurrentChunk(0);
    }

    /* Set current start position for isEmpty checks. */
    currentStart_ = position();
}
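
/* Double the number of active chunks, capped at the nursery's full reservation. */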
void
js::Nursery::growAllocableSpace()
{
#ifdef JS_GC_ZEAL
    MOZ_ASSERT_IF(runtime()->gcZeal_ == ZealGenerationalGCValue,
                  numActiveChunks_ == NumNurseryChunks);
#endif
    numActiveChunks_ = Min(numActiveChunks_ * 2, NumNurseryChunks);
}

void
js::Nursery::shrinkAllocableSpace()
{
#ifdef JS_GC_ZEAL
    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
        return;
#endif
    numActiveChunks_ = Max(numActiveChunks_ - 1, 1);
    updateDecommittedRegion();
}

#endif /* JSGC_GENERATIONAL */