/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Heap_h
#define gc_Heap_h

#include "mozilla/Attributes.h"
#include "mozilla/PodOperations.h"

#include <stddef.h>
#include <stdint.h>

#include "jspubtd.h"
#include "jstypes.h"
#include "jsutil.h"

#include "ds/BitArray.h"
#include "gc/Memory.h"
#include "js/HeapAPI.h"

struct JSCompartment;

struct JSRuntime;

namespace JS {
namespace shadow {
class Runtime;
}
}

namespace js {

class FreeOp;

namespace gc {

struct Arena;
struct ArenaList;
struct ArenaHeader;
struct Chunk;

/*
 * This flag allows an allocation site to request a specific heap based upon the
 * estimated lifetime or lifetime requirements of objects allocated from that
 * site.
 */
enum InitialHeap {
    DefaultHeap,
    TenuredHeap
};

/* The GC allocation kinds. */
enum AllocKind {
    FINALIZE_OBJECT0,
    FINALIZE_OBJECT0_BACKGROUND,
    FINALIZE_OBJECT2,
    FINALIZE_OBJECT2_BACKGROUND,
    FINALIZE_OBJECT4,
    FINALIZE_OBJECT4_BACKGROUND,
    FINALIZE_OBJECT8,
    FINALIZE_OBJECT8_BACKGROUND,
    FINALIZE_OBJECT12,
    FINALIZE_OBJECT12_BACKGROUND,
    FINALIZE_OBJECT16,
    FINALIZE_OBJECT16_BACKGROUND,
    FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
    FINALIZE_SCRIPT,
    FINALIZE_LAZY_SCRIPT,
    FINALIZE_SHAPE,
    FINALIZE_BASE_SHAPE,
    FINALIZE_TYPE_OBJECT,
    FINALIZE_FAT_INLINE_STRING,
    FINALIZE_STRING,
    FINALIZE_EXTERNAL_STRING,
    FINALIZE_JITCODE,
    FINALIZE_LAST = FINALIZE_JITCODE
};

static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;

/*
 * This must be an upper bound, but we do not need the least upper bound, so
 * we just exclude non-background objects.
 */
static const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
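
/*
 * Worked example (illustrative): with the enum above, FINALIZE_OBJECT_LIMIT
 * is 12 and FINALIZE_LIMIT is 21, so MAX_BACKGROUND_FINALIZE_KINDS is
 * 21 - 12 / 2 = 15. That covers the six FINALIZE_OBJECT*_BACKGROUND kinds
 * plus the nine non-object kinds; only the six non-background object kinds
 * are excluded, which is why this is an upper bound rather than the least
 * upper bound.
 */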

/*
 * A GC cell is the base class for all GC things.
 */
struct Cell
{
  public:
    inline ArenaHeader *arenaHeader() const;
    inline AllocKind tenuredGetAllocKind() const;
    MOZ_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
    MOZ_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
    MOZ_ALWAYS_INLINE void unmark(uint32_t color) const;

    inline JSRuntime *runtimeFromMainThread() const;
    inline JS::shadow::Runtime *shadowRuntimeFromMainThread() const;
    inline JS::Zone *tenuredZone() const;
    inline JS::Zone *tenuredZoneFromAnyThread() const;
    inline bool tenuredIsInsideZone(JS::Zone *zone) const;

    // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
    // thread can easily lead to races. Use this method very carefully.
    inline JSRuntime *runtimeFromAnyThread() const;
    inline JS::shadow::Runtime *shadowRuntimeFromAnyThread() const;

#ifdef DEBUG
    inline bool isAligned() const;
    inline bool isTenured() const;
#endif

  protected:
    inline uintptr_t address() const;
    inline Chunk *chunk() const;
};

/*
 * The mark bitmap has one bit per GC cell. For multi-cell GC things this
 * wastes space but allows us to avoid expensive divisions by the thing's size
 * when accessing the bitmap. In addition this allows us to use some bits for
 * colored marking during the cycle GC.
 */
const size_t ArenaCellCount = size_t(1) << (ArenaShift - CellShift);
const size_t ArenaBitmapBits = ArenaCellCount;
const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;

/*
 * A FreeSpan represents a contiguous sequence of free cells in an Arena.
 * |first| is the address of the first free cell in the span. |last| is the
 * address of the last free cell in the span. This last cell holds a FreeSpan
 * data structure for the next span unless this is the last span on the list
 * of spans in the arena. For this last span |last| points to the last byte of
 * the last thing in the arena and no linkage is stored there, so
 * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
 * fully used this last span is empty and |first| == |last| + 1.
 *
 * Thus |first| < |last| implies that we have either the last span with at
 * least one element or a span that is not the last and contains at least 2
 * elements. In both cases, to allocate a thing from this span we simply
 * increment |first| by the allocation size.
 *
 * |first| == |last| implies that we have a one-element span that records the
 * next span. So to allocate from it we need to update the span list head
 * with a copy of the span stored at the |last| address so the following
 * allocations will use that span.
 *
 * |first| > |last| implies that we have an empty last span and the arena is
 * fully used.
 *
 * Also, only for the last span is (|last| & 1) != 0, as all allocation sizes
 * are multiples of CellSize.
 */
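
/*
 * Illustrative sketch of the three cases, assuming a hypothetical 4K-aligned
 * arena address A, CellSize == 8 and a thing size of 16:
 *
 *   first == A + 16,   last == A + 64    |first| < |last|: bump-allocate;
 *                                        the next allocation just advances
 *                                        first to A + 32.
 *   first == A + 64,   last == A + 64    |first| == |last|: one free cell
 *                                        left; the FreeSpan stored at A + 64
 *                                        becomes the new list head after it
 *                                        is handed out.
 *   first == A + 4096, last == A + 4095  |first| > |last|: empty terminal
 *                                        span, the arena is fully used.
 */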
struct FreeSpan
{
    uintptr_t   first;
    uintptr_t   last;

  public:
    FreeSpan() {}

    FreeSpan(uintptr_t first, uintptr_t last)
      : first(first), last(last) {
        checkSpan();
    }

    /*
     * To minimize the size of the arena header the first span is encoded
     * there as offsets from the arena start.
     */
    static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
        static_assert(ArenaShift < 16, "Check that we can pack offsets into uint16_t.");
        JS_ASSERT(firstOffset <= ArenaSize);
        JS_ASSERT(lastOffset < ArenaSize);
        JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
        return firstOffset | (lastOffset << 16);
    }

    /*
     * Encoded offsets for a full arena when its first span is the last one
     * and empty.
     */
    static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);

    static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
        JS_ASSERT(!(arenaAddr & ArenaMask));

        size_t firstOffset = offsets & 0xFFFF;
        size_t lastOffset = offsets >> 16;
        JS_ASSERT(firstOffset <= ArenaSize);
        JS_ASSERT(lastOffset < ArenaSize);

        /*
         * We must not use | when calculating first as firstOffset is
         * ArenaMask + 1 for the empty span.
         */
        return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
    }
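
    /*
     * Worked example (illustrative, assuming ArenaSize == 4096 as described
     * for Arena below): for an arena at address A whose last span starts at
     * offset 64, encodeOffsets(64, ArenaSize - 1) yields 64 | (4095 << 16),
     * and decodeOffsets(A, that value) reconstructs first == A + 64 and
     * last == A | 4095, i.e. the original span. A fully used arena is encoded
     * as FullArenaOffsets == 4096 | (4095 << 16), which decodes to the empty
     * span first == A + 4096, last == A + 4095.
     */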

    void initAsEmpty(uintptr_t arenaAddr = 0) {
        JS_ASSERT(!(arenaAddr & ArenaMask));
        first = arenaAddr + ArenaSize;
        last = arenaAddr | (ArenaSize - 1);
        JS_ASSERT(isEmpty());
    }

    bool isEmpty() const {
        checkSpan();
        return first > last;
    }

    bool hasNext() const {
        checkSpan();
        return !(last & uintptr_t(1));
    }

    const FreeSpan *nextSpan() const {
        JS_ASSERT(hasNext());
        return reinterpret_cast<FreeSpan *>(last);
    }

    FreeSpan *nextSpanUnchecked(size_t thingSize) const {
#ifdef DEBUG
        uintptr_t lastOffset = last & ArenaMask;
        JS_ASSERT(!(lastOffset & 1));
        JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
#endif
        return reinterpret_cast<FreeSpan *>(last);
    }

    uintptr_t arenaAddressUnchecked() const {
        return last & ~ArenaMask;
    }

    uintptr_t arenaAddress() const {
        checkSpan();
        return arenaAddressUnchecked();
    }

    ArenaHeader *arenaHeader() const {
        return reinterpret_cast<ArenaHeader *>(arenaAddress());
    }

    bool isSameNonEmptySpan(const FreeSpan *another) const {
        JS_ASSERT(!isEmpty());
        JS_ASSERT(!another->isEmpty());
        return first == another->first && last == another->last;
    }

    bool isWithinArena(uintptr_t arenaAddr) const {
        JS_ASSERT(!(arenaAddr & ArenaMask));

        /* Return true for the last empty span as well. */
        return arenaAddress() == arenaAddr;
    }

    size_t encodeAsOffsets() const {
        /*
         * We must use first - arenaAddress(), not first & ArenaMask, as
         * first == ArenaMask + 1 for an empty span.
         */
        uintptr_t arenaAddr = arenaAddress();
        return encodeOffsets(first - arenaAddr, last & ArenaMask);
    }

    /* See comments before FreeSpan for details. */
    MOZ_ALWAYS_INLINE void *allocate(size_t thingSize) {
        JS_ASSERT(thingSize % CellSize == 0);
        checkSpan();
        uintptr_t thing = first;
        if (thing < last) {
            /* Bump-allocate from the current span. */
            first = thing + thingSize;
        } else if (MOZ_LIKELY(thing == last)) {
            /*
             * Move to the next span. We use MOZ_LIKELY as without PGO
             * compilers mis-predict == here as unlikely to succeed.
             */
            *this = *reinterpret_cast<FreeSpan *>(thing);
        } else {
            return nullptr;
        }
        checkSpan();
        JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
        return reinterpret_cast<void *>(thing);
    }

    /* A version of allocate when we know that the span is not empty. */
    MOZ_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
        JS_ASSERT(thingSize % CellSize == 0);
        checkSpan();
        uintptr_t thing = first;
        if (thing < last) {
            first = thing + thingSize;
        } else {
            JS_ASSERT(thing == last);
            *this = *reinterpret_cast<FreeSpan *>(thing);
        }
        checkSpan();
        JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
        return reinterpret_cast<void *>(thing);
    }

    /*
     * Allocate from a newly allocated arena. We do not move the free list
     * from the arena. Rather we set the arena up as fully used during the
     * initialization, so to allocate we simply return the first thing in the
     * arena and set the free list to point to the second.
     */
    MOZ_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
                                                 size_t thingSize) {
        JS_ASSERT(!(arenaAddr & ArenaMask));
        uintptr_t thing = arenaAddr | firstThingOffset;
        first = thing + thingSize;
        last = arenaAddr | ArenaMask;
        checkSpan();
        JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
        return reinterpret_cast<void *>(thing);
    }

    void checkSpan() const {
#ifdef DEBUG
        /* We do not allow spans at the end of the address space. */
        JS_ASSERT(last != uintptr_t(-1));
        JS_ASSERT(first);
        JS_ASSERT(last);
        JS_ASSERT(first - 1 <= last);
        uintptr_t arenaAddr = arenaAddressUnchecked();
        if (last & 1) {
            /* The span is the last. */
            JS_ASSERT((last & ArenaMask) == ArenaMask);

            if (first - 1 == last) {
                /*
                 * The span is last and empty. The above check that |first|
                 * is not 0 implies that we are not at the end of the address
                 * space.
                 */
                return;
            }
            size_t spanLength = last - first + 1;
            JS_ASSERT(spanLength % CellSize == 0);

            /* Start and end must belong to the same arena. */
            JS_ASSERT((first & ~ArenaMask) == arenaAddr);
            return;
        }

        /* The span is not the last and we have more spans to follow. */
        JS_ASSERT(first <= last);
        size_t spanLengthWithoutOneThing = last - first;
        JS_ASSERT(spanLengthWithoutOneThing % CellSize == 0);

        JS_ASSERT((first & ~ArenaMask) == arenaAddr);

        /*
         * If there is not enough space before the arena end to allocate one
         * more thing, then the span must be marked as the last one to avoid
         * storing a useless empty span reference.
         */
        size_t beforeTail = ArenaSize - (last & ArenaMask);
        JS_ASSERT(beforeTail >= sizeof(FreeSpan) + CellSize);

        FreeSpan *next = reinterpret_cast<FreeSpan *>(last);

        /*
         * The GC things on the list of free spans come from one arena
         * and the spans are linked in ascending address order with
         * at least one non-free thing between spans.
         */
        JS_ASSERT(last < next->first);
        JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());

        if (next->first > next->last) {
            /*
             * The next span is the empty span that terminates the list for
             * arenas that do not have any free things at the end.
             */
            JS_ASSERT(next->first - 1 == next->last);
            JS_ASSERT(arenaAddr + ArenaSize == next->first);
        }
#endif
    }
};
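
/*
 * Illustrative sketch of how a free-list head and the arena header cooperate,
 * assuming |aheader| points to a freshly initialized arena of kind |kind|
 * with thing size |thingSize| (all three are placeholders; the real
 * allocation path lives in the GC allocator, not in this header):
 *
 *   FreeSpan span;
 *   void *t0 = span.allocateFromNewArena(aheader->arenaAddress(),
 *                                        Arena::firstThingOffset(kind),
 *                                        thingSize);   // first thing
 *   void *t1 = span.allocate(thingSize);               // bumps |first|
 *   aheader->setFirstFreeSpan(&span);                  // re-encode as offsets
 *   FreeSpan copy = aheader->getFirstFreeSpan();       // decodes back to span
 */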

/* Every arena has a header. */
struct ArenaHeader : public JS::shadow::ArenaHeader
{
    friend struct FreeLists;

    /*
     * ArenaHeader::next has two purposes: when unallocated, it points to the
     * next available Arena's header. When allocated, it points to the next
     * arena of the same size class and compartment.
     */
    ArenaHeader     *next;

  private:
    /*
     * The first span of free things in the arena. We encode it as the start
     * and end offsets within the arena, not as a FreeSpan structure, to
     * minimize the header size.
     */
    size_t          firstFreeSpanOffsets;

    /*
     * One of the AllocKind constants, or FINALIZE_LIMIT when the arena does
     * not contain any GC things and is on the list of empty arenas in the GC
     * chunk. The latter allows us to quickly check whether the arena is
     * allocated during the conservative GC scanning without searching for the
     * arena in the list.
     *
     * We use 8 bits for the allocKind so the compiler can use byte-level memory
     * instructions to access it.
     */
    size_t          allocKind : 8;

    /*
     * When collecting we sometimes need to keep an auxiliary list of arenas,
     * for which we use the following fields. This happens for several reasons:
     *
     * When recursive marking uses too much stack, the marking is delayed and
     * the corresponding arenas are put into a stack. To distinguish the bottom
     * of the stack from the arenas not present in the stack we use the
     * markOverflow flag to tag arenas on the stack.
     *
     * Delayed marking is also used for arenas that we allocate into during an
     * incremental GC. In this case, we intend to mark all the objects in the
     * arena, and it's faster to do this marking in bulk.
     *
     * When sweeping we keep track of which arenas have been allocated since the
     * end of the mark phase. This allows us to tell whether a pointer to an
     * unmarked object is yet to be finalized or has already been reallocated.
     * We set the allocatedDuringIncremental flag for this and clear it at the
     * end of the sweep phase.
     *
     * To minimize the ArenaHeader size we record the next linkage as
     * arenaAddress() >> ArenaShift and pack it with the allocKind field and the
     * flags.
     */
  public:
    size_t          hasDelayedMarking : 1;
    size_t          allocatedDuringIncremental : 1;
    size_t          markOverflow : 1;
    size_t          auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
    static_assert(ArenaShift >= 8 + 1 + 1 + 1,
                  "ArenaHeader::auxNextLink packing assumes that ArenaShift has enough bits to "
                  "cover allocKind and hasDelayedMarking.");

    inline uintptr_t address() const;
    inline Chunk *chunk() const;

    bool allocated() const {
        JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
        return allocKind < size_t(FINALIZE_LIMIT);
    }

    void init(JS::Zone *zoneArg, AllocKind kind) {
        JS_ASSERT(!allocated());
        JS_ASSERT(!markOverflow);
        JS_ASSERT(!allocatedDuringIncremental);
        JS_ASSERT(!hasDelayedMarking);
        zone = zoneArg;

        static_assert(FINALIZE_LIMIT <= 255, "We must be able to fit the allockind into uint8_t.");
        allocKind = size_t(kind);

        /* See comments in FreeSpan::allocateFromNewArena. */
        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
    }

    void setAsNotAllocated() {
        allocKind = size_t(FINALIZE_LIMIT);
        markOverflow = 0;
        allocatedDuringIncremental = 0;
        hasDelayedMarking = 0;
        auxNextLink = 0;
    }

    inline uintptr_t arenaAddress() const;
    inline Arena *getArena();

    AllocKind getAllocKind() const {
        JS_ASSERT(allocated());
        return AllocKind(allocKind);
    }

    inline size_t getThingSize() const;

    bool hasFreeThings() const {
        return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
    }

    inline bool isEmpty() const;

    void setAsFullyUsed() {
        firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
    }

    inline FreeSpan getFirstFreeSpan() const;
    inline void setFirstFreeSpan(const FreeSpan *span);

#ifdef DEBUG
    void checkSynchronizedWithFreeList() const;
#endif

    inline ArenaHeader *getNextDelayedMarking() const;
    inline void setNextDelayedMarking(ArenaHeader *aheader);
    inline void unsetDelayedMarking();

    inline ArenaHeader *getNextAllocDuringSweep() const;
    inline void setNextAllocDuringSweep(ArenaHeader *aheader);
    inline void unsetAllocDuringSweep();
};

struct Arena
{
    /*
     * Layout of an arena:
     * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
     * descriptor followed by some pad bytes. The remainder of the arena is
     * filled with the array of T things. The pad bytes ensure that the thing
     * array ends exactly at the end of the arena.
     *
     * +-------------+-----+----+----+-----+----+
     * | ArenaHeader | pad | T0 | T1 | ... | Tn |
     * +-------------+-----+----+----+-----+----+
     *
     * <----------------------------------------> = ArenaSize bytes
     * <-------------------> = first thing offset
     */
    ArenaHeader aheader;
    uint8_t     data[ArenaSize - sizeof(ArenaHeader)];

  private:
    static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
    static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];

  public:
    static void staticAsserts();

    static size_t thingSize(AllocKind kind) {
        return ThingSizes[kind];
    }

    static size_t firstThingOffset(AllocKind kind) {
        return FirstThingOffsets[kind];
    }

    static size_t thingsPerArena(size_t thingSize) {
        JS_ASSERT(thingSize % CellSize == 0);

        /* We should be able to fit FreeSpan in any GC thing. */
        JS_ASSERT(thingSize >= sizeof(FreeSpan));

        return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
    }

    static size_t thingsSpan(size_t thingSize) {
        return thingsPerArena(thingSize) * thingSize;
    }
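
    /*
     * Worked example (illustrative, with assumed numbers): on a 64-bit build
     * where sizeof(ArenaHeader) is 32, a hypothetical 48-byte thing gives
     * thingsPerArena(48) == (4096 - 32) / 48 == 84 and thingsSpan(48) == 4032,
     * so FirstThingOffsets[] would hold 4096 - 4032 == 64 for that kind: 32
     * bytes of header plus 32 bytes of pad, leaving the thing array to end
     * exactly at the arena boundary.
     */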

    static bool isAligned(uintptr_t thing, size_t thingSize) {
        /* Things end at the arena end. */
        uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
        return tailOffset % thingSize == 0;
    }

    uintptr_t address() const {
        return aheader.address();
    }

    uintptr_t thingsStart(AllocKind thingKind) {
        return address() | firstThingOffset(thingKind);
    }

    uintptr_t thingsEnd() {
        return address() + ArenaSize;
    }

    void setAsFullyUnused(AllocKind thingKind);

    template <typename T>
    bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
};

static_assert(sizeof(Arena) == ArenaSize, "The hardcoded arena size must match the struct size.");

inline size_t
ArenaHeader::getThingSize() const
{
    JS_ASSERT(allocated());
    return Arena::thingSize(getAllocKind());
}

/*
 * The tail of the chunk info is shared between all chunks in the system, both
 * nursery and tenured. This structure is locatable from any GC pointer by
 * aligning to 1MiB.
 */
struct ChunkTrailer
{
    /* The index of the chunk in the nursery, or LocationTenuredHeap. */
    uint32_t        location;

#if JS_BITS_PER_WORD == 64
    uint32_t        padding;
#endif

    JSRuntime       *runtime;
};

static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t), "ChunkTrailer size is incorrect.");

/* The chunk header (located at the end of the chunk to preserve arena alignment). */
struct ChunkInfo
{
    Chunk           *next;
    Chunk           **prevp;

    /* Free arenas are linked together with aheader.next. */
    ArenaHeader     *freeArenasHead;

#if JS_BITS_PER_WORD == 32
    /*
     * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
     * architecture-independent.
     */
    char            padding[20];
#endif

    /*
     * Decommitted arenas are tracked by a bitmap in the chunk header. We use
     * this offset to start our search iteration close to a decommitted arena
     * that we can allocate.
     */
    uint32_t        lastDecommittedArenaOffset;

    /* Number of free arenas, either committed or decommitted. */
    uint32_t        numArenasFree;

    /* Number of free, committed arenas. */
    uint32_t        numArenasFreeCommitted;

    /* Number of GC cycles this chunk has survived. */
    uint32_t        age;

    /* Information shared by all Chunk types. */
    ChunkTrailer    trailer;
};
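
/*
 * Illustrative sketch: because ChunkInfo lives at a fixed offset inside every
 * chunk-aligned region, the runtime can be recovered from any tenured GC
 * pointer by masking, which is what Cell::runtimeFromAnyThread() below boils
 * down to (|cell| is a placeholder for any pointer to a tenured GC thing):
 *
 *   Chunk *c = reinterpret_cast<Chunk *>(uintptr_t(cell) & ~ChunkMask);
 *   JSRuntime *rt = c->info.trailer.runtime;
 */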

/*
 * Calculating ArenasPerChunk:
 *
 * In order to figure out how many Arenas will fit in a chunk, we need to know
 * how much extra space is available after we allocate the header data. This
 * is a problem because the header size depends on the number of arenas in the
 * chunk. The two dependent fields are bitmap and decommittedArenas.
 *
 * For the mark bitmap, we know that each arena will use a fixed number of full
 * bytes: ArenaBitmapBytes. The full size of the header data is this number
 * multiplied by the eventual number of arenas we have in the header. We,
 * conceptually, distribute this header data among the individual arenas and do
 * not include it in the header. This way we do not have to worry about its
 * variable size: it gets attached to the variable number we are computing.
 *
 * For the decommitted arena bitmap, we only have 1 bit per arena, so this
 * technique will not work. Instead, we observe that we do not have enough
 * header info to fill 8 full arenas: it is currently about 4 on 64-bit, less
 * on 32-bit. Thus, with current numbers, we need 32 bytes for
 * decommittedArenas. This will not become 31 bytes unless we double the data
 * required in the header. Therefore, we just compute the number of bytes
 * required to track every possible arena and do not worry about slop bits,
 * since there are too few to usefully allocate.
 *
 * To actually compute the number of arenas we can allocate in a chunk, we
 * divide the amount of available space less the header info (not including
 * the mark bitmap which is distributed into the arena size) by the size of
 * the arena (with the mark bitmap bytes it uses).
 */
const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
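
/*
 * Worked example (illustrative, assuming the usual constants from
 * js/HeapAPI.h: ChunkSize == 1 MiB, ArenaSize == 4096, CellSize == 8):
 * ArenaBitmapBytes is 512 / 8 == 64, so BytesPerArenaWithHeader == 4160;
 * ChunkDecommitBitmapBytes is 1048576 / 4096 / 8 == 32; with an assumed
 * sizeof(ChunkInfo) of 56 that leaves ChunkBytesAvailable == 1048488, and
 * 1048488 / 4160 == 252, matching the static_assert above, which is what
 * actually pins the density.
 */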

/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
struct ChunkBitmap
{
    volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];

  public:
    ChunkBitmap() { }

    MOZ_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
                                              uintptr_t **wordp, uintptr_t *maskp)
    {
        GetGCThingMarkWordAndMask(cell, color, wordp, maskp);
    }

    MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarked(const Cell *cell, uint32_t color) {
        uintptr_t *word, mask;
        getMarkWordAndMask(cell, color, &word, &mask);
        return *word & mask;
    }

    MOZ_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
        uintptr_t *word, mask;
        getMarkWordAndMask(cell, BLACK, &word, &mask);
        if (*word & mask)
            return false;
        *word |= mask;
        if (color != BLACK) {
            /*
             * We use getMarkWordAndMask to recalculate both mask and word as
             * doing just mask << color may overflow the mask.
             */
            getMarkWordAndMask(cell, color, &word, &mask);
            if (*word & mask)
                return false;
            *word |= mask;
        }
        return true;
    }

    MOZ_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
        uintptr_t *word, mask;
        getMarkWordAndMask(cell, color, &word, &mask);
        *word &= ~mask;
    }

    void clear() {
        memset((void *)bitmap, 0, sizeof(bitmap));
    }

    uintptr_t *arenaBits(ArenaHeader *aheader) {
        static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
                      "We assume that the part of the bitmap corresponding to the arena "
                      "has the exact number of words so we do not need to deal with a word "
                      "that covers bits from two arenas.");

        uintptr_t *word, unused;
        getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
        return word;
    }
};

static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
              "Ensure our ChunkBitmap actually covers all arenas.");
static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
              "Ensure that the mark bitmap has the right number of bits.");
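
/*
 * Illustrative note (an assumption about js/HeapAPI.h, not defined here):
 * GetGCThingMarkWordAndMask computes the bit index from the cell's offset
 * within its chunk, roughly bit = ((addr & ChunkMask) / CellSize) + color,
 * then splits it into a word pointer and a mask. A non-zero color therefore
 * borrows the mark bit of the next cell, which only works because every GC
 * thing spans thingSize / CellSize cells; AssertValidColor() below checks
 * exactly that.
 */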

typedef BitArray<ArenasPerChunk> PerArenaBitmap;

const size_t ChunkPadSize = ChunkSize
                            - (sizeof(Arena) * ArenasPerChunk)
                            - sizeof(ChunkBitmap)
                            - sizeof(PerArenaBitmap)
                            - sizeof(ChunkInfo);
static_assert(ChunkPadSize < BytesPerArenaWithHeader,
              "If the chunk padding is larger than an arena, we should have one more arena.");

/*
 * Chunks contain arenas and associated data structures (mark bitmap, delayed
 * marking state).
 */
struct Chunk
{
    Arena           arenas[ArenasPerChunk];

    /* Pad to full size to ensure cache alignment of ChunkInfo. */
    uint8_t         padding[ChunkPadSize];

    ChunkBitmap     bitmap;
    PerArenaBitmap  decommittedArenas;
    ChunkInfo       info;

    static Chunk *fromAddress(uintptr_t addr) {
        addr &= ~ChunkMask;
        return reinterpret_cast<Chunk *>(addr);
    }

    static bool withinArenasRange(uintptr_t addr) {
        uintptr_t offset = addr & ChunkMask;
        return offset < ArenasPerChunk * ArenaSize;
    }

    static size_t arenaIndex(uintptr_t addr) {
        JS_ASSERT(withinArenasRange(addr));
        return (addr & ChunkMask) >> ArenaShift;
    }

    uintptr_t address() const {
        uintptr_t addr = reinterpret_cast<uintptr_t>(this);
        JS_ASSERT(!(addr & ChunkMask));
        return addr;
    }

    bool unused() const {
        return info.numArenasFree == ArenasPerChunk;
    }

    bool hasAvailableArenas() const {
        return info.numArenasFree != 0;
    }

    inline void addToAvailableList(JS::Zone *zone);
    inline void insertToAvailableList(Chunk **insertPoint);
    inline void removeFromAvailableList();

    ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind);

    void releaseArena(ArenaHeader *aheader);
    void recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind);

    static Chunk *allocate(JSRuntime *rt);

    void decommitAllArenas(JSRuntime *rt) {
        decommittedArenas.clear(true);
        MarkPagesUnused(rt, &arenas[0], ArenasPerChunk * ArenaSize);

        info.freeArenasHead = nullptr;
        info.lastDecommittedArenaOffset = 0;
        info.numArenasFree = ArenasPerChunk;
        info.numArenasFreeCommitted = 0;
    }

    /* Must be called with the GC lock taken. */
    static inline void release(JSRuntime *rt, Chunk *chunk);
    static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);

    /* Must be called with the GC lock taken. */
    inline void prepareToBeFreed(JSRuntime *rt);

    /*
     * Assuming that the info.prevp points to the next field of the previous
     * chunk in a doubly-linked list, get that chunk.
     */
    Chunk *getPrevious() {
        JS_ASSERT(info.prevp);
        return fromPointerToNext(info.prevp);
    }

    /* Get the chunk from a pointer to its info.next field. */
    static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
        JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
        return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
    }

  private:
    inline void init(JSRuntime *rt);

    /* Search for a decommitted arena to allocate. */
    unsigned findDecommittedArenaOffset();
    ArenaHeader* fetchNextDecommittedArena();

  public:
    /* Unlink and return the freeArenasHead. */
    inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);

    inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
};

static_assert(sizeof(Chunk) == ChunkSize,
              "Ensure the hardcoded chunk size definition actually matches the struct.");
static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
              "The hardcoded API bitmap offset must match the actual offset.");
static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
                                            offsetof(ChunkInfo, trailer) +
                                            offsetof(ChunkTrailer, runtime),
              "The hardcoded API runtime offset must match the actual offset.");

inline uintptr_t
ArenaHeader::address() const
{
    uintptr_t addr = reinterpret_cast<uintptr_t>(this);
    JS_ASSERT(!(addr & ArenaMask));
    JS_ASSERT(Chunk::withinArenasRange(addr));
    return addr;
}

inline Chunk *
ArenaHeader::chunk() const
{
    return Chunk::fromAddress(address());
}

inline uintptr_t
ArenaHeader::arenaAddress() const
{
    return address();
}

inline Arena *
ArenaHeader::getArena()
{
    return reinterpret_cast<Arena *>(arenaAddress());
}

inline bool
ArenaHeader::isEmpty() const
{
    /* Arena is empty if its first span covers the whole arena. */
    JS_ASSERT(allocated());
    size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
    return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
}

FreeSpan
ArenaHeader::getFirstFreeSpan() const
{
#ifdef DEBUG
    checkSynchronizedWithFreeList();
#endif
    return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
}

void
ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
{
    JS_ASSERT(span->isWithinArena(arenaAddress()));
    firstFreeSpanOffsets = span->encodeAsOffsets();
}

inline ArenaHeader *
ArenaHeader::getNextDelayedMarking() const
{
    JS_ASSERT(hasDelayedMarking);
    return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
}

inline void
ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
{
    JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
    JS_ASSERT(!auxNextLink && !hasDelayedMarking);
    hasDelayedMarking = 1;
    auxNextLink = aheader->arenaAddress() >> ArenaShift;
}

inline void
ArenaHeader::unsetDelayedMarking()
{
    JS_ASSERT(hasDelayedMarking);
    hasDelayedMarking = 0;
    auxNextLink = 0;
}
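
/*
 * Illustrative note: the shift by ArenaShift works because arenas are
 * ArenaSize-aligned, so an arena address always fits in
 * JS_BITS_PER_WORD - ArenaShift bits. Assuming a 64-bit build with
 * ArenaShift == 12, auxNextLink has 64 - 8 - 3 == 53 bits while the shifted
 * address needs at most 64 - 12 == 52, which is the headroom the
 * static_assert in ArenaHeader relies on.
 */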

inline ArenaHeader *
ArenaHeader::getNextAllocDuringSweep() const
{
    JS_ASSERT(allocatedDuringIncremental);
    return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
}

inline void
ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
{
    JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
    allocatedDuringIncremental = 1;
    auxNextLink = aheader->arenaAddress() >> ArenaShift;
}

inline void
ArenaHeader::unsetAllocDuringSweep()
{
    JS_ASSERT(allocatedDuringIncremental);
    allocatedDuringIncremental = 0;
    auxNextLink = 0;
}

static void
AssertValidColor(const void *thing, uint32_t color)
{
#ifdef DEBUG
    ArenaHeader *aheader = reinterpret_cast<const Cell *>(thing)->arenaHeader();
    JS_ASSERT(color < aheader->getThingSize() / CellSize);
#endif
}

inline ArenaHeader *
Cell::arenaHeader() const
{
    JS_ASSERT(isTenured());
    uintptr_t addr = address();
    addr &= ~ArenaMask;
    return reinterpret_cast<ArenaHeader *>(addr);
}

inline JSRuntime *
Cell::runtimeFromMainThread() const
{
    JSRuntime *rt = chunk()->info.trailer.runtime;
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
    return rt;
}

inline JS::shadow::Runtime *
Cell::shadowRuntimeFromMainThread() const
{
    return reinterpret_cast<JS::shadow::Runtime *>(runtimeFromMainThread());
}

inline JSRuntime *
Cell::runtimeFromAnyThread() const
{
    return chunk()->info.trailer.runtime;
}

inline JS::shadow::Runtime *
Cell::shadowRuntimeFromAnyThread() const
{
    return reinterpret_cast<JS::shadow::Runtime *>(runtimeFromAnyThread());
}

bool
Cell::isMarked(uint32_t color /* = BLACK */) const
{
    JS_ASSERT(isTenured());
    JS_ASSERT(arenaHeader()->allocated());
    AssertValidColor(this, color);
    return chunk()->bitmap.isMarked(this, color);
}

bool
Cell::markIfUnmarked(uint32_t color /* = BLACK */) const
{
    JS_ASSERT(isTenured());
    AssertValidColor(this, color);
    return chunk()->bitmap.markIfUnmarked(this, color);
}

void
Cell::unmark(uint32_t color) const
{
    JS_ASSERT(isTenured());
    JS_ASSERT(color != BLACK);
    AssertValidColor(this, color);
    chunk()->bitmap.unmark(this, color);
}

JS::Zone *
Cell::tenuredZone() const
{
    JS::Zone *zone = arenaHeader()->zone;
    JS_ASSERT(CurrentThreadCanAccessZone(zone));
    JS_ASSERT(isTenured());
    return zone;
}

JS::Zone *
Cell::tenuredZoneFromAnyThread() const
{
    JS_ASSERT(isTenured());
    return arenaHeader()->zone;
}

bool
Cell::tenuredIsInsideZone(JS::Zone *zone) const
{
    JS_ASSERT(isTenured());
    return zone == arenaHeader()->zone;
}

#ifdef DEBUG
bool
Cell::isAligned() const
{
    return Arena::isAligned(address(), arenaHeader()->getThingSize());
}

bool
Cell::isTenured() const
{
#ifdef JSGC_GENERATIONAL
    JS::shadow::Runtime *rt = js::gc::GetGCThingRuntime(this);
    return !IsInsideNursery(rt, this);
#endif
    return true;
}
#endif

inline uintptr_t
Cell::address() const
{
    uintptr_t addr = uintptr_t(this);
    JS_ASSERT(addr % CellSize == 0);
    JS_ASSERT(Chunk::withinArenasRange(addr));
    return addr;
}

Chunk *
Cell::chunk() const
{
    uintptr_t addr = uintptr_t(this);
    JS_ASSERT(addr % CellSize == 0);
    addr &= ~(ChunkSize - 1);
    return reinterpret_cast<Chunk *>(addr);
}

inline bool
InFreeList(ArenaHeader *aheader, void *thing)
{
    if (!aheader->hasFreeThings())
        return false;

    FreeSpan firstSpan(aheader->getFirstFreeSpan());
    uintptr_t addr = reinterpret_cast<uintptr_t>(thing);

    for (const FreeSpan *span = &firstSpan;;) {
        /* If the thing comes before the current span, it's not free. */
        if (addr < span->first)
            return false;

        /*
         * If we find it inside the span, it's dead. We use here "<=" and not
         * "<" even for the last span as we know that the thing is inside the
         * arena. Thus, for the last span thing < span->end.
         */
        if (addr <= span->last)
            return true;

        /*
         * The last possible empty span is at the end of the arena. Here
         * span->end < thing < thingsEnd and so we must have more spans.
         */
        span = span->nextSpan();
    }
}

} /* namespace gc */

gc::AllocKind
gc::Cell::tenuredGetAllocKind() const
{
    return arenaHeader()->getAllocKind();
}

} /* namespace js */

#endif /* gc_Heap_h */