js/src/gc/Heap.h

1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #ifndef gc_Heap_h
8 #define gc_Heap_h
9
10 #include "mozilla/Attributes.h"
11 #include "mozilla/PodOperations.h"
12
13 #include <stddef.h>
14 #include <stdint.h>
15
16 #include "jspubtd.h"
17 #include "jstypes.h"
18 #include "jsutil.h"
19
20 #include "ds/BitArray.h"
21 #include "gc/Memory.h"
22 #include "js/HeapAPI.h"
23
24 struct JSCompartment;
25
26 struct JSRuntime;
27
28 namespace JS {
29 namespace shadow {
30 class Runtime;
31 }
32 }
33
34 namespace js {
35
36 class FreeOp;
37
38 namespace gc {
39
40 struct Arena;
41 struct ArenaList;
42 struct ArenaHeader;
43 struct Chunk;
44
45 /*
46 * This flag allows an allocation site to request a specific heap based upon the
47 * estimated lifetime or lifetime requirements of objects allocated from that
48 * site.
49 */
50 enum InitialHeap {
51 DefaultHeap,
52 TenuredHeap
53 };
54
55 /* The GC allocation kinds. */
56 enum AllocKind {
57 FINALIZE_OBJECT0,
58 FINALIZE_OBJECT0_BACKGROUND,
59 FINALIZE_OBJECT2,
60 FINALIZE_OBJECT2_BACKGROUND,
61 FINALIZE_OBJECT4,
62 FINALIZE_OBJECT4_BACKGROUND,
63 FINALIZE_OBJECT8,
64 FINALIZE_OBJECT8_BACKGROUND,
65 FINALIZE_OBJECT12,
66 FINALIZE_OBJECT12_BACKGROUND,
67 FINALIZE_OBJECT16,
68 FINALIZE_OBJECT16_BACKGROUND,
69 FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
70 FINALIZE_SCRIPT,
71 FINALIZE_LAZY_SCRIPT,
72 FINALIZE_SHAPE,
73 FINALIZE_BASE_SHAPE,
74 FINALIZE_TYPE_OBJECT,
75 FINALIZE_FAT_INLINE_STRING,
76 FINALIZE_STRING,
77 FINALIZE_EXTERNAL_STRING,
78 FINALIZE_JITCODE,
79 FINALIZE_LAST = FINALIZE_JITCODE
80 };
81
82 static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
83 static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;
84
85 /*
86 * This must be an upper bound, but we do not need the least upper bound, so
87 * we just exclude non-background objects.
88 */
89 static const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
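/*
 * Worked example, derived from the enum above: FINALIZE_OBJECT_LIMIT is 12
 * and FINALIZE_LIMIT is 21, so this bound evaluates to 21 - 12 / 2 == 15,
 * i.e. the six FINALIZE_OBJECT*_BACKGROUND kinds plus the nine non-object
 * kinds, whether or not all of the latter are actually background finalized.
 */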
90
91 /*
92 * A GC cell is the base class for all GC things.
93 */
94 struct Cell
95 {
96 public:
97 inline ArenaHeader *arenaHeader() const;
98 inline AllocKind tenuredGetAllocKind() const;
99 MOZ_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
100 MOZ_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
101 MOZ_ALWAYS_INLINE void unmark(uint32_t color) const;
102
103 inline JSRuntime *runtimeFromMainThread() const;
104 inline JS::shadow::Runtime *shadowRuntimeFromMainThread() const;
105 inline JS::Zone *tenuredZone() const;
106 inline JS::Zone *tenuredZoneFromAnyThread() const;
107 inline bool tenuredIsInsideZone(JS::Zone *zone) const;
108
109 // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
110 // thread can easily lead to races. Use this method very carefully.
111 inline JSRuntime *runtimeFromAnyThread() const;
112 inline JS::shadow::Runtime *shadowRuntimeFromAnyThread() const;
113
114 #ifdef DEBUG
115 inline bool isAligned() const;
116 inline bool isTenured() const;
117 #endif
118
119 protected:
120 inline uintptr_t address() const;
121 inline Chunk *chunk() const;
122 };
123
124 /*
125 * The mark bitmap has one bit per GC cell. For multi-cell GC things this
126 * wastes space but allows us to avoid expensive divisions by the thing's size
127 * when accessing the bitmap. In addition it allows us to use some bits for
128 * colored marking during cycle collection.
129 */
130 const size_t ArenaCellCount = size_t(1) << (ArenaShift - CellShift);
131 const size_t ArenaBitmapBits = ArenaCellCount;
132 const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
133 const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;
134
135 /*
136 * A FreeSpan represents a contiguous sequence of free cells in an Arena.
137 * |first| is the address of the first free cell in the span. |last| is the
138 * address of the last free cell in the span. This last cell holds a FreeSpan
139 * data structure for the next span unless this is the last span on the list
140 * of spans in the arena. For this last span |last| points to the last byte of
141 * the last thing in the arena and no linkage is stored there, so
142 * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
143 * fully used, this last span is empty and |first| == |last| + 1.
144 *
145 * Thus |first| < |last| implies that the span is either the last span with at
146 * least one element, or a non-last span with at least two elements. In both
147 * cases, to allocate a thing from this span we simply increment |first| by
148 * the allocation size.
149 *
150 * |first| == |last| implies that we have a one-element span that records the
151 * next span. So to allocate from it we update the span list head with a copy
152 * of the span stored at the |last| address, so that the following
153 * allocations will use that span.
154 *
155 * |first| > |last| implies that we have an empty last span and the arena is
156 * fully used.
157 *
158 * Also, only for the last span is (|last| & 1) != 0, as all allocation sizes
159 * are multiples of CellSize.
160 */
161 struct FreeSpan
162 {
163 uintptr_t first;
164 uintptr_t last;
165
166 public:
167 FreeSpan() {}
168
169 FreeSpan(uintptr_t first, uintptr_t last)
170 : first(first), last(last) {
171 checkSpan();
172 }
173
174 /*
175 * To minimize the size of the arena header the first span is encoded
176 * there as offsets from the arena start.
177 */
178 static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
179 static_assert(ArenaShift < 16, "Check that we can pack offsets into uint16_t.");
180 JS_ASSERT(firstOffset <= ArenaSize);
181 JS_ASSERT(lastOffset < ArenaSize);
182 JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
183 return firstOffset | (lastOffset << 16);
184 }
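/*
 * Illustrative packing, assuming ArenaShift == 12 so that both offsets fit
 * in 16 bits: encodeOffsets(0x60, 0xfff) == 0x60 | (0xfff << 16)
 * == 0x0fff0060.
 */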
185
186 /*
187 * Encoded offsets for a full arena when its first span is the last one
188 * and empty.
189 */
190 static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);
191
192 static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
193 JS_ASSERT(!(arenaAddr & ArenaMask));
194
195 size_t firstOffset = offsets & 0xFFFF;
196 size_t lastOffset = offsets >> 16;
197 JS_ASSERT(firstOffset <= ArenaSize);
198 JS_ASSERT(lastOffset < ArenaSize);
199
200 /*
201 * We must not use | when calculating first as firstOffset is
202 * ArenaMask + 1 for the empty span.
203 */
204 return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
205 }
206
207 void initAsEmpty(uintptr_t arenaAddr = 0) {
208 JS_ASSERT(!(arenaAddr & ArenaMask));
209 first = arenaAddr + ArenaSize;
210 last = arenaAddr | (ArenaSize - 1);
211 JS_ASSERT(isEmpty());
212 }
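/*
 * For example, assuming ArenaSize == 0x1000 and arenaAddr == 0x7f0000:
 * first == 0x7f1000 and last == 0x7f0fff, so first == last + 1, |last| is
 * odd (this is the terminal span), and isEmpty() holds.
 */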
213
214 bool isEmpty() const {
215 checkSpan();
216 return first > last;
217 }
218
219 bool hasNext() const {
220 checkSpan();
221 return !(last & uintptr_t(1));
222 }
223
224 const FreeSpan *nextSpan() const {
225 JS_ASSERT(hasNext());
226 return reinterpret_cast<FreeSpan *>(last);
227 }
228
229 FreeSpan *nextSpanUnchecked(size_t thingSize) const {
230 #ifdef DEBUG
231 uintptr_t lastOffset = last & ArenaMask;
232 JS_ASSERT(!(lastOffset & 1));
233 JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
234 #endif
235 return reinterpret_cast<FreeSpan *>(last);
236 }
237
238 uintptr_t arenaAddressUnchecked() const {
239 return last & ~ArenaMask;
240 }
241
242 uintptr_t arenaAddress() const {
243 checkSpan();
244 return arenaAddressUnchecked();
245 }
246
247 ArenaHeader *arenaHeader() const {
248 return reinterpret_cast<ArenaHeader *>(arenaAddress());
249 }
250
251 bool isSameNonEmptySpan(const FreeSpan *another) const {
252 JS_ASSERT(!isEmpty());
253 JS_ASSERT(!another->isEmpty());
254 return first == another->first && last == another->last;
255 }
256
257 bool isWithinArena(uintptr_t arenaAddr) const {
258 JS_ASSERT(!(arenaAddr & ArenaMask));
259
260 /* Return true for the last empty span as well. */
261 return arenaAddress() == arenaAddr;
262 }
263
264 size_t encodeAsOffsets() const {
265 /*
266 * We must use first - arenaAddress(), not first & ArenaMask as
267 * first == ArenaMask + 1 for an empty span.
268 */
269 uintptr_t arenaAddr = arenaAddress();
270 return encodeOffsets(first - arenaAddr, last & ArenaMask);
271 }
272
273 /* See comments before FreeSpan for details. */
274 MOZ_ALWAYS_INLINE void *allocate(size_t thingSize) {
275 JS_ASSERT(thingSize % CellSize == 0);
276 checkSpan();
277 uintptr_t thing = first;
278 if (thing < last) {
279 /* Bump-allocate from the current span. */
280 first = thing + thingSize;
281 } else if (MOZ_LIKELY(thing == last)) {
282 /*
283 * Move to the next span. We use MOZ_LIKELY because, without PGO,
284 * compilers mispredict == here as unlikely to succeed.
285 */
286 *this = *reinterpret_cast<FreeSpan *>(thing);
287 } else {
288 return nullptr;
289 }
290 checkSpan();
291 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
292 return reinterpret_cast<void *>(thing);
293 }
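/*
 * A sketch of the three cases above, assuming a hypothetical thingSize of 64
 * and a span covering [first, last]: while first < last we bump-allocate and
 * |first| advances by 64 per call; when first == last, the FreeSpan stored in
 * that final cell becomes the new list head; and once the terminal empty span
 * (first == last + 1) is reached, allocate() returns nullptr and the caller
 * has to obtain a new free span, e.g. from another arena.
 */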
294
295 /* A version of allocate when we know that the span is not empty. */
296 MOZ_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
297 JS_ASSERT(thingSize % CellSize == 0);
298 checkSpan();
299 uintptr_t thing = first;
300 if (thing < last) {
301 first = thing + thingSize;
302 } else {
303 JS_ASSERT(thing == last);
304 *this = *reinterpret_cast<FreeSpan *>(thing);
305 }
306 checkSpan();
307 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
308 return reinterpret_cast<void *>(thing);
309 }
310
311 /*
312 * Allocate from a newly allocated arena. We do not move the free list
313 * from the arena. Rather, we set the arena up as fully used during
314 * initialization, so to allocate we simply return the first thing in the
315 * arena and set the free list to point to the second.
316 */
317 MOZ_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
318 size_t thingSize) {
319 JS_ASSERT(!(arenaAddr & ArenaMask));
320 uintptr_t thing = arenaAddr | firstThingOffset;
321 first = thing + thingSize;
322 last = arenaAddr | ArenaMask;
323 checkSpan();
324 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
325 return reinterpret_cast<void *>(thing);
326 }
327
328 void checkSpan() const {
329 #ifdef DEBUG
330 /* We do not allow spans at the end of the address space. */
331 JS_ASSERT(last != uintptr_t(-1));
332 JS_ASSERT(first);
333 JS_ASSERT(last);
334 JS_ASSERT(first - 1 <= last);
335 uintptr_t arenaAddr = arenaAddressUnchecked();
336 if (last & 1) {
337 /* The span is the last. */
338 JS_ASSERT((last & ArenaMask) == ArenaMask);
339
340 if (first - 1 == last) {
341 /* The span is last and empty. The above |first| != 0 check
342 * implies that we are not at the end of the address space.
343 */
344 return;
345 }
346 size_t spanLength = last - first + 1;
347 JS_ASSERT(spanLength % CellSize == 0);
348
349 /* Start and end must belong to the same arena. */
350 JS_ASSERT((first & ~ArenaMask) == arenaAddr);
351 return;
352 }
353
354 /* The span is not the last and we have more spans to follow. */
355 JS_ASSERT(first <= last);
356 size_t spanLengthWithoutOneThing = last - first;
357 JS_ASSERT(spanLengthWithoutOneThing % CellSize == 0);
358
359 JS_ASSERT((first & ~ArenaMask) == arenaAddr);
360
361 /*
362 * If there is not enough space before the arena end to allocate one
363 * more thing, then the span must be marked as the last one to avoid
364 * storing a useless empty span reference.
365 */
366 size_t beforeTail = ArenaSize - (last & ArenaMask);
367 JS_ASSERT(beforeTail >= sizeof(FreeSpan) + CellSize);
368
369 FreeSpan *next = reinterpret_cast<FreeSpan *>(last);
370
371 /*
372 * The GC things on the list of free spans come from one arena
373 * and the spans are linked in ascending address order with
374 * at least one non-free thing between spans.
375 */
376 JS_ASSERT(last < next->first);
377 JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());
378
379 if (next->first > next->last) {
380 /*
381 * The next span is the empty span that terminates the list for
382 * arenas that do not have any free things at the end.
383 */
384 JS_ASSERT(next->first - 1 == next->last);
385 JS_ASSERT(arenaAddr + ArenaSize == next->first);
386 }
387 #endif
388 }
389
390 };
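/*
 * A minimal usage sketch (illustrative only; |arenaAddr| is a hypothetical
 * ArenaMask-aligned address and 0x40 a hypothetical first-thing offset):
 *
 *   FreeSpan span(arenaAddr + 0x40, arenaAddr | ArenaMask); // last span
 *   size_t packed = span.encodeAsOffsets();
 *   FreeSpan copy = FreeSpan::decodeOffsets(arenaAddr, packed);
 *   // copy.first == span.first && copy.last == span.last
 */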
391
392 /* Every arena has a header. */
393 struct ArenaHeader : public JS::shadow::ArenaHeader
394 {
395 friend struct FreeLists;
396
397 /*
398 * ArenaHeader::next has two purposes: when unallocated, it points to the
399 * next available Arena's header. When allocated, it points to the next
400 * arena of the same size class and compartment.
401 */
402 ArenaHeader *next;
403
404 private:
405 /*
406 * The first span of free things in the arena. We encode it as the start
407 * and end offsets within the arena, not as a FreeSpan structure, to
408 * minimize the header size.
409 */
410 size_t firstFreeSpanOffsets;
411
412 /*
413 * One of the AllocKind constants, or FINALIZE_LIMIT when the arena does not
414 * contain any GC things and is on the list of empty arenas in the GC
415 * chunk. The latter allows us to quickly check whether the arena is
416 * allocated during conservative GC scanning without searching for the arena
417 * in the list.
418 *
419 * We use 8 bits for the allocKind so the compiler can use byte-level memory
420 * instructions to access it.
421 */
422 size_t allocKind : 8;
423
424 /*
425 * When collecting we sometimes need to keep an auxiliary list of arenas,
426 * for which we use the following fields. This happens for several reasons:
427 *
428 * When recursive marking uses too much stack the marking is delayed and the
429 * corresponding arenas are put into a stack. To distinguish the bottom of
430 * the stack from the arenas not present in the stack we use the
431 * markOverflow flag to tag arenas on the stack.
432 *
433 * Delayed marking is also used for arenas that we allocate into during an
434 * incremental GC. In this case, we intend to mark all the objects in the
435 * arena, and it's faster to do this marking in bulk.
436 *
437 * When sweeping we keep track of which arenas have been allocated since the
438 * end of the mark phase. This allows us to tell whether a pointer to an
439 * unmarked object is yet to be finalized or has already been reallocated.
440 * We set the allocatedDuringIncremental flag for this and clear it at the
441 * end of the sweep phase.
442 *
443 * To minimize the ArenaHeader size we record the next linkage as
444 * arenaAddress() >> ArenaShift and pack it with the allocKind field and the
445 * flags.
446 */
447 public:
448 size_t hasDelayedMarking : 1;
449 size_t allocatedDuringIncremental : 1;
450 size_t markOverflow : 1;
451 size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
452 static_assert(ArenaShift >= 8 + 1 + 1 + 1,
453 "ArenaHeader::auxNextLink packing assumes that ArenaShift has enough bits to "
454 "cover allocKind and hasDelayedMarking.");
455
456 inline uintptr_t address() const;
457 inline Chunk *chunk() const;
458
459 bool allocated() const {
460 JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
461 return allocKind < size_t(FINALIZE_LIMIT);
462 }
463
464 void init(JS::Zone *zoneArg, AllocKind kind) {
465 JS_ASSERT(!allocated());
466 JS_ASSERT(!markOverflow);
467 JS_ASSERT(!allocatedDuringIncremental);
468 JS_ASSERT(!hasDelayedMarking);
469 zone = zoneArg;
470
471 static_assert(FINALIZE_LIMIT <= 255, "We must be able to fit the allockind into uint8_t.");
472 allocKind = size_t(kind);
473
474 /* See comments in FreeSpan::allocateFromNewArena. */
475 firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
476 }
477
478 void setAsNotAllocated() {
479 allocKind = size_t(FINALIZE_LIMIT);
480 markOverflow = 0;
481 allocatedDuringIncremental = 0;
482 hasDelayedMarking = 0;
483 auxNextLink = 0;
484 }
485
486 inline uintptr_t arenaAddress() const;
487 inline Arena *getArena();
488
489 AllocKind getAllocKind() const {
490 JS_ASSERT(allocated());
491 return AllocKind(allocKind);
492 }
493
494 inline size_t getThingSize() const;
495
496 bool hasFreeThings() const {
497 return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
498 }
499
500 inline bool isEmpty() const;
501
502 void setAsFullyUsed() {
503 firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
504 }
505
506 inline FreeSpan getFirstFreeSpan() const;
507 inline void setFirstFreeSpan(const FreeSpan *span);
508
509 #ifdef DEBUG
510 void checkSynchronizedWithFreeList() const;
511 #endif
512
513 inline ArenaHeader *getNextDelayedMarking() const;
514 inline void setNextDelayedMarking(ArenaHeader *aheader);
515 inline void unsetDelayedMarking();
516
517 inline ArenaHeader *getNextAllocDuringSweep() const;
518 inline void setNextAllocDuringSweep(ArenaHeader *aheader);
519 inline void unsetAllocDuringSweep();
520 };
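/*
 * Sketch of how the delayed-marking list is threaded through headers (the
 * real list head lives in the GC marker; |listHead| here is hypothetical):
 *
 *   aheader->setNextDelayedMarking(listHead); // stores listHead >> ArenaShift
 *   listHead = aheader;
 *   ...
 *   ArenaHeader *next = listHead->getNextDelayedMarking();
 *   listHead->unsetDelayedMarking();
 */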
521
522 struct Arena
523 {
524 /*
525 * Layout of an arena:
526 * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
527 * descriptor followed by some pad bytes. The remainder of the arena is
528 * filled with the array of T things. The pad bytes ensure that the thing
529 * array ends exactly at the end of the arena.
530 *
531 * +-------------+-----+----+----+-----+----+
532 * | ArenaHeader | pad | T0 | T1 | ... | Tn |
533 * +-------------+-----+----+----+-----+----+
534 *
535 * <----------------------------------------> = ArenaSize bytes
536 * <-------------------> = first thing offset
537 */
538 ArenaHeader aheader;
539 uint8_t data[ArenaSize - sizeof(ArenaHeader)];
540
541 private:
542 static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
543 static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
544
545 public:
546 static void staticAsserts();
547
548 static size_t thingSize(AllocKind kind) {
549 return ThingSizes[kind];
550 }
551
552 static size_t firstThingOffset(AllocKind kind) {
553 return FirstThingOffsets[kind];
554 }
555
556 static size_t thingsPerArena(size_t thingSize) {
557 JS_ASSERT(thingSize % CellSize == 0);
558
559 /* We should be able to fit FreeSpan in any GC thing. */
560 JS_ASSERT(thingSize >= sizeof(FreeSpan));
561
562 return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
563 }
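/*
 * Illustrative numbers, assuming a 64-bit build where sizeof(ArenaHeader)
 * is 32 and ArenaSize is 4096: for thingSize == 64 this yields
 * (4096 - 32) / 64 == 63 things per arena, a 4032-byte thing span, and a
 * first-thing offset of 4096 - 4032 == 64 bytes (header plus pad).
 */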
564
565 static size_t thingsSpan(size_t thingSize) {
566 return thingsPerArena(thingSize) * thingSize;
567 }
568
569 static bool isAligned(uintptr_t thing, size_t thingSize) {
570 /* Things end at the arena end. */
571 uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
572 return tailOffset % thingSize == 0;
573 }
574
575 uintptr_t address() const {
576 return aheader.address();
577 }
578
579 uintptr_t thingsStart(AllocKind thingKind) {
580 return address() | firstThingOffset(thingKind);
581 }
582
583 uintptr_t thingsEnd() {
584 return address() + ArenaSize;
585 }
586
587 void setAsFullyUnused(AllocKind thingKind);
588
589 template <typename T>
590 bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
591 };
592
593 static_assert(sizeof(Arena) == ArenaSize, "The hardcoded arena size must match the struct size.");
594
595 inline size_t
596 ArenaHeader::getThingSize() const
597 {
598 JS_ASSERT(allocated());
599 return Arena::thingSize(getAllocKind());
600 }
601
602 /*
603 * The tail of the chunk info is shared between all chunks in the system, both
604 * nursery and tenured. This structure is locatable from any GC pointer by
605 * aligning to 1MiB.
606 */
607 struct ChunkTrailer
608 {
609 /* The index of the chunk in the nursery, or LocationTenuredHeap. */
610 uint32_t location;
611
612 #if JS_BITS_PER_WORD == 64
613 uint32_t padding;
614 #endif
615
616 JSRuntime *runtime;
617 };
618
619 static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t), "ChunkTrailer size is incorrect.");
620
621 /* The chunk header (located at the end of the chunk to preserve arena alignment). */
622 struct ChunkInfo
623 {
624 Chunk *next;
625 Chunk **prevp;
626
627 /* Free arenas are linked together with aheader.next. */
628 ArenaHeader *freeArenasHead;
629
630 #if JS_BITS_PER_WORD == 32
631 /*
632 * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
633 * architecture-independent.
634 */
635 char padding[20];
636 #endif
637
638 /*
639 * Decommitted arenas are tracked by a bitmap in the chunk header. We use
640 * this offset to start our search iteration close to a decommitted arena
641 * that we can allocate.
642 */
643 uint32_t lastDecommittedArenaOffset;
644
645 /* Number of free arenas, either committed or decommitted. */
646 uint32_t numArenasFree;
647
648 /* Number of free, committed arenas. */
649 uint32_t numArenasFreeCommitted;
650
651 /* Number of GC cycles this chunk has survived. */
652 uint32_t age;
653
654 /* Information shared by all Chunk types. */
655 ChunkTrailer trailer;
656 };
657
658 /*
659 * Calculating ArenasPerChunk:
660 *
661 * In order to figure out how many Arenas will fit in a chunk, we need to know
662 * how much extra space is available after we allocate the header data. This
663 * is a problem because the header size depends on the number of arenas in the
664 * chunk. The two dependent fields are bitmap and decommittedArenas.
665 *
666 * For the mark bitmap, we know that each arena will use a fixed number of full
667 * bytes: ArenaBitmapBytes. The full size of the header data is this number
668 * multiplied by the eventual number of arenas we have in the chunk. We,
669 * conceptually, distribute this header data among the individual arenas and do
670 * not include it in the header. This way we do not have to worry about its
671 * variable size: it gets attached to the variable number we are computing.
672 *
673 * For the decommitted arena bitmap, we only have 1 bit per arena, so this
674 * technique will not work. Instead, we observe that we do not have enough
675 * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
676 * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
677 * This will not become 63 bytes unless we double the data required in the
678 * header. Therefore, we just compute the number of bytes required to track
679 * every possible arena and do not worry about slop bits, since there are too
680 * few to usefully allocate.
681 *
682 * To actually compute the number of arenas we can allocate in a chunk, we
683 * divide the amount of available space less the header info (not including
684 * the mark bitmap which is distributed into the arena size) by the size of
685 * the arena (with the mark bitmap bytes it uses).
686 */
687 const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
688 const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
689 const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
690 const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
691 static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
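/*
 * Illustrative arithmetic behind the 252, assuming 1 MiB chunks, 4 KiB
 * arenas, 8-byte cells and sizeof(ChunkInfo) == 56: ArenaBitmapBytes is
 * 512 / 8 == 64, so BytesPerArenaWithHeader == 4096 + 64 == 4160;
 * ChunkDecommitBitmapBytes == 1048576 / 4096 / 8 == 32; and
 * (1048576 - 56 - 32) / 4160 == 1048488 / 4160 == 252.
 */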
692
693 /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
694 struct ChunkBitmap
695 {
696 volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
697
698 public:
699 ChunkBitmap() { }
700
701 MOZ_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
702 uintptr_t **wordp, uintptr_t *maskp)
703 {
704 GetGCThingMarkWordAndMask(cell, color, wordp, maskp);
705 }
706
707 MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarked(const Cell *cell, uint32_t color) {
708 uintptr_t *word, mask;
709 getMarkWordAndMask(cell, color, &word, &mask);
710 return *word & mask;
711 }
712
713 MOZ_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
714 uintptr_t *word, mask;
715 getMarkWordAndMask(cell, BLACK, &word, &mask);
716 if (*word & mask)
717 return false;
718 *word |= mask;
719 if (color != BLACK) {
720 /*
721 * We use getMarkWordAndMask to recalculate both mask and word as
722 * doing just mask << color may overflow the mask.
723 */
724 getMarkWordAndMask(cell, color, &word, &mask);
725 if (*word & mask)
726 return false;
727 *word |= mask;
728 }
729 return true;
730 }
731
732 MOZ_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
733 uintptr_t *word, mask;
734 getMarkWordAndMask(cell, color, &word, &mask);
735 *word &= ~mask;
736 }
737
738 void clear() {
739 memset((void *)bitmap, 0, sizeof(bitmap));
740 }
741
742 uintptr_t *arenaBits(ArenaHeader *aheader) {
743 static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
744 "We assume that the part of the bitmap corresponding to the arena "
745 "has the exact number of words so we do not need to deal with a word "
746 "that covers bits from two arenas.");
747
748 uintptr_t *word, unused;
749 getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
750 return word;
751 }
752 };
753
754 static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
755 "Ensure our ChunkBitmap actually covers all arenas.");
756 static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
757 "Ensure that the mark bitmap has the right number of bits.");
758
759 typedef BitArray<ArenasPerChunk> PerArenaBitmap;
760
761 const size_t ChunkPadSize = ChunkSize
762 - (sizeof(Arena) * ArenasPerChunk)
763 - sizeof(ChunkBitmap)
764 - sizeof(PerArenaBitmap)
765 - sizeof(ChunkInfo);
766 static_assert(ChunkPadSize < BytesPerArenaWithHeader,
767 "If the chunk padding is larger than an arena, we should have one more arena.");
768
769 /*
770 * Chunks contain arenas and associated data structures (mark bitmap, delayed
771 * marking state).
772 */
773 struct Chunk
774 {
775 Arena arenas[ArenasPerChunk];
776
777 /* Pad to full size to ensure cache alignment of ChunkInfo. */
778 uint8_t padding[ChunkPadSize];
779
780 ChunkBitmap bitmap;
781 PerArenaBitmap decommittedArenas;
782 ChunkInfo info;
783
784 static Chunk *fromAddress(uintptr_t addr) {
785 addr &= ~ChunkMask;
786 return reinterpret_cast<Chunk *>(addr);
787 }
788
789 static bool withinArenasRange(uintptr_t addr) {
790 uintptr_t offset = addr & ChunkMask;
791 return offset < ArenasPerChunk * ArenaSize;
792 }
793
794 static size_t arenaIndex(uintptr_t addr) {
795 JS_ASSERT(withinArenasRange(addr));
796 return (addr & ChunkMask) >> ArenaShift;
797 }
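/*
 * Illustrative address math for a tenured cell (assuming ChunkShift == 20
 * and ArenaShift == 12; |cell| is a hypothetical Cell pointer):
 *
 *   Chunk *chunk = Chunk::fromAddress(uintptr_t(cell)); // clear low 20 bits
 *   size_t index = Chunk::arenaIndex(uintptr_t(cell));  // bits 12..19
 *   Arena *arena = &chunk->arenas[index];
 */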
798
799 uintptr_t address() const {
800 uintptr_t addr = reinterpret_cast<uintptr_t>(this);
801 JS_ASSERT(!(addr & ChunkMask));
802 return addr;
803 }
804
805 bool unused() const {
806 return info.numArenasFree == ArenasPerChunk;
807 }
808
809 bool hasAvailableArenas() const {
810 return info.numArenasFree != 0;
811 }
812
813 inline void addToAvailableList(JS::Zone *zone);
814 inline void insertToAvailableList(Chunk **insertPoint);
815 inline void removeFromAvailableList();
816
817 ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind);
818
819 void releaseArena(ArenaHeader *aheader);
820 void recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind);
821
822 static Chunk *allocate(JSRuntime *rt);
823
824 void decommitAllArenas(JSRuntime *rt) {
825 decommittedArenas.clear(true);
826 MarkPagesUnused(rt, &arenas[0], ArenasPerChunk * ArenaSize);
827
828 info.freeArenasHead = nullptr;
829 info.lastDecommittedArenaOffset = 0;
830 info.numArenasFree = ArenasPerChunk;
831 info.numArenasFreeCommitted = 0;
832 }
833
834 /* Must be called with the GC lock taken. */
835 static inline void release(JSRuntime *rt, Chunk *chunk);
836 static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
837
838 /* Must be called with the GC lock taken. */
839 inline void prepareToBeFreed(JSRuntime *rt);
840
841 /*
842 * Assuming that info.prevp points to the next field of the previous
843 * chunk in a doubly-linked list, get that chunk.
844 */
845 Chunk *getPrevious() {
846 JS_ASSERT(info.prevp);
847 return fromPointerToNext(info.prevp);
848 }
849
850 /* Get the chunk from a pointer to its info.next field. */
851 static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
852 uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
853 JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
854 return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
855 }
856
857 private:
858 inline void init(JSRuntime *rt);
859
860 /* Search for a decommitted arena to allocate. */
861 unsigned findDecommittedArenaOffset();
862 ArenaHeader* fetchNextDecommittedArena();
863
864 public:
865 /* Unlink and return the freeArenasHead. */
866 inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
867
868 inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
869 };
870
871 static_assert(sizeof(Chunk) == ChunkSize,
872 "Ensure the hardcoded chunk size definition actually matches the struct.");
873 static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
874 "The hardcoded API bitmap offset must match the actual offset.");
875 static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
876 offsetof(ChunkInfo, trailer) +
877 offsetof(ChunkTrailer, runtime),
878 "The hardcoded API runtime offset must match the actual offset.");
879
880 inline uintptr_t
881 ArenaHeader::address() const
882 {
883 uintptr_t addr = reinterpret_cast<uintptr_t>(this);
884 JS_ASSERT(!(addr & ArenaMask));
885 JS_ASSERT(Chunk::withinArenasRange(addr));
886 return addr;
887 }
888
889 inline Chunk *
890 ArenaHeader::chunk() const
891 {
892 return Chunk::fromAddress(address());
893 }
894
895 inline uintptr_t
896 ArenaHeader::arenaAddress() const
897 {
898 return address();
899 }
900
901 inline Arena *
902 ArenaHeader::getArena()
903 {
904 return reinterpret_cast<Arena *>(arenaAddress());
905 }
906
907 inline bool
908 ArenaHeader::isEmpty() const
909 {
910 /* Arena is empty if its first span covers the whole arena. */
911 JS_ASSERT(allocated());
912 size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
913 return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
914 }
915
916 FreeSpan
917 ArenaHeader::getFirstFreeSpan() const
918 {
919 #ifdef DEBUG
920 checkSynchronizedWithFreeList();
921 #endif
922 return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
923 }
924
925 void
926 ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
927 {
928 JS_ASSERT(span->isWithinArena(arenaAddress()));
929 firstFreeSpanOffsets = span->encodeAsOffsets();
930 }
931
932 inline ArenaHeader *
933 ArenaHeader::getNextDelayedMarking() const
934 {
935 JS_ASSERT(hasDelayedMarking);
936 return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
937 }
938
939 inline void
940 ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
941 {
942 JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
943 JS_ASSERT(!auxNextLink && !hasDelayedMarking);
944 hasDelayedMarking = 1;
945 auxNextLink = aheader->arenaAddress() >> ArenaShift;
946 }
947
948 inline void
949 ArenaHeader::unsetDelayedMarking()
950 {
951 JS_ASSERT(hasDelayedMarking);
952 hasDelayedMarking = 0;
953 auxNextLink = 0;
954 }
955
956 inline ArenaHeader *
957 ArenaHeader::getNextAllocDuringSweep() const
958 {
959 JS_ASSERT(allocatedDuringIncremental);
960 return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
961 }
962
963 inline void
964 ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
965 {
966 JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
967 allocatedDuringIncremental = 1;
968 auxNextLink = aheader->arenaAddress() >> ArenaShift;
969 }
970
971 inline void
972 ArenaHeader::unsetAllocDuringSweep()
973 {
974 JS_ASSERT(allocatedDuringIncremental);
975 allocatedDuringIncremental = 0;
976 auxNextLink = 0;
977 }
978
979 static void
980 AssertValidColor(const void *thing, uint32_t color)
981 {
982 #ifdef DEBUG
983 ArenaHeader *aheader = reinterpret_cast<const Cell *>(thing)->arenaHeader();
984 JS_ASSERT(color < aheader->getThingSize() / CellSize);
985 #endif
986 }
987
988 inline ArenaHeader *
989 Cell::arenaHeader() const
990 {
991 JS_ASSERT(isTenured());
992 uintptr_t addr = address();
993 addr &= ~ArenaMask;
994 return reinterpret_cast<ArenaHeader *>(addr);
995 }
996
997 inline JSRuntime *
998 Cell::runtimeFromMainThread() const
999 {
1000 JSRuntime *rt = chunk()->info.trailer.runtime;
1001 JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
1002 return rt;
1003 }
1004
1005 inline JS::shadow::Runtime *
1006 Cell::shadowRuntimeFromMainThread() const
1007 {
1008 return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread());
1009 }
1010
1011 inline JSRuntime *
1012 Cell::runtimeFromAnyThread() const
1013 {
1014 return chunk()->info.trailer.runtime;
1015 }
1016
1017 inline JS::shadow::Runtime *
1018 Cell::shadowRuntimeFromAnyThread() const
1019 {
1020 return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
1021 }
1022
1023 bool
1024 Cell::isMarked(uint32_t color /* = BLACK */) const
1025 {
1026 JS_ASSERT(isTenured());
1027 JS_ASSERT(arenaHeader()->allocated());
1028 AssertValidColor(this, color);
1029 return chunk()->bitmap.isMarked(this, color);
1030 }
1031
1032 bool
1033 Cell::markIfUnmarked(uint32_t color /* = BLACK */) const
1034 {
1035 JS_ASSERT(isTenured());
1036 AssertValidColor(this, color);
1037 return chunk()->bitmap.markIfUnmarked(this, color);
1038 }
1039
1040 void
1041 Cell::unmark(uint32_t color) const
1042 {
1043 JS_ASSERT(isTenured());
1044 JS_ASSERT(color != BLACK);
1045 AssertValidColor(this, color);
1046 chunk()->bitmap.unmark(this, color);
1047 }
1048
1049 JS::Zone *
1050 Cell::tenuredZone() const
1051 {
1052 JS::Zone *zone = arenaHeader()->zone;
1053 JS_ASSERT(CurrentThreadCanAccessZone(zone));
1054 JS_ASSERT(isTenured());
1055 return zone;
1056 }
1057
1058 JS::Zone *
1059 Cell::tenuredZoneFromAnyThread() const
1060 {
1061 JS_ASSERT(isTenured());
1062 return arenaHeader()->zone;
1063 }
1064
1065 bool
1066 Cell::tenuredIsInsideZone(JS::Zone *zone) const
1067 {
1068 JS_ASSERT(isTenured());
1069 return zone == arenaHeader()->zone;
1070 }
1071
1072 #ifdef DEBUG
1073 bool
1074 Cell::isAligned() const
1075 {
1076 return Arena::isAligned(address(), arenaHeader()->getThingSize());
1077 }
1078
1079 bool
1080 Cell::isTenured() const
1081 {
1082 #ifdef JSGC_GENERATIONAL
1083 JS::shadow::Runtime *rt = js::gc::GetGCThingRuntime(this);
1084 return !IsInsideNursery(rt, this);
1085 #endif
1086 return true;
1087 }
1088 #endif
1089
1090 inline uintptr_t
1091 Cell::address() const
1092 {
1093 uintptr_t addr = uintptr_t(this);
1094 JS_ASSERT(addr % CellSize == 0);
1095 JS_ASSERT(Chunk::withinArenasRange(addr));
1096 return addr;
1097 }
1098
1099 Chunk *
1100 Cell::chunk() const
1101 {
1102 uintptr_t addr = uintptr_t(this);
1103 JS_ASSERT(addr % CellSize == 0);
1104 addr &= ~(ChunkSize - 1);
1105 return reinterpret_cast<Chunk *>(addr);
1106 }
1107
1108 inline bool
1109 InFreeList(ArenaHeader *aheader, void *thing)
1110 {
1111 if (!aheader->hasFreeThings())
1112 return false;
1113
1114 FreeSpan firstSpan(aheader->getFirstFreeSpan());
1115 uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
1116
1117 for (const FreeSpan *span = &firstSpan;;) {
1118 /* If the thing comes before the current span, it's not free. */
1119 if (addr < span->first)
1120 return false;
1121
1122 /*
1123 * If we find it inside the span, it's dead. We use "<=" and not
1124 * "<" even for the last span, as we know that the thing is inside the
1125 * arena and thus, for the last span, thing <= span->last.
1126 */
1127 if (addr <= span->last)
1128 return true;
1129
1130 /*
1131 * The last possible empty span is an the end of the arena. Here
1132 * span->end < thing < thingsEnd and so we must have more spans.
1133 */
1134 span = span->nextSpan();
1135 }
1136 }
1137
1138 } /* namespace gc */
1139
1140 gc::AllocKind
1141 gc::Cell::tenuredGetAllocKind() const
1142 {
1143 return arenaHeader()->getAllocKind();
1144 }
1145
1146 } /* namespace js */
1147
1148 #endif /* gc_Heap_h */
