js/src/gc/Heap.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to
private browsing mode or the privacy.thirdparty.isolate preference, and
implement it in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #ifndef gc_Heap_h
michael@0 8 #define gc_Heap_h
michael@0 9
michael@0 10 #include "mozilla/Attributes.h"
michael@0 11 #include "mozilla/PodOperations.h"
michael@0 12
michael@0 13 #include <stddef.h>
michael@0 14 #include <stdint.h>
michael@0 15
michael@0 16 #include "jspubtd.h"
michael@0 17 #include "jstypes.h"
michael@0 18 #include "jsutil.h"
michael@0 19
michael@0 20 #include "ds/BitArray.h"
michael@0 21 #include "gc/Memory.h"
michael@0 22 #include "js/HeapAPI.h"
michael@0 23
michael@0 24 struct JSCompartment;
michael@0 25
michael@0 26 struct JSRuntime;
michael@0 27
michael@0 28 namespace JS {
michael@0 29 namespace shadow {
michael@0 30 class Runtime;
michael@0 31 }
michael@0 32 }
michael@0 33
michael@0 34 namespace js {
michael@0 35
michael@0 36 class FreeOp;
michael@0 37
michael@0 38 namespace gc {
michael@0 39
michael@0 40 struct Arena;
michael@0 41 struct ArenaList;
michael@0 42 struct ArenaHeader;
michael@0 43 struct Chunk;
michael@0 44
michael@0 45 /*
michael@0 46 * This flag allows an allocation site to request a specific heap based upon the
michael@0 47 * estimated lifetime or lifetime requirements of objects allocated from that
michael@0 48 * site.
michael@0 49 */
michael@0 50 enum InitialHeap {
michael@0 51 DefaultHeap,
michael@0 52 TenuredHeap
michael@0 53 };
michael@0 54
michael@0 55 /* The GC allocation kinds. */
michael@0 56 enum AllocKind {
michael@0 57 FINALIZE_OBJECT0,
michael@0 58 FINALIZE_OBJECT0_BACKGROUND,
michael@0 59 FINALIZE_OBJECT2,
michael@0 60 FINALIZE_OBJECT2_BACKGROUND,
michael@0 61 FINALIZE_OBJECT4,
michael@0 62 FINALIZE_OBJECT4_BACKGROUND,
michael@0 63 FINALIZE_OBJECT8,
michael@0 64 FINALIZE_OBJECT8_BACKGROUND,
michael@0 65 FINALIZE_OBJECT12,
michael@0 66 FINALIZE_OBJECT12_BACKGROUND,
michael@0 67 FINALIZE_OBJECT16,
michael@0 68 FINALIZE_OBJECT16_BACKGROUND,
michael@0 69 FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
michael@0 70 FINALIZE_SCRIPT,
michael@0 71 FINALIZE_LAZY_SCRIPT,
michael@0 72 FINALIZE_SHAPE,
michael@0 73 FINALIZE_BASE_SHAPE,
michael@0 74 FINALIZE_TYPE_OBJECT,
michael@0 75 FINALIZE_FAT_INLINE_STRING,
michael@0 76 FINALIZE_STRING,
michael@0 77 FINALIZE_EXTERNAL_STRING,
michael@0 78 FINALIZE_JITCODE,
michael@0 79 FINALIZE_LAST = FINALIZE_JITCODE
michael@0 80 };
michael@0 81
michael@0 82 static const unsigned FINALIZE_LIMIT = FINALIZE_LAST + 1;
michael@0 83 static const unsigned FINALIZE_OBJECT_LIMIT = FINALIZE_OBJECT_LAST + 1;
michael@0 84
michael@0 85 /*
michael@0 86 * This must be an upper bound, but we do not need the least upper bound, so
michael@0 87 * we just exclude non-background objects.
michael@0 88 */
michael@0 89 static const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
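/*
 * Editorial note (not part of the original header): a worked instance of the
 * bound above, using the enum as currently defined. FINALIZE_LIMIT is 21 and
 * FINALIZE_OBJECT_LIMIT is 12, so the bound is 21 - 12 / 2 = 15; that is, it
 * counts every kind except the six foreground (non-background) object kinds,
 * which is a safe upper bound on the number of background-finalized kinds.
 */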
michael@0 90
michael@0 91 /*
michael@0 92 * A GC cell is the base class for all GC things.
michael@0 93 */
michael@0 94 struct Cell
michael@0 95 {
michael@0 96 public:
michael@0 97 inline ArenaHeader *arenaHeader() const;
michael@0 98 inline AllocKind tenuredGetAllocKind() const;
michael@0 99 MOZ_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
michael@0 100 MOZ_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
michael@0 101 MOZ_ALWAYS_INLINE void unmark(uint32_t color) const;
michael@0 102
michael@0 103 inline JSRuntime *runtimeFromMainThread() const;
michael@0 104 inline JS::shadow::Runtime *shadowRuntimeFromMainThread() const;
michael@0 105 inline JS::Zone *tenuredZone() const;
michael@0 106 inline JS::Zone *tenuredZoneFromAnyThread() const;
michael@0 107 inline bool tenuredIsInsideZone(JS::Zone *zone) const;
michael@0 108
michael@0 109 // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
michael@0 110 // thread can easily lead to races. Use this method very carefully.
michael@0 111 inline JSRuntime *runtimeFromAnyThread() const;
michael@0 112 inline JS::shadow::Runtime *shadowRuntimeFromAnyThread() const;
michael@0 113
michael@0 114 #ifdef DEBUG
michael@0 115 inline bool isAligned() const;
michael@0 116 inline bool isTenured() const;
michael@0 117 #endif
michael@0 118
michael@0 119 protected:
michael@0 120 inline uintptr_t address() const;
michael@0 121 inline Chunk *chunk() const;
michael@0 122 };
michael@0 123
michael@0 124 /*
michael@0 125 * The mark bitmap has one bit for each GC cell. For multi-cell GC things this
michael@0 126 * wastes space but allows us to avoid expensive divisions by the thing's size
michael@0 127 * when accessing the bitmap. In addition, this allows us to use some bits for
michael@0 128 * colored marking during the cycle GC.
michael@0 129 */
michael@0 130 const size_t ArenaCellCount = size_t(1) << (ArenaShift - CellShift);
michael@0 131 const size_t ArenaBitmapBits = ArenaCellCount;
michael@0 132 const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
michael@0 133 const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;
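/*
 * Editorial note (not part of the original header): with the usual values
 * from js/HeapAPI.h -- ArenaShift == 12 (4096-byte arenas) and CellShift == 3
 * (8-byte cells) -- these work out to ArenaCellCount == 512,
 * ArenaBitmapBytes == 64, and ArenaBitmapWords == 8 on a 64-bit target
 * (16 on a 32-bit target).
 */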
michael@0 134
michael@0 135 /*
michael@0 136 * A FreeSpan represents a contiguous sequence of free cells in an Arena.
michael@0 137 * |first| is the address of the first free cell in the span. |last| is the
michael@0 138 * address of the last free cell in the span. This last cell holds a FreeSpan
michael@0 139 * data structure for the next span unless this is the last span on the list
michael@0 140 * of spans in the arena. For this last span |last| points to the last byte of
michael@0 141 * the last thing in the arena and no linkage is stored there, so
michael@0 142 * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
michael@0 143 * fully used this last span is empty and |first| == |last + 1|.
michael@0 144 *
michael@0 145 * Thus |first| < |last| implies that we have either the last span with at least
michael@0 146 * one element or that the span is not the last and contains at least 2
michael@0 147 * elements. In both cases, to allocate a thing from this span we simply need
michael@0 148 * to increment |first| by the allocation size.
michael@0 149 *
michael@0 150 * |first| == |last| implies that we have a one element span that records the
michael@0 151 * next span. So to allocate from it we need to update the span list head
michael@0 152 * with a copy of the span stored at |last| address so the following
michael@0 153 * allocations will use that span.
michael@0 154 *
michael@0 155 * |first| > |last| implies that we have an empty last span and the arena is
michael@0 156 * fully used.
michael@0 157 *
michael@0 158 * Also, (|last| & 1) != 0 only for the last span, as all allocation sizes are
michael@0 159 * multiples of CellSize.
michael@0 160 */
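/*
 * Editorial sketch (not part of the original header): a concrete reading of
 * the invariants above, using a hypothetical arena at address 0x1000 with
 * ArenaSize == 4096 and 64-byte things. A span with first == 0x1040 and
 * last == 0x1100 has first < last, so allocation just bumps first to 0x1080.
 * Once first reaches last == 0x1100, the one remaining cell at 0x1100 holds
 * the next FreeSpan, so allocation copies that span into the list head. The
 * terminating empty span has first == 0x2000 and last == 0x1fff
 * (first > last), i.e. the arena tail is fully used. Note that only the last
 * span has an odd |last|.
 */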
michael@0 161 struct FreeSpan
michael@0 162 {
michael@0 163 uintptr_t first;
michael@0 164 uintptr_t last;
michael@0 165
michael@0 166 public:
michael@0 167 FreeSpan() {}
michael@0 168
michael@0 169 FreeSpan(uintptr_t first, uintptr_t last)
michael@0 170 : first(first), last(last) {
michael@0 171 checkSpan();
michael@0 172 }
michael@0 173
michael@0 174 /*
michael@0 175 * To minimize the size of the arena header, the first span is encoded
michael@0 176 * there as offsets from the arena start.
michael@0 177 */
michael@0 178 static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
michael@0 179 static_assert(ArenaShift < 16, "Check that we can pack offsets into uint16_t.");
michael@0 180 JS_ASSERT(firstOffset <= ArenaSize);
michael@0 181 JS_ASSERT(lastOffset < ArenaSize);
michael@0 182 JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
michael@0 183 return firstOffset | (lastOffset << 16);
michael@0 184 }
michael@0 185
michael@0 186 /*
michael@0 187 * Encoded offsets for a full arena when its first span is the last one
michael@0 188 * and empty.
michael@0 189 */
michael@0 190 static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);
michael@0 191
michael@0 192 static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
michael@0 193 JS_ASSERT(!(arenaAddr & ArenaMask));
michael@0 194
michael@0 195 size_t firstOffset = offsets & 0xFFFF;
michael@0 196 size_t lastOffset = offsets >> 16;
michael@0 197 JS_ASSERT(firstOffset <= ArenaSize);
michael@0 198 JS_ASSERT(lastOffset < ArenaSize);
michael@0 199
michael@0 200 /*
michael@0 201 * We must not use | when calculating first as firstOffset is
michael@0 202 * ArenaMask + 1 for the empty span.
michael@0 203 */
michael@0 204 return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
michael@0 205 }
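    /*
     * Editorial sketch (not part of the original header): a round trip
     * through the encoding above for a hypothetical arena at 0x1000
     * (ArenaSize == 4096) whose first free span is the last one and covers
     * offsets 0xf80..0xfff. encodeOffsets(0xf80, 0xfff) packs them as
     * 0xf80 | (0xfff << 16) == 0x0fff0f80, and
     * decodeOffsets(0x1000, 0x0fff0f80) recovers FreeSpan(0x1f80, 0x1fff).
     * For the empty first span of a full arena, FullArenaOffsets encodes
     * firstOffset == ArenaSize and lastOffset == ArenaSize - 1, which is why
     * decodeOffsets must add firstOffset rather than OR it in.
     */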
michael@0 206
michael@0 207 void initAsEmpty(uintptr_t arenaAddr = 0) {
michael@0 208 JS_ASSERT(!(arenaAddr & ArenaMask));
michael@0 209 first = arenaAddr + ArenaSize;
michael@0 210 last = arenaAddr | (ArenaSize - 1);
michael@0 211 JS_ASSERT(isEmpty());
michael@0 212 }
michael@0 213
michael@0 214 bool isEmpty() const {
michael@0 215 checkSpan();
michael@0 216 return first > last;
michael@0 217 }
michael@0 218
michael@0 219 bool hasNext() const {
michael@0 220 checkSpan();
michael@0 221 return !(last & uintptr_t(1));
michael@0 222 }
michael@0 223
michael@0 224 const FreeSpan *nextSpan() const {
michael@0 225 JS_ASSERT(hasNext());
michael@0 226 return reinterpret_cast<FreeSpan *>(last);
michael@0 227 }
michael@0 228
michael@0 229 FreeSpan *nextSpanUnchecked(size_t thingSize) const {
michael@0 230 #ifdef DEBUG
michael@0 231 uintptr_t lastOffset = last & ArenaMask;
michael@0 232 JS_ASSERT(!(lastOffset & 1));
michael@0 233 JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
michael@0 234 #endif
michael@0 235 return reinterpret_cast<FreeSpan *>(last);
michael@0 236 }
michael@0 237
michael@0 238 uintptr_t arenaAddressUnchecked() const {
michael@0 239 return last & ~ArenaMask;
michael@0 240 }
michael@0 241
michael@0 242 uintptr_t arenaAddress() const {
michael@0 243 checkSpan();
michael@0 244 return arenaAddressUnchecked();
michael@0 245 }
michael@0 246
michael@0 247 ArenaHeader *arenaHeader() const {
michael@0 248 return reinterpret_cast<ArenaHeader *>(arenaAddress());
michael@0 249 }
michael@0 250
michael@0 251 bool isSameNonEmptySpan(const FreeSpan *another) const {
michael@0 252 JS_ASSERT(!isEmpty());
michael@0 253 JS_ASSERT(!another->isEmpty());
michael@0 254 return first == another->first && last == another->last;
michael@0 255 }
michael@0 256
michael@0 257 bool isWithinArena(uintptr_t arenaAddr) const {
michael@0 258 JS_ASSERT(!(arenaAddr & ArenaMask));
michael@0 259
michael@0 260 /* Return true for the last empty span as well. */
michael@0 261 return arenaAddress() == arenaAddr;
michael@0 262 }
michael@0 263
michael@0 264 size_t encodeAsOffsets() const {
michael@0 265 /*
michael@0 266 * We must use first - arenaAddress(), not first & ArenaMask as
michael@0 267 * first == ArenaMask + 1 for an empty span.
michael@0 268 */
michael@0 269 uintptr_t arenaAddr = arenaAddress();
michael@0 270 return encodeOffsets(first - arenaAddr, last & ArenaMask);
michael@0 271 }
michael@0 272
michael@0 273 /* See comments before FreeSpan for details. */
michael@0 274 MOZ_ALWAYS_INLINE void *allocate(size_t thingSize) {
michael@0 275 JS_ASSERT(thingSize % CellSize == 0);
michael@0 276 checkSpan();
michael@0 277 uintptr_t thing = first;
michael@0 278 if (thing < last) {
michael@0 279 /* Bump-allocate from the current span. */
michael@0 280 first = thing + thingSize;
michael@0 281 } else if (MOZ_LIKELY(thing == last)) {
michael@0 282 /*
michael@0 283 * Move to the next span. We use MOZ_LIKELY as without PGO
michael@0 284 * compilers mis-predict == here as unlikely to succeed.
michael@0 285 */
michael@0 286 *this = *reinterpret_cast<FreeSpan *>(thing);
michael@0 287 } else {
michael@0 288 return nullptr;
michael@0 289 }
michael@0 290 checkSpan();
michael@0 291 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
michael@0 292 return reinterpret_cast<void *>(thing);
michael@0 293 }
michael@0 294
michael@0 295 /* A version of allocate when we know that the span is not empty. */
michael@0 296 MOZ_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
michael@0 297 JS_ASSERT(thingSize % CellSize == 0);
michael@0 298 checkSpan();
michael@0 299 uintptr_t thing = first;
michael@0 300 if (thing < last) {
michael@0 301 first = thing + thingSize;
michael@0 302 } else {
michael@0 303 JS_ASSERT(thing == last);
michael@0 304 *this = *reinterpret_cast<FreeSpan *>(thing);
michael@0 305 }
michael@0 306 checkSpan();
michael@0 307 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
michael@0 308 return reinterpret_cast<void *>(thing);
michael@0 309 }
michael@0 310
michael@0 311 /*
michael@0 312 * Allocate from a newly allocated arena. We do not move the free list
michael@0 313 * from the arena. Rather, we set the arena up as fully used during
michael@0 314 * initialization, so to allocate we simply return the first thing in the
michael@0 315 * arena and set the free list to point to the second.
michael@0 316 */
michael@0 317 MOZ_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
michael@0 318 size_t thingSize) {
michael@0 319 JS_ASSERT(!(arenaAddr & ArenaMask));
michael@0 320 uintptr_t thing = arenaAddr | firstThingOffset;
michael@0 321 first = thing + thingSize;
michael@0 322 last = arenaAddr | ArenaMask;
michael@0 323 checkSpan();
michael@0 324 JS_EXTRA_POISON(reinterpret_cast<void *>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
michael@0 325 return reinterpret_cast<void *>(thing);
michael@0 326 }
michael@0 327
michael@0 328 void checkSpan() const {
michael@0 329 #ifdef DEBUG
michael@0 330 /* We do not allow spans at the end of the address space. */
michael@0 331 JS_ASSERT(last != uintptr_t(-1));
michael@0 332 JS_ASSERT(first);
michael@0 333 JS_ASSERT(last);
michael@0 334 JS_ASSERT(first - 1 <= last);
michael@0 335 uintptr_t arenaAddr = arenaAddressUnchecked();
michael@0 336 if (last & 1) {
michael@0 337 /* The span is the last. */
michael@0 338 JS_ASSERT((last & ArenaMask) == ArenaMask);
michael@0 339
michael@0 340 if (first - 1 == last) {
michael@0 341 /* The span is last and empty. The above |first| != 0 check
michael@0 342 * implies that we are not at the end of the address space.
michael@0 343 */
michael@0 344 return;
michael@0 345 }
michael@0 346 size_t spanLength = last - first + 1;
michael@0 347 JS_ASSERT(spanLength % CellSize == 0);
michael@0 348
michael@0 349 /* Start and end must belong to the same arena. */
michael@0 350 JS_ASSERT((first & ~ArenaMask) == arenaAddr);
michael@0 351 return;
michael@0 352 }
michael@0 353
michael@0 354 /* The span is not the last and we have more spans to follow. */
michael@0 355 JS_ASSERT(first <= last);
michael@0 356 size_t spanLengthWithoutOneThing = last - first;
michael@0 357 JS_ASSERT(spanLengthWithoutOneThing % CellSize == 0);
michael@0 358
michael@0 359 JS_ASSERT((first & ~ArenaMask) == arenaAddr);
michael@0 360
michael@0 361 /*
michael@0 362 * If there is not enough space before the arena end to allocate one
michael@0 363 * more thing, then the span must be marked as the last one to avoid
michael@0 364 * storing useless empty span reference.
michael@0 365 */
michael@0 366 size_t beforeTail = ArenaSize - (last & ArenaMask);
michael@0 367 JS_ASSERT(beforeTail >= sizeof(FreeSpan) + CellSize);
michael@0 368
michael@0 369 FreeSpan *next = reinterpret_cast<FreeSpan *>(last);
michael@0 370
michael@0 371 /*
michael@0 372 * The GC things on the list of free spans come from one arena
michael@0 373 * and the spans are linked in ascending address order with
michael@0 374 * at least one non-free thing between spans.
michael@0 375 */
michael@0 376 JS_ASSERT(last < next->first);
michael@0 377 JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());
michael@0 378
michael@0 379 if (next->first > next->last) {
michael@0 380 /*
michael@0 381 * The next span is the empty span that terminates the list for
michael@0 382 * arenas that do not have any free things at the end.
michael@0 383 */
michael@0 384 JS_ASSERT(next->first - 1 == next->last);
michael@0 385 JS_ASSERT(arenaAddr + ArenaSize == next->first);
michael@0 386 }
michael@0 387 #endif
michael@0 388 }
michael@0 389
michael@0 390 };
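/*
 * Editorial sketch (not part of the original header): how a consumer of
 * FreeSpan typically drives allocation. The freeLists table and refill path
 * named here are hypothetical stand-ins for the allocator that owns the
 * per-AllocKind span heads:
 *
 *   size_t thingSize = Arena::thingSize(kind);
 *   void *thing = freeLists[kind].allocate(thingSize);
 *   if (!thing)
 *       thing = refillFreeListAndAllocate(kind, thingSize);  // new arena/GC
 *
 * allocate() bump-allocates while first < last, follows the linkage stored
 * in the one remaining cell when first == last, and returns nullptr only
 * once it reaches the empty terminating span.
 */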
michael@0 391
michael@0 392 /* Every arena has a header. */
michael@0 393 struct ArenaHeader : public JS::shadow::ArenaHeader
michael@0 394 {
michael@0 395 friend struct FreeLists;
michael@0 396
michael@0 397 /*
michael@0 398 * ArenaHeader::next has two purposes: when unallocated, it points to the
michael@0 399 * next available Arena's header. When allocated, it points to the next
michael@0 400 * arena of the same size class and compartment.
michael@0 401 */
michael@0 402 ArenaHeader *next;
michael@0 403
michael@0 404 private:
michael@0 405 /*
michael@0 406 * The first span of free things in the arena. We encode it as the start
michael@0 407 * and end offsets within the arena, not as FreeSpan structure, to
michael@0 408 * minimize the header size.
michael@0 409 */
michael@0 410 size_t firstFreeSpanOffsets;
michael@0 411
michael@0 412 /*
michael@0 413 * One of the AllocKind constants or FINALIZE_LIMIT when the arena does not
michael@0 414 * contain any GC things and is on the list of empty arenas in the GC
michael@0 415 * chunk. The latter allows us to quickly check whether the arena is
michael@0 416 * allocated during conservative GC scanning without searching for it in the
michael@0 417 * list.
michael@0 418 *
michael@0 419 * We use 8 bits for the allocKind so the compiler can use byte-level memory
michael@0 420 * instructions to access it.
michael@0 421 */
michael@0 422 size_t allocKind : 8;
michael@0 423
michael@0 424 /*
michael@0 425 * When collecting we sometimes need to keep an auxiliary list of arenas,
michael@0 426 * for which we use the following fields. This happens for several reasons:
michael@0 427 *
michael@0 428 * When recursive marking uses too much stack the marking is delayed and the
michael@0 429 * corresponding arenas are put into a stack. To distinguish the bottom of
michael@0 430 * the stack from the arenas not present in the stack we use the
michael@0 431 * markOverflow flag to tag arenas on the stack.
michael@0 432 *
michael@0 433 * Delayed marking is also used for arenas that we allocate into during an
michael@0 434 * incremental GC. In this case, we intend to mark all the objects in the
michael@0 435 * arena, and it's faster to do this marking in bulk.
michael@0 436 *
michael@0 437 * When sweeping we keep track of which arenas have been allocated since the
michael@0 438 * end of the mark phase. This allows us to tell whether a pointer to an
michael@0 439 * unmarked object is yet to be finalized or has already been reallocated.
michael@0 440 * We set the allocatedDuringIncremental flag for this and clear it at the
michael@0 441 * end of the sweep phase.
michael@0 442 *
michael@0 443 * To minimize the ArenaHeader size we record the next linkage as
michael@0 444 * arenaAddress() >> ArenaShift and pack it with the allocKind field and the
michael@0 445 * flags.
michael@0 446 */
michael@0 447 public:
michael@0 448 size_t hasDelayedMarking : 1;
michael@0 449 size_t allocatedDuringIncremental : 1;
michael@0 450 size_t markOverflow : 1;
michael@0 451 size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
michael@0 452 static_assert(ArenaShift >= 8 + 1 + 1 + 1,
michael@0 453 "ArenaHeader::auxNextLink packing assumes that ArenaShift has enough bits to "
michael@0 454 "cover allocKind and hasDelayedMarking.");
michael@0 455
michael@0 456 inline uintptr_t address() const;
michael@0 457 inline Chunk *chunk() const;
michael@0 458
michael@0 459 bool allocated() const {
michael@0 460 JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
michael@0 461 return allocKind < size_t(FINALIZE_LIMIT);
michael@0 462 }
michael@0 463
michael@0 464 void init(JS::Zone *zoneArg, AllocKind kind) {
michael@0 465 JS_ASSERT(!allocated());
michael@0 466 JS_ASSERT(!markOverflow);
michael@0 467 JS_ASSERT(!allocatedDuringIncremental);
michael@0 468 JS_ASSERT(!hasDelayedMarking);
michael@0 469 zone = zoneArg;
michael@0 470
michael@0 471 static_assert(FINALIZE_LIMIT <= 255, "We must be able to fit the allockind into uint8_t.");
michael@0 472 allocKind = size_t(kind);
michael@0 473
michael@0 474 /* See comments in FreeSpan::allocateFromNewArena. */
michael@0 475 firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
michael@0 476 }
michael@0 477
michael@0 478 void setAsNotAllocated() {
michael@0 479 allocKind = size_t(FINALIZE_LIMIT);
michael@0 480 markOverflow = 0;
michael@0 481 allocatedDuringIncremental = 0;
michael@0 482 hasDelayedMarking = 0;
michael@0 483 auxNextLink = 0;
michael@0 484 }
michael@0 485
michael@0 486 inline uintptr_t arenaAddress() const;
michael@0 487 inline Arena *getArena();
michael@0 488
michael@0 489 AllocKind getAllocKind() const {
michael@0 490 JS_ASSERT(allocated());
michael@0 491 return AllocKind(allocKind);
michael@0 492 }
michael@0 493
michael@0 494 inline size_t getThingSize() const;
michael@0 495
michael@0 496 bool hasFreeThings() const {
michael@0 497 return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
michael@0 498 }
michael@0 499
michael@0 500 inline bool isEmpty() const;
michael@0 501
michael@0 502 void setAsFullyUsed() {
michael@0 503 firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
michael@0 504 }
michael@0 505
michael@0 506 inline FreeSpan getFirstFreeSpan() const;
michael@0 507 inline void setFirstFreeSpan(const FreeSpan *span);
michael@0 508
michael@0 509 #ifdef DEBUG
michael@0 510 void checkSynchronizedWithFreeList() const;
michael@0 511 #endif
michael@0 512
michael@0 513 inline ArenaHeader *getNextDelayedMarking() const;
michael@0 514 inline void setNextDelayedMarking(ArenaHeader *aheader);
michael@0 515 inline void unsetDelayedMarking();
michael@0 516
michael@0 517 inline ArenaHeader *getNextAllocDuringSweep() const;
michael@0 518 inline void setNextAllocDuringSweep(ArenaHeader *aheader);
michael@0 519 inline void unsetAllocDuringSweep();
michael@0 520 };
michael@0 521
michael@0 522 struct Arena
michael@0 523 {
michael@0 524 /*
michael@0 525 * Layout of an arena:
michael@0 526 * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
michael@0 527 * descriptor followed by some pad bytes. The remainder of the arena is
michael@0 528 * filled with the array of T things. The pad bytes ensure that the thing
michael@0 529 * array ends exactly at the end of the arena.
michael@0 530 *
michael@0 531 * +-------------+-----+----+----+-----+----+
michael@0 532 * | ArenaHeader | pad | T0 | T1 | ... | Tn |
michael@0 533 * +-------------+-----+----+----+-----+----+
michael@0 534 *
michael@0 535 * <----------------------------------------> = ArenaSize bytes
michael@0 536 * <-------------------> = first thing offset
michael@0 537 */
michael@0 538 ArenaHeader aheader;
michael@0 539 uint8_t data[ArenaSize - sizeof(ArenaHeader)];
michael@0 540
michael@0 541 private:
michael@0 542 static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
michael@0 543 static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
michael@0 544
michael@0 545 public:
michael@0 546 static void staticAsserts();
michael@0 547
michael@0 548 static size_t thingSize(AllocKind kind) {
michael@0 549 return ThingSizes[kind];
michael@0 550 }
michael@0 551
michael@0 552 static size_t firstThingOffset(AllocKind kind) {
michael@0 553 return FirstThingOffsets[kind];
michael@0 554 }
michael@0 555
michael@0 556 static size_t thingsPerArena(size_t thingSize) {
michael@0 557 JS_ASSERT(thingSize % CellSize == 0);
michael@0 558
michael@0 559 /* We should be able to fit FreeSpan in any GC thing. */
michael@0 560 JS_ASSERT(thingSize >= sizeof(FreeSpan));
michael@0 561
michael@0 562 return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
michael@0 563 }
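    /*
     * Editorial sketch (not part of the original header): a worked instance
     * of the helpers above for a hypothetical 48-byte thing kind, assuming a
     * 32-byte ArenaHeader on a 64-bit target: thingsPerArena(48) ==
     * (4096 - 32) / 48 == 84, so firstThingOffset for that kind would be
     * 4096 - 84 * 48 == 64, and the 64 - 32 == 32 bytes between the header
     * and T0 are the pad bytes from the layout diagram above.
     */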
michael@0 564
michael@0 565 static size_t thingsSpan(size_t thingSize) {
michael@0 566 return thingsPerArena(thingSize) * thingSize;
michael@0 567 }
michael@0 568
michael@0 569 static bool isAligned(uintptr_t thing, size_t thingSize) {
michael@0 570 /* Things end at the arena end. */
michael@0 571 uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
michael@0 572 return tailOffset % thingSize == 0;
michael@0 573 }
michael@0 574
michael@0 575 uintptr_t address() const {
michael@0 576 return aheader.address();
michael@0 577 }
michael@0 578
michael@0 579 uintptr_t thingsStart(AllocKind thingKind) {
michael@0 580 return address() | firstThingOffset(thingKind);
michael@0 581 }
michael@0 582
michael@0 583 uintptr_t thingsEnd() {
michael@0 584 return address() + ArenaSize;
michael@0 585 }
michael@0 586
michael@0 587 void setAsFullyUnused(AllocKind thingKind);
michael@0 588
michael@0 589 template <typename T>
michael@0 590 bool finalize(FreeOp *fop, AllocKind thingKind, size_t thingSize);
michael@0 591 };
michael@0 592
michael@0 593 static_assert(sizeof(Arena) == ArenaSize, "The hardcoded arena size must match the struct size.");
michael@0 594
michael@0 595 inline size_t
michael@0 596 ArenaHeader::getThingSize() const
michael@0 597 {
michael@0 598 JS_ASSERT(allocated());
michael@0 599 return Arena::thingSize(getAllocKind());
michael@0 600 }
michael@0 601
michael@0 602 /*
michael@0 603 * The tail of the chunk info is shared between all chunks in the system, both
michael@0 604 * nursery and tenured. This structure is locatable from any GC pointer by
michael@0 605 * aligning to 1MiB.
michael@0 606 */
michael@0 607 struct ChunkTrailer
michael@0 608 {
michael@0 609 /* The index of the chunk in the nursery, or LocationTenuredHeap. */
michael@0 610 uint32_t location;
michael@0 611
michael@0 612 #if JS_BITS_PER_WORD == 64
michael@0 613 uint32_t padding;
michael@0 614 #endif
michael@0 615
michael@0 616 JSRuntime *runtime;
michael@0 617 };
michael@0 618
michael@0 619 static_assert(sizeof(ChunkTrailer) == 2 * sizeof(uintptr_t), "ChunkTrailer size is incorrect.");
michael@0 620
michael@0 621 /* The chunk header (located at the end of the chunk to preserve arena alignment). */
michael@0 622 struct ChunkInfo
michael@0 623 {
michael@0 624 Chunk *next;
michael@0 625 Chunk **prevp;
michael@0 626
michael@0 627 /* Free arenas are linked together with aheader.next. */
michael@0 628 ArenaHeader *freeArenasHead;
michael@0 629
michael@0 630 #if JS_BITS_PER_WORD == 32
michael@0 631 /*
michael@0 632 * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
michael@0 633 * architecture-independent.
michael@0 634 */
michael@0 635 char padding[20];
michael@0 636 #endif
michael@0 637
michael@0 638 /*
michael@0 639 * Decommitted arenas are tracked by a bitmap in the chunk header. We use
michael@0 640 * this offset to start our search iteration close to a decommitted arena
michael@0 641 * that we can allocate.
michael@0 642 */
michael@0 643 uint32_t lastDecommittedArenaOffset;
michael@0 644
michael@0 645 /* Number of free arenas, either committed or decommitted. */
michael@0 646 uint32_t numArenasFree;
michael@0 647
michael@0 648 /* Number of free, committed arenas. */
michael@0 649 uint32_t numArenasFreeCommitted;
michael@0 650
michael@0 651 /* Number of GC cycles this chunk has survived. */
michael@0 652 uint32_t age;
michael@0 653
michael@0 654 /* Information shared by all Chunk types. */
michael@0 655 ChunkTrailer trailer;
michael@0 656 };
michael@0 657
michael@0 658 /*
michael@0 659 * Calculating ArenasPerChunk:
michael@0 660 *
michael@0 661 * In order to figure out how many Arenas will fit in a chunk, we need to know
michael@0 662 * how much extra space is available after we allocate the header data. This
michael@0 663 * is a problem because the header size depends on the number of arenas in the
michael@0 664 * chunk. The two dependent fields are bitmap and decommittedArenas.
michael@0 665 *
michael@0 666 * For the mark bitmap, we know that each arena will use a fixed number of full
michael@0 667 * bytes: ArenaBitmapBytes. The full size of the header data is this number
michael@0 668 * multiplied by the eventual number of arenas we have in the header. We,
michael@0 669 * conceptually, distribute this header data among the individual arenas and do
michael@0 670 * not include it in the header. This way we do not have to worry about its
michael@0 671 * variable size: it gets attached to the variable number we are computing.
michael@0 672 *
michael@0 673 * For the decommitted arena bitmap, we only have 1 bit per arena, so this
michael@0 674 * technique will not work. Instead, we observe that we do not have enough
michael@0 675 * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
michael@0 676 * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
michael@0 677 * This will not become 63 bytes unless we double the data required in the
michael@0 678 * header. Therefore, we just compute the number of bytes required to track
michael@0 679 * every possible arena and do not worry about slop bits, since there are too
michael@0 680 * few to usefully allocate.
michael@0 681 *
michael@0 682 * To actually compute the number of arenas we can allocate in a chunk, we
michael@0 683 * divide the amount of available space less the header info (not including
michael@0 684 * the mark bitmap which is distributed into the arena size) by the size of
michael@0 685 * the arena (with the mark bitmap bytes it uses).
michael@0 686 */
michael@0 687 const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
michael@0 688 const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
michael@0 689 const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
michael@0 690 const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
michael@0 691 static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
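/*
 * Editorial note (not part of the original header): the arithmetic behind the
 * 252 above, assuming the usual 1 MiB chunks, 4096-byte arenas and 8-byte
 * cells. BytesPerArenaWithHeader == 4096 + 64 == 4160, and the ChunkInfo plus
 * decommit bitmap subtracted from ChunkSize amount to far less than one such
 * arena, so ChunkBytesAvailable / 4160 rounds down to 1048576 / 4160 == 252
 * (any value down to 252 * 4160 == 1048320 gives the same quotient).
 */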
michael@0 692
michael@0 693 /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
michael@0 694 struct ChunkBitmap
michael@0 695 {
michael@0 696 volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
michael@0 697
michael@0 698 public:
michael@0 699 ChunkBitmap() { }
michael@0 700
michael@0 701 MOZ_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
michael@0 702 uintptr_t **wordp, uintptr_t *maskp)
michael@0 703 {
michael@0 704 GetGCThingMarkWordAndMask(cell, color, wordp, maskp);
michael@0 705 }
michael@0 706
michael@0 707 MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarked(const Cell *cell, uint32_t color) {
michael@0 708 uintptr_t *word, mask;
michael@0 709 getMarkWordAndMask(cell, color, &word, &mask);
michael@0 710 return *word & mask;
michael@0 711 }
michael@0 712
michael@0 713 MOZ_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
michael@0 714 uintptr_t *word, mask;
michael@0 715 getMarkWordAndMask(cell, BLACK, &word, &mask);
michael@0 716 if (*word & mask)
michael@0 717 return false;
michael@0 718 *word |= mask;
michael@0 719 if (color != BLACK) {
michael@0 720 /*
michael@0 721 * We use getMarkWordAndMask to recalculate both mask and word as
michael@0 722 * doing just mask << color may overflow the mask.
michael@0 723 */
michael@0 724 getMarkWordAndMask(cell, color, &word, &mask);
michael@0 725 if (*word & mask)
michael@0 726 return false;
michael@0 727 *word |= mask;
michael@0 728 }
michael@0 729 return true;
michael@0 730 }
michael@0 731
michael@0 732 MOZ_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
michael@0 733 uintptr_t *word, mask;
michael@0 734 getMarkWordAndMask(cell, color, &word, &mask);
michael@0 735 *word &= ~mask;
michael@0 736 }
michael@0 737
michael@0 738 void clear() {
michael@0 739 memset((void *)bitmap, 0, sizeof(bitmap));
michael@0 740 }
michael@0 741
michael@0 742 uintptr_t *arenaBits(ArenaHeader *aheader) {
michael@0 743 static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
michael@0 744 "We assume that the part of the bitmap corresponding to the arena "
michael@0 745 "has the exact number of words so we do not need to deal with a word "
michael@0 746 "that covers bits from two arenas.");
michael@0 747
michael@0 748 uintptr_t *word, unused;
michael@0 749 getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
michael@0 750 return word;
michael@0 751 }
michael@0 752 };
michael@0 753
michael@0 754 static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
michael@0 755 "Ensure our ChunkBitmap actually covers all arenas.");
michael@0 756 static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
michael@0 757 "Ensure that the mark bitmap has the right number of bits.");
michael@0 758
michael@0 759 typedef BitArray<ArenasPerChunk> PerArenaBitmap;
michael@0 760
michael@0 761 const size_t ChunkPadSize = ChunkSize
michael@0 762 - (sizeof(Arena) * ArenasPerChunk)
michael@0 763 - sizeof(ChunkBitmap)
michael@0 764 - sizeof(PerArenaBitmap)
michael@0 765 - sizeof(ChunkInfo);
michael@0 766 static_assert(ChunkPadSize < BytesPerArenaWithHeader,
michael@0 767 "If the chunk padding is larger than an arena, we should have one more arena.");
michael@0 768
michael@0 769 /*
michael@0 770 * Chunks contain arenas and associated data structures (mark bitmap, delayed
michael@0 771 * marking state).
michael@0 772 */
michael@0 773 struct Chunk
michael@0 774 {
michael@0 775 Arena arenas[ArenasPerChunk];
michael@0 776
michael@0 777 /* Pad to full size to ensure cache alignment of ChunkInfo. */
michael@0 778 uint8_t padding[ChunkPadSize];
michael@0 779
michael@0 780 ChunkBitmap bitmap;
michael@0 781 PerArenaBitmap decommittedArenas;
michael@0 782 ChunkInfo info;
michael@0 783
michael@0 784 static Chunk *fromAddress(uintptr_t addr) {
michael@0 785 addr &= ~ChunkMask;
michael@0 786 return reinterpret_cast<Chunk *>(addr);
michael@0 787 }
michael@0 788
michael@0 789 static bool withinArenasRange(uintptr_t addr) {
michael@0 790 uintptr_t offset = addr & ChunkMask;
michael@0 791 return offset < ArenasPerChunk * ArenaSize;
michael@0 792 }
michael@0 793
michael@0 794 static size_t arenaIndex(uintptr_t addr) {
michael@0 795 JS_ASSERT(withinArenasRange(addr));
michael@0 796 return (addr & ChunkMask) >> ArenaShift;
michael@0 797 }
michael@0 798
michael@0 799 uintptr_t address() const {
michael@0 800 uintptr_t addr = reinterpret_cast<uintptr_t>(this);
michael@0 801 JS_ASSERT(!(addr & ChunkMask));
michael@0 802 return addr;
michael@0 803 }
michael@0 804
michael@0 805 bool unused() const {
michael@0 806 return info.numArenasFree == ArenasPerChunk;
michael@0 807 }
michael@0 808
michael@0 809 bool hasAvailableArenas() const {
michael@0 810 return info.numArenasFree != 0;
michael@0 811 }
michael@0 812
michael@0 813 inline void addToAvailableList(JS::Zone *zone);
michael@0 814 inline void insertToAvailableList(Chunk **insertPoint);
michael@0 815 inline void removeFromAvailableList();
michael@0 816
michael@0 817 ArenaHeader *allocateArena(JS::Zone *zone, AllocKind kind);
michael@0 818
michael@0 819 void releaseArena(ArenaHeader *aheader);
michael@0 820 void recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind);
michael@0 821
michael@0 822 static Chunk *allocate(JSRuntime *rt);
michael@0 823
michael@0 824 void decommitAllArenas(JSRuntime *rt) {
michael@0 825 decommittedArenas.clear(true);
michael@0 826 MarkPagesUnused(rt, &arenas[0], ArenasPerChunk * ArenaSize);
michael@0 827
michael@0 828 info.freeArenasHead = nullptr;
michael@0 829 info.lastDecommittedArenaOffset = 0;
michael@0 830 info.numArenasFree = ArenasPerChunk;
michael@0 831 info.numArenasFreeCommitted = 0;
michael@0 832 }
michael@0 833
michael@0 834 /* Must be called with the GC lock taken. */
michael@0 835 static inline void release(JSRuntime *rt, Chunk *chunk);
michael@0 836 static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
michael@0 837
michael@0 838 /* Must be called with the GC lock taken. */
michael@0 839 inline void prepareToBeFreed(JSRuntime *rt);
michael@0 840
michael@0 841 /*
michael@0 842 * Assuming that info.prevp points to the next field of the previous
michael@0 843 * chunk in a doubly-linked list, get that chunk.
michael@0 844 */
michael@0 845 Chunk *getPrevious() {
michael@0 846 JS_ASSERT(info.prevp);
michael@0 847 return fromPointerToNext(info.prevp);
michael@0 848 }
michael@0 849
michael@0 850 /* Get the chunk from a pointer to its info.next field. */
michael@0 851 static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
michael@0 852 uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
michael@0 853 JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
michael@0 854 return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
michael@0 855 }
michael@0 856
michael@0 857 private:
michael@0 858 inline void init(JSRuntime *rt);
michael@0 859
michael@0 860 /* Search for a decommitted arena to allocate. */
michael@0 861 unsigned findDecommittedArenaOffset();
michael@0 862 ArenaHeader* fetchNextDecommittedArena();
michael@0 863
michael@0 864 public:
michael@0 865 /* Unlink and return the freeArenasHead. */
michael@0 866 inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
michael@0 867
michael@0 868 inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
michael@0 869 };
michael@0 870
michael@0 871 static_assert(sizeof(Chunk) == ChunkSize,
michael@0 872 "Ensure the hardcoded chunk size definition actually matches the struct.");
michael@0 873 static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
michael@0 874 "The hardcoded API bitmap offset must match the actual offset.");
michael@0 875 static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
michael@0 876 offsetof(ChunkInfo, trailer) +
michael@0 877 offsetof(ChunkTrailer, runtime),
michael@0 878 "The hardcoded API runtime offset must match the actual offset.");
michael@0 879
michael@0 880 inline uintptr_t
michael@0 881 ArenaHeader::address() const
michael@0 882 {
michael@0 883 uintptr_t addr = reinterpret_cast<uintptr_t>(this);
michael@0 884 JS_ASSERT(!(addr & ArenaMask));
michael@0 885 JS_ASSERT(Chunk::withinArenasRange(addr));
michael@0 886 return addr;
michael@0 887 }
michael@0 888
michael@0 889 inline Chunk *
michael@0 890 ArenaHeader::chunk() const
michael@0 891 {
michael@0 892 return Chunk::fromAddress(address());
michael@0 893 }
michael@0 894
michael@0 895 inline uintptr_t
michael@0 896 ArenaHeader::arenaAddress() const
michael@0 897 {
michael@0 898 return address();
michael@0 899 }
michael@0 900
michael@0 901 inline Arena *
michael@0 902 ArenaHeader::getArena()
michael@0 903 {
michael@0 904 return reinterpret_cast<Arena *>(arenaAddress());
michael@0 905 }
michael@0 906
michael@0 907 inline bool
michael@0 908 ArenaHeader::isEmpty() const
michael@0 909 {
michael@0 910 /* Arena is empty if its first span covers the whole arena. */
michael@0 911 JS_ASSERT(allocated());
michael@0 912 size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
michael@0 913 return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
michael@0 914 }
michael@0 915
michael@0 916 FreeSpan
michael@0 917 ArenaHeader::getFirstFreeSpan() const
michael@0 918 {
michael@0 919 #ifdef DEBUG
michael@0 920 checkSynchronizedWithFreeList();
michael@0 921 #endif
michael@0 922 return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
michael@0 923 }
michael@0 924
michael@0 925 void
michael@0 926 ArenaHeader::setFirstFreeSpan(const FreeSpan *span)
michael@0 927 {
michael@0 928 JS_ASSERT(span->isWithinArena(arenaAddress()));
michael@0 929 firstFreeSpanOffsets = span->encodeAsOffsets();
michael@0 930 }
michael@0 931
michael@0 932 inline ArenaHeader *
michael@0 933 ArenaHeader::getNextDelayedMarking() const
michael@0 934 {
michael@0 935 JS_ASSERT(hasDelayedMarking);
michael@0 936 return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
michael@0 937 }
michael@0 938
michael@0 939 inline void
michael@0 940 ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
michael@0 941 {
michael@0 942 JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
michael@0 943 JS_ASSERT(!auxNextLink && !hasDelayedMarking);
michael@0 944 hasDelayedMarking = 1;
michael@0 945 auxNextLink = aheader->arenaAddress() >> ArenaShift;
michael@0 946 }
michael@0 947
michael@0 948 inline void
michael@0 949 ArenaHeader::unsetDelayedMarking()
michael@0 950 {
michael@0 951 JS_ASSERT(hasDelayedMarking);
michael@0 952 hasDelayedMarking = 0;
michael@0 953 auxNextLink = 0;
michael@0 954 }
michael@0 955
michael@0 956 inline ArenaHeader *
michael@0 957 ArenaHeader::getNextAllocDuringSweep() const
michael@0 958 {
michael@0 959 JS_ASSERT(allocatedDuringIncremental);
michael@0 960 return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
michael@0 961 }
michael@0 962
michael@0 963 inline void
michael@0 964 ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
michael@0 965 {
michael@0 966 JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
michael@0 967 allocatedDuringIncremental = 1;
michael@0 968 auxNextLink = aheader->arenaAddress() >> ArenaShift;
michael@0 969 }
michael@0 970
michael@0 971 inline void
michael@0 972 ArenaHeader::unsetAllocDuringSweep()
michael@0 973 {
michael@0 974 JS_ASSERT(allocatedDuringIncremental);
michael@0 975 allocatedDuringIncremental = 0;
michael@0 976 auxNextLink = 0;
michael@0 977 }
michael@0 978
michael@0 979 static void
michael@0 980 AssertValidColor(const void *thing, uint32_t color)
michael@0 981 {
michael@0 982 #ifdef DEBUG
michael@0 983 ArenaHeader *aheader = reinterpret_cast<const Cell *>(thing)->arenaHeader();
michael@0 984 JS_ASSERT(color < aheader->getThingSize() / CellSize);
michael@0 985 #endif
michael@0 986 }
michael@0 987
michael@0 988 inline ArenaHeader *
michael@0 989 Cell::arenaHeader() const
michael@0 990 {
michael@0 991 JS_ASSERT(isTenured());
michael@0 992 uintptr_t addr = address();
michael@0 993 addr &= ~ArenaMask;
michael@0 994 return reinterpret_cast<ArenaHeader *>(addr);
michael@0 995 }
michael@0 996
michael@0 997 inline JSRuntime *
michael@0 998 Cell::runtimeFromMainThread() const
michael@0 999 {
michael@0 1000 JSRuntime *rt = chunk()->info.trailer.runtime;
michael@0 1001 JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
michael@0 1002 return rt;
michael@0 1003 }
michael@0 1004
michael@0 1005 inline JS::shadow::Runtime *
michael@0 1006 Cell::shadowRuntimeFromMainThread() const
michael@0 1007 {
michael@0 1008 return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread());
michael@0 1009 }
michael@0 1010
michael@0 1011 inline JSRuntime *
michael@0 1012 Cell::runtimeFromAnyThread() const
michael@0 1013 {
michael@0 1014 return chunk()->info.trailer.runtime;
michael@0 1015 }
michael@0 1016
michael@0 1017 inline JS::shadow::Runtime *
michael@0 1018 Cell::shadowRuntimeFromAnyThread() const
michael@0 1019 {
michael@0 1020 return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
michael@0 1021 }
michael@0 1022
michael@0 1023 bool
michael@0 1024 Cell::isMarked(uint32_t color /* = BLACK */) const
michael@0 1025 {
michael@0 1026 JS_ASSERT(isTenured());
michael@0 1027 JS_ASSERT(arenaHeader()->allocated());
michael@0 1028 AssertValidColor(this, color);
michael@0 1029 return chunk()->bitmap.isMarked(this, color);
michael@0 1030 }
michael@0 1031
michael@0 1032 bool
michael@0 1033 Cell::markIfUnmarked(uint32_t color /* = BLACK */) const
michael@0 1034 {
michael@0 1035 JS_ASSERT(isTenured());
michael@0 1036 AssertValidColor(this, color);
michael@0 1037 return chunk()->bitmap.markIfUnmarked(this, color);
michael@0 1038 }
michael@0 1039
michael@0 1040 void
michael@0 1041 Cell::unmark(uint32_t color) const
michael@0 1042 {
michael@0 1043 JS_ASSERT(isTenured());
michael@0 1044 JS_ASSERT(color != BLACK);
michael@0 1045 AssertValidColor(this, color);
michael@0 1046 chunk()->bitmap.unmark(this, color);
michael@0 1047 }
michael@0 1048
michael@0 1049 JS::Zone *
michael@0 1050 Cell::tenuredZone() const
michael@0 1051 {
michael@0 1052 JS::Zone *zone = arenaHeader()->zone;
michael@0 1053 JS_ASSERT(CurrentThreadCanAccessZone(zone));
michael@0 1054 JS_ASSERT(isTenured());
michael@0 1055 return zone;
michael@0 1056 }
michael@0 1057
michael@0 1058 JS::Zone *
michael@0 1059 Cell::tenuredZoneFromAnyThread() const
michael@0 1060 {
michael@0 1061 JS_ASSERT(isTenured());
michael@0 1062 return arenaHeader()->zone;
michael@0 1063 }
michael@0 1064
michael@0 1065 bool
michael@0 1066 Cell::tenuredIsInsideZone(JS::Zone *zone) const
michael@0 1067 {
michael@0 1068 JS_ASSERT(isTenured());
michael@0 1069 return zone == arenaHeader()->zone;
michael@0 1070 }
michael@0 1071
michael@0 1072 #ifdef DEBUG
michael@0 1073 bool
michael@0 1074 Cell::isAligned() const
michael@0 1075 {
michael@0 1076 return Arena::isAligned(address(), arenaHeader()->getThingSize());
michael@0 1077 }
michael@0 1078
michael@0 1079 bool
michael@0 1080 Cell::isTenured() const
michael@0 1081 {
michael@0 1082 #ifdef JSGC_GENERATIONAL
michael@0 1083 JS::shadow::Runtime *rt = js::gc::GetGCThingRuntime(this);
michael@0 1084 return !IsInsideNursery(rt, this);
michael@0 1085 #endif
michael@0 1086 return true;
michael@0 1087 }
michael@0 1088 #endif
michael@0 1089
michael@0 1090 inline uintptr_t
michael@0 1091 Cell::address() const
michael@0 1092 {
michael@0 1093 uintptr_t addr = uintptr_t(this);
michael@0 1094 JS_ASSERT(addr % CellSize == 0);
michael@0 1095 JS_ASSERT(Chunk::withinArenasRange(addr));
michael@0 1096 return addr;
michael@0 1097 }
michael@0 1098
michael@0 1099 Chunk *
michael@0 1100 Cell::chunk() const
michael@0 1101 {
michael@0 1102 uintptr_t addr = uintptr_t(this);
michael@0 1103 JS_ASSERT(addr % CellSize == 0);
michael@0 1104 addr &= ~(ChunkSize - 1);
michael@0 1105 return reinterpret_cast<Chunk *>(addr);
michael@0 1106 }
michael@0 1107
michael@0 1108 inline bool
michael@0 1109 InFreeList(ArenaHeader *aheader, void *thing)
michael@0 1110 {
michael@0 1111 if (!aheader->hasFreeThings())
michael@0 1112 return false;
michael@0 1113
michael@0 1114 FreeSpan firstSpan(aheader->getFirstFreeSpan());
michael@0 1115 uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
michael@0 1116
michael@0 1117 for (const FreeSpan *span = &firstSpan;;) {
michael@0 1118 /* If the thing comes before the current span, it's not free. */
michael@0 1119 if (addr < span->first)
michael@0 1120 return false;
michael@0 1121
michael@0 1122 /*
michael@0 1123 * If we find it inside the span, it's dead. We use "<=" here and not
michael@0 1124 * "<" even for the last span, as we know that the thing is inside the
michael@0 1125 * arena and so, for the last span, thing < span->last + 1.
michael@0 1126 */
michael@0 1127 if (addr <= span->last)
michael@0 1128 return true;
michael@0 1129
michael@0 1130 /*
michael@0 1131 * The last possible empty span is at the end of the arena. Here
michael@0 1132 * span->last < thing < thingsEnd, so we must have more spans.
michael@0 1133 */
michael@0 1134 span = span->nextSpan();
michael@0 1135 }
michael@0 1136 }
michael@0 1137
michael@0 1138 } /* namespace gc */
michael@0 1139
michael@0 1140 gc::AllocKind
michael@0 1141 gc::Cell::tenuredGetAllocKind() const
michael@0 1142 {
michael@0 1143 return arenaHeader()->getAllocKind();
michael@0 1144 }
michael@0 1145
michael@0 1146 } /* namespace js */
michael@0 1147
michael@0 1148 #endif /* gc_Heap_h */
