js/src/jit/JitCompartment.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts... With some
reservations about how to convince FindCookie callers to test the condition
and pass a nullptr when double key logic is disabled.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_JitCompartment_h
#define jit_JitCompartment_h

#ifdef JS_ION

#include "mozilla/MemoryReporting.h"

#include "jsweakcache.h"

#include "jit/CompileInfo.h"
#include "jit/IonCode.h"
#include "jit/IonFrames.h"
#include "jit/shared/Assembler-shared.h"
#include "js/Value.h"
#include "vm/Stack.h"

namespace js {
namespace jit {

class FrameSizeClass;

enum EnterJitType {
    EnterJitBaseline = 0,
    EnterJitOptimized = 1
};

struct EnterJitData
{
    explicit EnterJitData(JSContext *cx)
      : scopeChain(cx),
        result(cx)
    {}

    uint8_t *jitcode;
    InterpreterFrame *osrFrame;

    void *calleeToken;

    Value *maxArgv;
    unsigned maxArgc;
    unsigned numActualArgs;
    unsigned osrNumStackValues;

    RootedObject scopeChain;
    RootedValue result;

    bool constructing;
};

typedef void (*EnterJitCode)(void *code, unsigned argc, Value *argv, InterpreterFrame *fp,
                             CalleeToken calleeToken, JSObject *scopeChain,
                             size_t numStackValues, Value *vp);
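// Illustrative sketch (not part of the original header): how the trampoline
// is typically invoked. The |rt| and |data| locals are hypothetical; the real
// call sites live in EnterIon/EnterBaseline in jit/Ion.cpp and
// jit/BaselineJIT.cpp.
//
//   EnterJitCode enter = rt->jitRuntime()->enterIon();
//   EnterJitData data(cx);
//   // ... fill in jitcode, calleeToken, argument vector, scope chain ...
//   enter(data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame,
//         data.calleeToken, data.scopeChain, data.osrNumStackValues,
//         data.result.address());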
class IonBuilder;

// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
// also includes stubs that can make non-tail calls that can GC).
//
// Optimized stubs are allocated per-compartment and are always purged when
// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
// are only destroyed when the BaselineScript is destroyed.
class ICStubSpace
{
  protected:
    LifoAlloc allocator_;

    explicit ICStubSpace(size_t chunkSize)
      : allocator_(chunkSize)
    {}

  public:
    inline void *alloc(size_t size) {
        return allocator_.alloc(size);
    }

    JS_DECLARE_NEW_METHODS(allocate, alloc, inline)

    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return allocator_.sizeOfExcludingThis(mallocSizeOf);
    }
};
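// Illustrative sketch (not part of the original header): JS_DECLARE_NEW_METHODS
// gives ICStubSpace templated allocate<T>(...) helpers that placement-new a T
// into memory obtained from alloc(), returning null on OOM. A hypothetical
// stub type |MyStub| would be created along these lines:
//
//   ICStubSpace *space = ...;        // e.g. a JitZone's optimizedStubSpace()
//   MyStub *stub = space->allocate<MyStub>(stubCode);
//   if (!stub)
//       return nullptr;              // underlying LifoAlloc exhausted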
// Space for optimized stubs. Every JitCompartment has a single
// OptimizedICStubSpace.
struct OptimizedICStubSpace : public ICStubSpace
{
    static const size_t STUB_DEFAULT_CHUNK_SIZE = 4 * 1024;

  public:
    OptimizedICStubSpace()
      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
    {}

    void free() {
        allocator_.freeAll();
    }
};

// Space for fallback stubs. Every BaselineScript has a
// FallbackICStubSpace.
struct FallbackICStubSpace : public ICStubSpace
{
    static const size_t STUB_DEFAULT_CHUNK_SIZE = 256;

  public:
    FallbackICStubSpace()
      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
    {}

    inline void adoptFrom(FallbackICStubSpace *other) {
        allocator_.steal(&(other->allocator_));
    }
};
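// Illustrative sketch (not part of the original header): when a BaselineScript
// is replaced, e.g. during debug-mode on-stack recompilation, the replacement
// can steal the old script's fallback stub memory rather than copy it
// (|fallbackStubSpace()| here stands in for the BaselineScript accessor):
//
//   newScript->fallbackStubSpace()->adoptFrom(oldScript->fallbackStubSpace());
//
// After the steal, destroying the old BaselineScript no longer frees stub
// data that stubs reachable from the new script still point into.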
// Information about a loop backedge in the runtime, which can be set to
// point to either the loop header or to an OOL interrupt checking stub,
// if signal handlers are being used to implement interrupts.
class PatchableBackedge : public InlineListNode<PatchableBackedge>
{
    friend class JitRuntime;

    CodeLocationJump backedge;
    CodeLocationLabel loopHeader;
    CodeLocationLabel interruptCheck;

  public:
    PatchableBackedge(CodeLocationJump backedge,
                      CodeLocationLabel loopHeader,
                      CodeLocationLabel interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};
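// Illustrative sketch (not part of the original header): with signal-handler
// interrupts, JitRuntime::patchIonBackedges presumably walks the runtime's
// backedge list and redirects each jump, roughly as follows (PatchJump is the
// real patching primitive):
//
//   for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin());
//        iter != backedgeList_.end(); iter++)
//   {
//       PatchableBackedge *b = *iter;
//       PatchJump(b->backedge, target == BackedgeLoopHeader
//                              ? b->loopHeader
//                              : b->interruptCheck);
//   }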
class JitRuntime
{
    friend class JitCompartment;

    // Executable allocator for all code except the main code in an IonScript.
    // Shared with the runtime.
    JSC::ExecutableAllocator *execAlloc_;

    // Executable allocator used for allocating the main code in an IonScript.
    // All accesses on this allocator must be protected by the runtime's
    // interrupt lock, as the executable memory may be protected() when
    // requesting an interrupt to force a fault in the Ion code and avoid the
    // need for explicit interrupt checks.
    JSC::ExecutableAllocator *ionAlloc_;

    // Shared post-exception-handler tail.
    JitCode *exceptionTail_;

    // Shared post-bailout-handler tail.
    JitCode *bailoutTail_;

    // Trampoline for entering JIT code. Contains OSR prologue.
    JitCode *enterJIT_;

    // Trampoline for entering baseline JIT code.
    JitCode *enterBaselineJIT_;

    // Vector mapping frame class sizes to bailout tables.
    Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;

    // Generic bailout table; used if the bailout table overflows.
    JitCode *bailoutHandler_;

    // Argument-rectifying thunk, in the case of insufficient arguments passed
    // to a function call site.
    JitCode *argumentsRectifier_;
    void *argumentsRectifierReturnAddr_;

    // Arguments-rectifying thunk which loads |parallelIon| instead of |ion|.
    JitCode *parallelArgumentsRectifier_;
    // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
    JitCode *invalidator_;

    // Thunk that calls the GC pre barrier.
    JitCode *valuePreBarrier_;
    JitCode *shapePreBarrier_;

    // Thunk used by the debugger for breakpoint and step mode.
    JitCode *debugTrapHandler_;

    // Stub used to inline the ForkJoinGetSlice intrinsic.
    JitCode *forkJoinGetSliceStub_;

    // Thunk used to fix up on-stack recompile of baseline scripts.
    JitCode *baselineDebugModeOSRHandler_;
    void *baselineDebugModeOSRHandlerNoFrameRegPopAddr_;

    // Map VMFunction addresses to the JitCode of the wrapper.
    typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
    VMWrapperMap *functionWrappers_;

    // Buffer for OSR from baseline to Ion. To avoid holding on to this for
    // too long, it's also freed in JitCompartment::mark and in EnterBaseline
    // (after returning from JIT code).
    uint8_t *osrTempData_;

    // Whether all Ion code in the runtime is protected, and will fault if it
    // is accessed.
    bool ionCodeProtected_;

    // If signal handlers are installed, this contains all loop backedges for
    // IonScripts in the runtime.
    InlineList<PatchableBackedge> backedgeList_;

  private:
    JitCode *generateExceptionTailStub(JSContext *cx);
    JitCode *generateBailoutTailStub(JSContext *cx);
    JitCode *generateEnterJIT(JSContext *cx, EnterJitType type);
    JitCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
    JitCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
    JitCode *generateBailoutHandler(JSContext *cx);
    JitCode *generateInvalidator(JSContext *cx);
    JitCode *generatePreBarrier(JSContext *cx, MIRType type);
    JitCode *generateDebugTrapHandler(JSContext *cx);
    JitCode *generateForkJoinGetSliceStub(JSContext *cx);
    JitCode *generateBaselineDebugModeOSRHandler(JSContext *cx, uint32_t *noFrameRegPopOffsetOut);
    JitCode *generateVMWrapper(JSContext *cx, const VMFunction &f);

    JSC::ExecutableAllocator *createIonAlloc(JSContext *cx);

  public:
    JitRuntime();
    ~JitRuntime();
    bool initialize(JSContext *cx);

    uint8_t *allocateOsrTempData(size_t size);
    void freeOsrTempData();

    static void Mark(JSTracer *trc);

    JSC::ExecutableAllocator *execAlloc() const {
        return execAlloc_;
    }

    JSC::ExecutableAllocator *getIonAlloc(JSContext *cx) {
        JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
        return ionAlloc_ ? ionAlloc_ : createIonAlloc(cx);
    }

    JSC::ExecutableAllocator *ionAlloc(JSRuntime *rt) {
        JS_ASSERT(rt->currentThreadOwnsInterruptLock());
        return ionAlloc_;
    }

    bool ionCodeProtected() {
        return ionCodeProtected_;
    }

    void addPatchableBackedge(PatchableBackedge *backedge) {
        backedgeList_.pushFront(backedge);
    }
    void removePatchableBackedge(PatchableBackedge *backedge) {
        backedgeList_.remove(backedge);
    }

    enum BackedgeTarget {
        BackedgeLoopHeader,
        BackedgeInterruptCheck
    };

    void ensureIonCodeProtected(JSRuntime *rt);
    void ensureIonCodeAccessible(JSRuntime *rt);
    void patchIonBackedges(JSRuntime *rt, BackedgeTarget target);

    bool handleAccessViolation(JSRuntime *rt, void *faultingAddress);

    JitCode *getVMWrapper(const VMFunction &f) const;
    JitCode *debugTrapHandler(JSContext *cx);
    JitCode *getBaselineDebugModeOSRHandler(JSContext *cx);
    void *getBaselineDebugModeOSRHandlerAddress(JSContext *cx, bool popFrameReg);

    JitCode *getGenericBailoutHandler() const {
        return bailoutHandler_;
    }

    JitCode *getExceptionTail() const {
        return exceptionTail_;
    }

    JitCode *getBailoutTail() const {
        return bailoutTail_;
    }

    JitCode *getBailoutTable(const FrameSizeClass &frameClass) const;

    JitCode *getArgumentsRectifier(ExecutionMode mode) const {
        switch (mode) {
          case SequentialExecution: return argumentsRectifier_;
          case ParallelExecution: return parallelArgumentsRectifier_;
          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
        }
    }

    void *getArgumentsRectifierReturnAddr() const {
        return argumentsRectifierReturnAddr_;
    }

    JitCode *getInvalidationThunk() const {
        return invalidator_;
    }

    EnterJitCode enterIon() const {
        return enterJIT_->as<EnterJitCode>();
    }

    EnterJitCode enterBaseline() const {
        return enterBaselineJIT_->as<EnterJitCode>();
    }

    JitCode *valuePreBarrier() const {
        return valuePreBarrier_;
    }

    JitCode *shapePreBarrier() const {
        return shapePreBarrier_;
    }

    bool ensureForkJoinGetSliceStubExists(JSContext *cx);
    JitCode *forkJoinGetSliceStub() const {
        return forkJoinGetSliceStub_;
    }
};
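// Illustrative sketch (not part of the original header): code generators reach
// the VM through per-VMFunction wrapper stubs cached in functionWrappers_. A
// caller would fetch the previously generated wrapper and emit a call to it,
// along these lines (|f| is some VMFunction, |masm| a MacroAssembler):
//
//   JitCode *wrapper = cx->runtime()->jitRuntime()->getVMWrapper(f);
//   if (!wrapper)
//       return false;
//   masm.call(wrapper);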
class JitZone
{
    // Allocated space for optimized baseline stubs.
    OptimizedICStubSpace optimizedStubSpace_;

  public:
    OptimizedICStubSpace *optimizedStubSpace() {
        return &optimizedStubSpace_;
    }
};

class JitCompartment
{
    friend class JitActivation;

    // Map ICStub keys to ICStub shared code objects.
    typedef WeakValueCache<uint32_t, ReadBarriered<JitCode> > ICStubCodeMap;
    ICStubCodeMap *stubCodes_;

    // Keep track of offset into various baseline stubs' code at return
    // point from called script.
    void *baselineCallReturnFromIonAddr_;
    void *baselineGetPropReturnFromIonAddr_;
    void *baselineSetPropReturnFromIonAddr_;

    // Same as above, but is used for return from a baseline stub. This is
    // used for recompiles of on-stack baseline scripts (e.g., for debug
    // mode).
    void *baselineCallReturnFromStubAddr_;
    void *baselineGetPropReturnFromStubAddr_;
    void *baselineSetPropReturnFromStubAddr_;

    // Stub to concatenate two strings inline. Note that it can't be
    // stored in JitRuntime because masm.newGCString bakes in zone-specific
    // pointers. This has to be a weak pointer to avoid keeping the whole
    // compartment alive.
    ReadBarriered<JitCode> stringConcatStub_;
    ReadBarriered<JitCode> parallelStringConcatStub_;

    // Set of JSScripts invoked by ForkJoin (i.e. the entry script). These
    // scripts are marked if their respective parallel IonScripts' age is less
    // than a certain amount. See IonScript::parallelAge_.
    typedef HashSet<EncapsulatedPtrScript> ScriptSet;
    ScriptSet *activeParallelEntryScripts_;

    JitCode *generateStringConcatStub(JSContext *cx, ExecutionMode mode);

  public:
    JitCode *getStubCode(uint32_t key) {
        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
        if (p)
            return p->value();
        return nullptr;
    }
    bool putStubCode(uint32_t key, Handle<JitCode *> stubCode) {
        // Make sure to do a lookupForAdd(key) and then insert into that slot, because
        // that way if stubCode gets moved due to a GC caused by lookupForAdd, then
        // we still write the correct pointer.
        JS_ASSERT(!stubCodes_->has(key));
        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
        return stubCodes_->add(p, key, stubCode.get());
    }
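    // Illustrative sketch (not part of the original header): callers pair the
    // two methods above as a classic lookup-or-create cache, e.g. in a
    // hypothetical stub compiler:
    //
    //   Rooted<JitCode *> code(cx, jitComp->getStubCode(key));
    //   if (!code) {
    //       code = compileStubCode(cx, key);   // hypothetical helper
    //       if (!code || !jitComp->putStubCode(key, code))
    //           return nullptr;
    //   }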
    void initBaselineCallReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineCallReturnFromIonAddr_ == nullptr);
        baselineCallReturnFromIonAddr_ = addr;
    }
    void *baselineCallReturnFromIonAddr() {
        JS_ASSERT(baselineCallReturnFromIonAddr_ != nullptr);
        return baselineCallReturnFromIonAddr_;
    }
    void initBaselineGetPropReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineGetPropReturnFromIonAddr_ == nullptr);
        baselineGetPropReturnFromIonAddr_ = addr;
    }
    void *baselineGetPropReturnFromIonAddr() {
        JS_ASSERT(baselineGetPropReturnFromIonAddr_ != nullptr);
        return baselineGetPropReturnFromIonAddr_;
    }
    void initBaselineSetPropReturnFromIonAddr(void *addr) {
        JS_ASSERT(baselineSetPropReturnFromIonAddr_ == nullptr);
        baselineSetPropReturnFromIonAddr_ = addr;
    }
    void *baselineSetPropReturnFromIonAddr() {
        JS_ASSERT(baselineSetPropReturnFromIonAddr_ != nullptr);
        return baselineSetPropReturnFromIonAddr_;
    }

    void initBaselineCallReturnFromStubAddr(void *addr) {
        MOZ_ASSERT(baselineCallReturnFromStubAddr_ == nullptr);
        baselineCallReturnFromStubAddr_ = addr;
    }
    void *baselineCallReturnFromStubAddr() {
        JS_ASSERT(baselineCallReturnFromStubAddr_ != nullptr);
        return baselineCallReturnFromStubAddr_;
    }
    void initBaselineGetPropReturnFromStubAddr(void *addr) {
        JS_ASSERT(baselineGetPropReturnFromStubAddr_ == nullptr);
        baselineGetPropReturnFromStubAddr_ = addr;
    }
    void *baselineGetPropReturnFromStubAddr() {
        JS_ASSERT(baselineGetPropReturnFromStubAddr_ != nullptr);
        return baselineGetPropReturnFromStubAddr_;
    }
    void initBaselineSetPropReturnFromStubAddr(void *addr) {
        JS_ASSERT(baselineSetPropReturnFromStubAddr_ == nullptr);
        baselineSetPropReturnFromStubAddr_ = addr;
    }
    void *baselineSetPropReturnFromStubAddr() {
        JS_ASSERT(baselineSetPropReturnFromStubAddr_ != nullptr);
        return baselineSetPropReturnFromStubAddr_;
    }

    bool notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script);

    void toggleBaselineStubBarriers(bool enabled);

    JSC::ExecutableAllocator *createIonAlloc();

  public:
    JitCompartment();
    ~JitCompartment();

    bool initialize(JSContext *cx);

    // Initialize code stubs only used by Ion, not Baseline.
    bool ensureIonStubsExist(JSContext *cx);

    void mark(JSTracer *trc, JSCompartment *compartment);
    void sweep(FreeOp *fop);

    JitCode *stringConcatStub(ExecutionMode mode) const {
        switch (mode) {
          case SequentialExecution: return stringConcatStub_;
          case ParallelExecution: return parallelStringConcatStub_;
          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
        }
    }
};

// Called from JSCompartment::discardJitCode().
void InvalidateAll(FreeOp *fop, JS::Zone *zone);
template <ExecutionMode mode>
void FinishInvalidation(FreeOp *fop, JSScript *script);

inline bool
ShouldPreserveParallelJITCode(JSRuntime *rt, JSScript *script, bool increase = false)
{
    IonScript *parallelIon = script->parallelIonScript();
    uint32_t age = increase ? parallelIon->increaseParallelAge() : parallelIon->parallelAge();
    return age < jit::IonScript::MAX_PARALLEL_AGE && !rt->gcShouldCleanUpEverything;
}

// On Windows systems, really large frames need to be incrementally touched.
// The following constant defines the minimum increment of the touch.
#ifdef XP_WIN
const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
#endif
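// Illustrative sketch (not part of the original header): a prologue for a
// frame bigger than a page would touch the stack in page-sized steps so
// Windows can grow the guard region, roughly (|frameSize| and the exact
// emission site are hypothetical):
//
//   for (uint32_t offset = WINDOWS_BIG_FRAME_TOUCH_INCREMENT;
//        offset < frameSize;
//        offset += WINDOWS_BIG_FRAME_TOUCH_INCREMENT)
//   {
//       masm.store32(Imm32(0), Address(StackPointer, -int32_t(offset)));
//   }
//
// Using 4096 - 1 rather than 4096 guarantees that at least one touch lands
// in every page even when the frame base is not page-aligned.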

} // namespace jit
} // namespace js

#endif // JS_ION

#endif /* jit_JitCompartment_h */
