--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/JitCompartment.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,498 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_JitCompartment_h
+#define jit_JitCompartment_h
+
+#ifdef JS_ION
+
+#include "mozilla/MemoryReporting.h"
+
+#include "jsweakcache.h"
+
+#include "jit/CompileInfo.h"
+#include "jit/IonCode.h"
+#include "jit/IonFrames.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/Value.h"
+#include "vm/Stack.h"
+
+namespace js {
+namespace jit {
+
+class FrameSizeClass;
+
+enum EnterJitType {
+    EnterJitBaseline = 0,
+    EnterJitOptimized = 1
+};
+
+struct EnterJitData
+{
+    explicit EnterJitData(JSContext *cx)
+      : scopeChain(cx),
+        result(cx)
+    {}
+
+    uint8_t *jitcode;
+    InterpreterFrame *osrFrame;
+
+    void *calleeToken;
+
+    Value *maxArgv;
+    unsigned maxArgc;
+    unsigned numActualArgs;
+    unsigned osrNumStackValues;
+
+    RootedObject scopeChain;
+    RootedValue result;
+
+    bool constructing;
+};
+
+typedef void (*EnterJitCode)(void *code, unsigned argc, Value *argv, InterpreterFrame *fp,
+                             CalleeToken calleeToken, JSObject *scopeChain,
+                             size_t numStackValues, Value *vp);
+
+class IonBuilder;
+
+// ICStubSpace is an abstraction for allocation policy and storage for stub data.
+// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
+// also includes stubs that can make non-tail calls that can GC).
+//
+// Optimized stubs are allocated per-compartment and are always purged when
+// JIT-code is discarded. Fallback stubs are allocated per BaselineScript and
+// are only destroyed when the BaselineScript is destroyed.
+class ICStubSpace
+{
+  protected:
+    LifoAlloc allocator_;
+
+    explicit ICStubSpace(size_t chunkSize)
+      : allocator_(chunkSize)
+    {}
+
+  public:
+    inline void *alloc(size_t size) {
+        return allocator_.alloc(size);
+    }
+
+    JS_DECLARE_NEW_METHODS(allocate, alloc, inline)
+
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return allocator_.sizeOfExcludingThis(mallocSizeOf);
+    }
+};
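+
+// Sketch of how the two subclasses below are typically used. The stub type
+// names and the fallbackStubSpace() accessor are illustrative, not declared
+// in this header:
+//
+//   // Optimized stubs come from the shared OptimizedICStubSpace and are
+//   // purged whenever JIT code is discarded:
+//   OptimizedICStubSpace *optSpace = jitZone->optimizedStubSpace();
+//   ICStub *monoStub = optSpace->allocate<SomeOptimizedStub>(code);
+//
+//   // Fallback stubs live in a per-BaselineScript space and are only
+//   // destroyed together with the BaselineScript:
+//   FallbackICStubSpace *fbSpace = baselineScript->fallbackStubSpace();
+//   ICStub *fallbackStub = fbSpace->allocate<SomeFallbackStub>(code);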
+
+// Space for optimized stubs. Every JitCompartment has a single
+// OptimizedICStubSpace.
+struct OptimizedICStubSpace : public ICStubSpace
+{
+    static const size_t STUB_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+  public:
+    OptimizedICStubSpace()
+      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
+    {}
+
+    void free() {
+        allocator_.freeAll();
+    }
+};
+
+// Space for fallback stubs. Every BaselineScript has a
+// FallbackICStubSpace.
+struct FallbackICStubSpace : public ICStubSpace
+{
+    static const size_t STUB_DEFAULT_CHUNK_SIZE = 256;
+
+  public:
+    FallbackICStubSpace()
+      : ICStubSpace(STUB_DEFAULT_CHUNK_SIZE)
+    {}
+
+    inline void adoptFrom(FallbackICStubSpace *other) {
+        allocator_.steal(&(other->allocator_));
+    }
+};
+
+// Information about a loop backedge in the runtime, which can be set to
+// point to either the loop header or to an OOL interrupt checking stub,
+// if signal handlers are being used to implement interrupts.
+class PatchableBackedge : public InlineListNode<PatchableBackedge>
+{
+    friend class JitRuntime;
+
+    CodeLocationJump backedge;
+    CodeLocationLabel loopHeader;
+    CodeLocationLabel interruptCheck;
+
+  public:
+    PatchableBackedge(CodeLocationJump backedge,
+                      CodeLocationLabel loopHeader,
+                      CodeLocationLabel interruptCheck)
+      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
+    {}
+};
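+
+// When an interrupt is requested, JitRuntime::patchIonBackedges (declared
+// below) walks the registered backedges and redirects each jump. A sketch
+// of the idea, with PatchJump standing in for the backend's jump-patching
+// helper and the iteration simplified:
+//
+//   for (PatchableBackedge *b = backedgeList_.begin(); ...; ...) {
+//       PatchJump(b->backedge, target == BackedgeLoopHeader
+//                              ? b->loopHeader
+//                              : b->interruptCheck);
+//   }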
+
+class JitRuntime
+{
+    friend class JitCompartment;
+
+    // Executable allocator for all code except the main code in an IonScript.
+    // Shared with the runtime.
+    JSC::ExecutableAllocator *execAlloc_;
+
+    // Executable allocator used for allocating the main code in an IonScript.
+    // All accesses on this allocator must be protected by the runtime's
+    // interrupt lock, as the executable memory may be protected() when
+    // requesting an interrupt to force a fault in the Ion code and avoid the
+    // need for explicit interrupt checks.
+    JSC::ExecutableAllocator *ionAlloc_;
+
+    // Shared post-exception-handler tail.
+    JitCode *exceptionTail_;
+
+    // Shared post-bailout-handler tail.
+    JitCode *bailoutTail_;
+
+    // Trampoline for entering JIT code. Contains OSR prologue.
+    JitCode *enterJIT_;
+
+    // Trampoline for entering baseline JIT code.
+    JitCode *enterBaselineJIT_;
+
+    // Vector mapping frame class sizes to bailout tables.
+    Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;
+
+    // Generic bailout table; used if the bailout table overflows.
+    JitCode *bailoutHandler_;
+
+    // Argument-rectifying thunk, in the case of insufficient arguments passed
+    // to a function call site.
+    JitCode *argumentsRectifier_;
+    void *argumentsRectifierReturnAddr_;
+
+    // Arguments-rectifying thunk which loads |parallelIon| instead of |ion|.
+    JitCode *parallelArgumentsRectifier_;
+
+    // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
+    JitCode *invalidator_;
+
+    // Thunk that calls the GC pre barrier.
+    JitCode *valuePreBarrier_;
+    JitCode *shapePreBarrier_;
+
+    // Thunk used by the debugger for breakpoint and step mode.
+    JitCode *debugTrapHandler_;
+
+    // Stub used to inline the ForkJoinGetSlice intrinsic.
+    JitCode *forkJoinGetSliceStub_;
+
+    // Thunk used to fix up on-stack recompile of baseline scripts.
+    JitCode *baselineDebugModeOSRHandler_;
+    void *baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
+
+    // Map VMFunction addresses to the JitCode of the wrapper.
+    typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
+    VMWrapperMap *functionWrappers_;
+
+    // Buffer for OSR from baseline to Ion. To avoid holding on to this for
+    // too long, it's also freed in JitCompartment::mark and in EnterBaseline
+    // (after returning from JIT code).
+    uint8_t *osrTempData_;
+
+    // Whether all Ion code in the runtime is protected, and will fault if it
+    // is accessed.
+    bool ionCodeProtected_;
+
+    // If signal handlers are installed, this contains all loop backedges for
+    // IonScripts in the runtime.
+    InlineList<PatchableBackedge> backedgeList_;
+
+  private:
+    JitCode *generateExceptionTailStub(JSContext *cx);
+    JitCode *generateBailoutTailStub(JSContext *cx);
+    JitCode *generateEnterJIT(JSContext *cx, EnterJitType type);
+    JitCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
+    JitCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
+    JitCode *generateBailoutHandler(JSContext *cx);
+    JitCode *generateInvalidator(JSContext *cx);
+    JitCode *generatePreBarrier(JSContext *cx, MIRType type);
+    JitCode *generateDebugTrapHandler(JSContext *cx);
+    JitCode *generateForkJoinGetSliceStub(JSContext *cx);
+    JitCode *generateBaselineDebugModeOSRHandler(JSContext *cx, uint32_t *noFrameRegPopOffsetOut);
+    JitCode *generateVMWrapper(JSContext *cx, const VMFunction &f);
+
+    JSC::ExecutableAllocator *createIonAlloc(JSContext *cx);
+
+  public:
+    JitRuntime();
+    ~JitRuntime();
+    bool initialize(JSContext *cx);
+
+    uint8_t *allocateOsrTempData(size_t size);
+    void freeOsrTempData();
+
+    static void Mark(JSTracer *trc);
+
+    JSC::ExecutableAllocator *execAlloc() const {
+        return execAlloc_;
+    }
+
+    JSC::ExecutableAllocator *getIonAlloc(JSContext *cx) {
+        JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());
+        return ionAlloc_ ? ionAlloc_ : createIonAlloc(cx);
+    }
+
+    JSC::ExecutableAllocator *ionAlloc(JSRuntime *rt) {
+        JS_ASSERT(rt->currentThreadOwnsInterruptLock());
+        return ionAlloc_;
+    }
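+
+    // Both accessors above assert that the caller holds the interrupt lock;
+    // a sketch of the expected calling pattern (assuming the runtime's RAII
+    // interrupt-lock class):
+    //
+    //   JSRuntime::AutoLockForInterrupt lock(rt);
+    //   JSC::ExecutableAllocator *alloc = rt->jitRuntime()->ionAlloc(rt);
+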
+    bool ionCodeProtected() {
+        return ionCodeProtected_;
+    }
+
+    void addPatchableBackedge(PatchableBackedge *backedge) {
+        backedgeList_.pushFront(backedge);
+    }
+    void removePatchableBackedge(PatchableBackedge *backedge) {
+        backedgeList_.remove(backedge);
+    }
+
+    enum BackedgeTarget {
+        BackedgeLoopHeader,
+        BackedgeInterruptCheck
+    };
+
+    void ensureIonCodeProtected(JSRuntime *rt);
+    void ensureIonCodeAccessible(JSRuntime *rt);
+    void patchIonBackedges(JSRuntime *rt, BackedgeTarget target);
+
+    bool handleAccessViolation(JSRuntime *rt, void *faultingAddress);
+
+    JitCode *getVMWrapper(const VMFunction &f) const;
+    JitCode *debugTrapHandler(JSContext *cx);
+    JitCode *getBaselineDebugModeOSRHandler(JSContext *cx);
+    void *getBaselineDebugModeOSRHandlerAddress(JSContext *cx, bool popFrameReg);
+
+    JitCode *getGenericBailoutHandler() const {
+        return bailoutHandler_;
+    }
+
+    JitCode *getExceptionTail() const {
+        return exceptionTail_;
+    }
+
+    JitCode *getBailoutTail() const {
+        return bailoutTail_;
+    }
+
+    JitCode *getBailoutTable(const FrameSizeClass &frameClass) const;
+
+    JitCode *getArgumentsRectifier(ExecutionMode mode) const {
+        switch (mode) {
+          case SequentialExecution: return argumentsRectifier_;
+          case ParallelExecution: return parallelArgumentsRectifier_;
+          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
+        }
+    }
+
+    void *getArgumentsRectifierReturnAddr() const {
+        return argumentsRectifierReturnAddr_;
+    }
+
+    JitCode *getInvalidationThunk() const {
+        return invalidator_;
+    }
+
+    EnterJitCode enterIon() const {
+        return enterJIT_->as<EnterJitCode>();
+    }
+
+    EnterJitCode enterBaseline() const {
+        return enterBaselineJIT_->as<EnterJitCode>();
+    }
+
+    JitCode *valuePreBarrier() const {
+        return valuePreBarrier_;
+    }
+
+    JitCode *shapePreBarrier() const {
+        return shapePreBarrier_;
+    }
+
+    bool ensureForkJoinGetSliceStubExists(JSContext *cx);
+    JitCode *forkJoinGetSliceStub() const {
+        return forkJoinGetSliceStub_;
+    }
+};
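+
+// Sketch of a call through the enter-JIT trampoline, using the EnterJitData
+// fields defined at the top of this header. Argument marshalling is
+// simplified; real callers go through a CALL_GENERATED_CODE-style wrapper:
+//
+//   EnterJitCode enter = rt->jitRuntime()->enterIon();
+//   enter(data.jitcode, data.maxArgc, data.maxArgv, data.osrFrame,
+//         data.calleeToken, data.scopeChain, data.osrNumStackValues,
+//         data.result.address());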
+
+class JitZone
+{
+    // Allocated space for optimized baseline stubs.
+    OptimizedICStubSpace optimizedStubSpace_;
+
+  public:
+    OptimizedICStubSpace *optimizedStubSpace() {
+        return &optimizedStubSpace_;
+    }
+};
+
+class JitCompartment
+{
+    friend class JitActivation;
+
+    // Map ICStub keys to ICStub shared code objects.
+    typedef WeakValueCache<uint32_t, ReadBarriered<JitCode> > ICStubCodeMap;
+    ICStubCodeMap *stubCodes_;
+
+    // Keep track of offsets into various baseline stubs' code at the return
+    // point from a called script.
+    void *baselineCallReturnFromIonAddr_;
+    void *baselineGetPropReturnFromIonAddr_;
+    void *baselineSetPropReturnFromIonAddr_;
+
+    // Same as above, but used for returns from a baseline stub. This is
+    // used for recompiles of on-stack baseline scripts (e.g., for debug
+    // mode).
+    void *baselineCallReturnFromStubAddr_;
+    void *baselineGetPropReturnFromStubAddr_;
+    void *baselineSetPropReturnFromStubAddr_;
+
+    // Stub to concatenate two strings inline. Note that it can't be
+    // stored in JitRuntime because masm.newGCString bakes in zone-specific
+    // pointers. This has to be a weak pointer to avoid keeping the whole
+    // compartment alive.
+    ReadBarriered<JitCode> stringConcatStub_;
+    ReadBarriered<JitCode> parallelStringConcatStub_;
+
+    // Set of JSScripts invoked by ForkJoin (i.e. the entry script). These
+    // scripts are marked if their respective parallel IonScripts' age is less
+    // than a certain amount. See IonScript::parallelAge_.
+    typedef HashSet<EncapsulatedPtrScript> ScriptSet;
+    ScriptSet *activeParallelEntryScripts_;
+
+    JitCode *generateStringConcatStub(JSContext *cx, ExecutionMode mode);
+
+  public:
+    JitCode *getStubCode(uint32_t key) {
+        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
+        if (p)
+            return p->value();
+        return nullptr;
+    }
+    bool putStubCode(uint32_t key, Handle<JitCode *> stubCode) {
+        // Make sure to do a lookupForAdd(key) and then insert into that slot,
+        // because that way if stubCode gets moved due to a GC caused by
+        // lookupForAdd, then we still write the correct pointer.
+        JS_ASSERT(!stubCodes_->has(key));
+        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
+        return stubCodes_->add(p, key, stubCode.get());
+    }
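+
+    // Typical caching pattern around the two methods above, as seen from a
+    // stub compiler (sketch; |key| and |compile()| stand in for the
+    // compiler's own key derivation and code generation):
+    //
+    //   JitCode *code = jitCompartment->getStubCode(key);
+    //   if (!code) {
+    //       code = compile(cx);
+    //       if (!code || !jitCompartment->putStubCode(key, code))
+    //           return nullptr;
+    //   }
+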
+    void initBaselineCallReturnFromIonAddr(void *addr) {
+        JS_ASSERT(baselineCallReturnFromIonAddr_ == nullptr);
+        baselineCallReturnFromIonAddr_ = addr;
+    }
+    void *baselineCallReturnFromIonAddr() {
+        JS_ASSERT(baselineCallReturnFromIonAddr_ != nullptr);
+        return baselineCallReturnFromIonAddr_;
+    }
+    void initBaselineGetPropReturnFromIonAddr(void *addr) {
+        JS_ASSERT(baselineGetPropReturnFromIonAddr_ == nullptr);
+        baselineGetPropReturnFromIonAddr_ = addr;
+    }
+    void *baselineGetPropReturnFromIonAddr() {
+        JS_ASSERT(baselineGetPropReturnFromIonAddr_ != nullptr);
+        return baselineGetPropReturnFromIonAddr_;
+    }
+    void initBaselineSetPropReturnFromIonAddr(void *addr) {
+        JS_ASSERT(baselineSetPropReturnFromIonAddr_ == nullptr);
+        baselineSetPropReturnFromIonAddr_ = addr;
+    }
+    void *baselineSetPropReturnFromIonAddr() {
+        JS_ASSERT(baselineSetPropReturnFromIonAddr_ != nullptr);
+        return baselineSetPropReturnFromIonAddr_;
+    }
+
+    void initBaselineCallReturnFromStubAddr(void *addr) {
+        JS_ASSERT(baselineCallReturnFromStubAddr_ == nullptr);
+        baselineCallReturnFromStubAddr_ = addr;
+    }
+    void *baselineCallReturnFromStubAddr() {
+        JS_ASSERT(baselineCallReturnFromStubAddr_ != nullptr);
+        return baselineCallReturnFromStubAddr_;
+    }
+    void initBaselineGetPropReturnFromStubAddr(void *addr) {
+        JS_ASSERT(baselineGetPropReturnFromStubAddr_ == nullptr);
+        baselineGetPropReturnFromStubAddr_ = addr;
+    }
+    void *baselineGetPropReturnFromStubAddr() {
+        JS_ASSERT(baselineGetPropReturnFromStubAddr_ != nullptr);
+        return baselineGetPropReturnFromStubAddr_;
+    }
+    void initBaselineSetPropReturnFromStubAddr(void *addr) {
+        JS_ASSERT(baselineSetPropReturnFromStubAddr_ == nullptr);
+        baselineSetPropReturnFromStubAddr_ = addr;
+    }
+    void *baselineSetPropReturnFromStubAddr() {
+        JS_ASSERT(baselineSetPropReturnFromStubAddr_ != nullptr);
+        return baselineSetPropReturnFromStubAddr_;
+    }
+
+    bool notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script);
+
+    void toggleBaselineStubBarriers(bool enabled);
+
+    JSC::ExecutableAllocator *createIonAlloc();
+
+  public:
+    JitCompartment();
+    ~JitCompartment();
+
+    bool initialize(JSContext *cx);
+
+    // Initialize code stubs only used by Ion, not Baseline.
+    bool ensureIonStubsExist(JSContext *cx);
+
+    void mark(JSTracer *trc, JSCompartment *compartment);
+    void sweep(FreeOp *fop);
+
+    JitCode *stringConcatStub(ExecutionMode mode) const {
+        switch (mode) {
+          case SequentialExecution: return stringConcatStub_;
+          case ParallelExecution: return parallelStringConcatStub_;
+          default: MOZ_ASSUME_UNREACHABLE("No such execution mode");
+        }
+    }
+};
+
+// Called from JSCompartment::discardJitCode().
+void InvalidateAll(FreeOp *fop, JS::Zone *zone);
+template <ExecutionMode mode>
+void FinishInvalidation(FreeOp *fop, JSScript *script);
+
+inline bool
+ShouldPreserveParallelJITCode(JSRuntime *rt, JSScript *script, bool increase = false)
+{
+    IonScript *parallelIon = script->parallelIonScript();
+    uint32_t age = increase ? parallelIon->increaseParallelAge() : parallelIon->parallelAge();
+    return age < jit::IonScript::MAX_PARALLEL_AGE && !rt->gcShouldCleanUpEverything;
+}
+
+// On Windows systems, really large frames need to be incrementally touched.
+// The following constant defines the minimum increment of the touch.
+#ifdef XP_WIN
+const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
+#endif
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_ION
+
+#endif /* jit_JitCompartment_h */