/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/Ion.h"

#include "mozilla/MemoryReporting.h"
#include "mozilla/ThreadLocal.h"

#include "jscompartment.h"
#include "jsprf.h"
#include "jsworkers.h"

#include "gc/Marking.h"
#include "jit/AliasAnalysis.h"
#include "jit/AsmJSModule.h"
#include "jit/BacktrackingAllocator.h"
#include "jit/BaselineDebugModeOSR.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/BaselineJIT.h"
#include "jit/CodeGenerator.h"
#include "jit/EdgeCaseAnalysis.h"
#include "jit/EffectiveAddressAnalysis.h"
#include "jit/IonAnalysis.h"
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/JitCommon.h"
#include "jit/JitCompartment.h"
#include "jit/LICM.h"
#include "jit/LinearScan.h"
#include "jit/LIR.h"
#include "jit/Lowering.h"
#include "jit/ParallelSafetyAnalysis.h"
#include "jit/PerfSpewer.h"
#include "jit/RangeAnalysis.h"
#include "jit/StupidAllocator.h"
#include "jit/UnreachableCodeElimination.h"
#include "jit/ValueNumbering.h"
#include "vm/ForkJoin.h"
#include "vm/TraceLogging.h"

#include "jscompartmentinlines.h"
#include "jsgcinlines.h"
#include "jsinferinlines.h"
#include "jsobjinlines.h"

#include "jit/ExecutionMode-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::ThreadLocal;

// Assert that JitCode is gc::Cell aligned.
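// JitCode objects live in the GC heap, and GC arenas presumably hand out
// memory in gc::CellSize units, so sizeof(JitCode) must be a whole number
// of cells for the allocator's bookkeeping to stay consistent.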
JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellSize == 0);

static ThreadLocal<IonContext *> TlsIonContext;

static IonContext *
CurrentIonContext()
{
    if (!TlsIonContext.initialized())
        return nullptr;
    return TlsIonContext.get();
}

void
jit::SetIonContext(IonContext *ctx)
{
    TlsIonContext.set(ctx);
}

IonContext *
jit::GetIonContext()
{
    MOZ_ASSERT(CurrentIonContext());
    return CurrentIonContext();
}

IonContext *
jit::MaybeGetIonContext()
{
    return CurrentIonContext();
}

IonContext::IonContext(JSContext *cx, TempAllocator *temp)
  : cx(cx),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime())),
    compartment(CompileCompartment::get(cx->compartment())),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}

IonContext::IonContext(ExclusiveContext *cx, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime_)),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}

IonContext::IonContext(CompileRuntime *rt, CompileCompartment *comp, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(rt),
    compartment(comp),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}

IonContext::IonContext(CompileRuntime *rt)
  : cx(nullptr),
    temp(nullptr),
    runtime(rt),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}

IonContext::~IonContext()
{
    SetIonContext(prev_);
}

bool
jit::InitializeIon()
{
    if (!TlsIonContext.initialized() && !TlsIonContext.init())
        return false;
    CheckLogging();
    CheckPerf();
    return true;
}

JitRuntime::JitRuntime()
  : execAlloc_(nullptr),
    ionAlloc_(nullptr),
    exceptionTail_(nullptr),
    bailoutTail_(nullptr),
    enterJIT_(nullptr),
    bailoutHandler_(nullptr),
    argumentsRectifier_(nullptr),
    argumentsRectifierReturnAddr_(nullptr),
    parallelArgumentsRectifier_(nullptr),
    invalidator_(nullptr),
    debugTrapHandler_(nullptr),
    forkJoinGetSliceStub_(nullptr),
    baselineDebugModeOSRHandler_(nullptr),
    functionWrappers_(nullptr),
    osrTempData_(nullptr),
    ionCodeProtected_(false)
{
}

JitRuntime::~JitRuntime()
{
    js_delete(functionWrappers_);
    freeOsrTempData();

    // Note: The interrupt lock is not taken here, as JitRuntime is only
    // destroyed along with its containing JSRuntime.
    js_delete(ionAlloc_);
}

bool
JitRuntime::initialize(JSContext *cx)
{
    JS_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
    JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());

    AutoCompartment ac(cx, cx->atomsCompartment());

    IonContext ictx(cx, nullptr);

    execAlloc_ = cx->runtime()->getExecAlloc(cx);
    if (!execAlloc_)
        return false;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    functionWrappers_ = cx->new_<VMWrapperMap>(cx);
    if (!functionWrappers_ || !functionWrappers_->init())
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting exception tail stub");
    exceptionTail_ = generateExceptionTailStub(cx);
    if (!exceptionTail_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting bailout tail stub");
    bailoutTail_ = generateBailoutTailStub(cx);
    if (!bailoutTail_)
        return false;

    if (cx->runtime()->jitSupportsFloatingPoint) {
        IonSpew(IonSpew_Codegen, "# Emitting bailout tables");

        // Initialize some Ion-only stubs that require floating-point support.
        if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
            return false;

        for (uint32_t id = 0;; id++) {
            FrameSizeClass class_ = FrameSizeClass::FromClass(id);
            if (class_ == FrameSizeClass::ClassLimit())
                break;
            bailoutTables_.infallibleAppend((JitCode *)nullptr);
            bailoutTables_[id] = generateBailoutTable(cx, id);
            if (!bailoutTables_[id])
                return false;
        }

        IonSpew(IonSpew_Codegen, "# Emitting bailout handler");
        bailoutHandler_ = generateBailoutHandler(cx);
        if (!bailoutHandler_)
            return false;

        IonSpew(IonSpew_Codegen, "# Emitting invalidator");
        invalidator_ = generateInvalidator(cx);
        if (!invalidator_)
            return false;
    }

    IonSpew(IonSpew_Codegen, "# Emitting sequential arguments rectifier");
    argumentsRectifier_ = generateArgumentsRectifier(cx, SequentialExecution, &argumentsRectifierReturnAddr_);
    if (!argumentsRectifier_)
        return false;

#ifdef JS_THREADSAFE
    IonSpew(IonSpew_Codegen, "# Emitting parallel arguments rectifier");
    parallelArgumentsRectifier_ = generateArgumentsRectifier(cx, ParallelExecution, nullptr);
    if (!parallelArgumentsRectifier_)
        return false;
#endif

    IonSpew(IonSpew_Codegen, "# Emitting EnterJIT sequence");
    enterJIT_ = generateEnterJIT(cx, EnterJitOptimized);
    if (!enterJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting EnterBaselineJIT sequence");
    enterBaselineJIT_ = generateEnterJIT(cx, EnterJitBaseline);
    if (!enterBaselineJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Value");
    valuePreBarrier_ = generatePreBarrier(cx, MIRType_Value);
    if (!valuePreBarrier_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Shape");
for Shape"); michael@0: shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape); michael@0: if (!shapePreBarrier_) michael@0: return false; michael@0: michael@0: IonSpew(IonSpew_Codegen, "# Emitting VM function wrappers"); michael@0: for (VMFunction *fun = VMFunction::functions; fun; fun = fun->next) { michael@0: if (!generateVMWrapper(cx, *fun)) michael@0: return false; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: JitCode * michael@0: JitRuntime::debugTrapHandler(JSContext *cx) michael@0: { michael@0: if (!debugTrapHandler_) { michael@0: // JitRuntime code stubs are shared across compartments and have to michael@0: // be allocated in the atoms compartment. michael@0: AutoLockForExclusiveAccess lock(cx); michael@0: AutoCompartment ac(cx, cx->runtime()->atomsCompartment()); michael@0: debugTrapHandler_ = generateDebugTrapHandler(cx); michael@0: } michael@0: return debugTrapHandler_; michael@0: } michael@0: michael@0: bool michael@0: JitRuntime::ensureForkJoinGetSliceStubExists(JSContext *cx) michael@0: { michael@0: if (!forkJoinGetSliceStub_) { michael@0: IonSpew(IonSpew_Codegen, "# Emitting ForkJoinGetSlice stub"); michael@0: AutoLockForExclusiveAccess lock(cx); michael@0: AutoCompartment ac(cx, cx->runtime()->atomsCompartment()); michael@0: forkJoinGetSliceStub_ = generateForkJoinGetSliceStub(cx); michael@0: } michael@0: return !!forkJoinGetSliceStub_; michael@0: } michael@0: michael@0: uint8_t * michael@0: JitRuntime::allocateOsrTempData(size_t size) michael@0: { michael@0: osrTempData_ = (uint8_t *)js_realloc(osrTempData_, size); michael@0: return osrTempData_; michael@0: } michael@0: michael@0: void michael@0: JitRuntime::freeOsrTempData() michael@0: { michael@0: js_free(osrTempData_); michael@0: osrTempData_ = nullptr; michael@0: } michael@0: michael@0: JSC::ExecutableAllocator * michael@0: JitRuntime::createIonAlloc(JSContext *cx) michael@0: { michael@0: JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock()); michael@0: michael@0: ionAlloc_ = js_new(); michael@0: if (!ionAlloc_) michael@0: js_ReportOutOfMemory(cx); michael@0: return ionAlloc_; michael@0: } michael@0: michael@0: void michael@0: JitRuntime::ensureIonCodeProtected(JSRuntime *rt) michael@0: { michael@0: JS_ASSERT(rt->currentThreadOwnsInterruptLock()); michael@0: michael@0: if (!rt->signalHandlersInstalled() || ionCodeProtected_ || !ionAlloc_) michael@0: return; michael@0: michael@0: // Protect all Ion code in the runtime to trigger an access violation the michael@0: // next time any of it runs on the main thread. michael@0: ionAlloc_->toggleAllCodeAsAccessible(false); michael@0: ionCodeProtected_ = true; michael@0: } michael@0: michael@0: bool michael@0: JitRuntime::handleAccessViolation(JSRuntime *rt, void *faultingAddress) michael@0: { michael@0: if (!rt->signalHandlersInstalled() || !ionAlloc_ || !ionAlloc_->codeContains((char *) faultingAddress)) michael@0: return false; michael@0: michael@0: #ifdef JS_THREADSAFE michael@0: // All places where the interrupt lock is taken must either ensure that Ion michael@0: // code memory won't be accessed within, or call ensureIonCodeAccessible to michael@0: // render the memory safe for accessing. Otherwise taking the lock below michael@0: // will deadlock the process. michael@0: JS_ASSERT(!rt->currentThreadOwnsInterruptLock()); michael@0: #endif michael@0: michael@0: // Taking this lock is necessary to prevent the interrupting thread from marking michael@0: // the memory as inaccessible while we are patching backedges. 
    // That would cause us to SEGV while still inside the signal handler, and
    // the process would terminate.
    JSRuntime::AutoLockForInterrupt lock(rt);

    // Ion code in the runtime faulted after it was made inaccessible. Reset
    // the code privileges and patch all loop backedges to perform an interrupt
    // check instead.
    ensureIonCodeAccessible(rt);
    return true;
}

void
JitRuntime::ensureIonCodeAccessible(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // This can only be called on the main thread and while handling signals,
    // which happens on a separate thread in OS X.
#ifndef XP_MACOSX
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
#endif

    if (ionCodeProtected_) {
        ionAlloc_->toggleAllCodeAsAccessible(true);
        ionCodeProtected_ = false;
    }

    if (rt->interrupt) {
        // The interrupt handler needs to be invoked by this thread, but we may
        // be inside a signal handler and have no idea what is above us on the
        // stack (probably we are executing Ion code at an arbitrary point, but
        // we could be elsewhere, say repatching a jump for an IonCache).
        // Patch all backedges in the runtime so they will invoke the interrupt
        // handler the next time they execute.
        patchIonBackedges(rt, BackedgeInterruptCheck);
    }
}

void
JitRuntime::patchIonBackedges(JSRuntime *rt, BackedgeTarget target)
{
#ifndef XP_MACOSX
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
#endif

    // Patch all loop backedges in Ion code so that they either jump to the
    // normal loop header or to an interrupt handler each time they run.
    for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin());
         iter != backedgeList_.end();
         iter++)
    {
        PatchableBackedge *patchableBackedge = *iter;
        PatchJump(patchableBackedge->backedge, target == BackedgeLoopHeader
                                               ? patchableBackedge->loopHeader
                                               : patchableBackedge->interruptCheck);
    }
}

void
jit::RequestInterruptForIonCode(JSRuntime *rt, JSRuntime::InterruptMode mode)
{
    JitRuntime *jitRuntime = rt->jitRuntime();
    if (!jitRuntime)
        return;

    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // The mechanism for interrupting normal ion code varies depending on how
    // the interrupt is being requested.
    switch (mode) {
      case JSRuntime::RequestInterruptMainThread:
        // When requesting an interrupt from the main thread, Ion loop
        // backedges can be patched directly. Make sure we don't segv while
        // patching the backedges, to avoid deadlocking inside the signal
        // handler.
        JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
        jitRuntime->ensureIonCodeAccessible(rt);
        break;

      case JSRuntime::RequestInterruptAnyThread:
        // When requesting an interrupt from off the main thread, protect
        // Ion code memory so that the main thread will fault and enter a
        // signal handler when trying to execute the code. The signal
        // handler will unprotect the code and patch loop backedges so
        // that the interrupt handler is invoked afterwards.
        jitRuntime->ensureIonCodeProtected(rt);
        break;

      case JSRuntime::RequestInterruptAnyThreadDontStopIon:
      case JSRuntime::RequestInterruptAnyThreadForkJoin:
        // The caller does not require Ion code to be interrupted.
        // Nothing more needs to be done.
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("Bad interrupt mode");
    }
}

JitCompartment::JitCompartment()
  : stubCodes_(nullptr),
    baselineCallReturnFromIonAddr_(nullptr),
    baselineGetPropReturnFromIonAddr_(nullptr),
    baselineSetPropReturnFromIonAddr_(nullptr),
    baselineCallReturnFromStubAddr_(nullptr),
    baselineGetPropReturnFromStubAddr_(nullptr),
    baselineSetPropReturnFromStubAddr_(nullptr),
    stringConcatStub_(nullptr),
    parallelStringConcatStub_(nullptr),
    activeParallelEntryScripts_(nullptr)
{
}

JitCompartment::~JitCompartment()
{
    js_delete(stubCodes_);
    js_delete(activeParallelEntryScripts_);
}

bool
JitCompartment::initialize(JSContext *cx)
{
    stubCodes_ = cx->new_<ICStubCodeMap>(cx);
    if (!stubCodes_ || !stubCodes_->init())
        return false;

    return true;
}

bool
JitCompartment::ensureIonStubsExist(JSContext *cx)
{
    if (!stringConcatStub_) {
        stringConcatStub_ = generateStringConcatStub(cx, SequentialExecution);
        if (!stringConcatStub_)
            return false;
    }

#ifdef JS_THREADSAFE
    if (!parallelStringConcatStub_) {
        parallelStringConcatStub_ = generateStringConcatStub(cx, ParallelExecution);
        if (!parallelStringConcatStub_)
            return false;
    }
#endif

    return true;
}

bool
JitCompartment::notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script)
{
    // Fast path. The isParallelEntryScript bit guarantees that the script is
    // already in the set.
    if (script->parallelIonScript()->isParallelEntryScript()) {
        MOZ_ASSERT(activeParallelEntryScripts_ && activeParallelEntryScripts_->has(script));
        script->parallelIonScript()->resetParallelAge();
        return true;
    }

    if (!activeParallelEntryScripts_) {
        activeParallelEntryScripts_ = cx->new_<ScriptSet>(cx);
        if (!activeParallelEntryScripts_ || !activeParallelEntryScripts_->init())
            return false;
    }

    script->parallelIonScript()->setIsParallelEntryScript();
    ScriptSet::AddPtr p = activeParallelEntryScripts_->lookupForAdd(script);
    return p || activeParallelEntryScripts_->add(p, script);
}

void
jit::FinishOffThreadBuilder(IonBuilder *builder)
{
    ExecutionMode executionMode = builder->info().executionMode();

    // Clear the recompiling flag of the old ionScript, since we continue to
    // use the old ionScript if recompiling fails.
    if (executionMode == SequentialExecution && builder->script()->hasIonScript())
        builder->script()->ionScript()->clearRecompiling();

    // Clean up if compilation did not succeed.
    if (CompilingOffThread(builder->script(), executionMode))
        SetIonScript(builder->script(), executionMode, nullptr);

    // The builder is allocated into its LifoAlloc, so destroying that will
    // destroy the builder and all other data accumulated during compilation,
    // except any final codegen (which includes an assembler and needs to be
    // explicitly destroyed).
    js_delete(builder->backgroundCodegen());
    js_delete(builder->alloc().lifoAlloc());
}

static inline void
FinishAllOffThreadCompilations(JSCompartment *comp)
{
#ifdef JS_THREADSAFE
    AutoLockWorkerThreadState lock;
    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    for (size_t i = 0; i < finished.length(); i++) {
        IonBuilder *builder = finished[i];
        if (builder->compartment == CompileCompartment::get(comp)) {
            FinishOffThreadBuilder(builder);
            WorkerThreadState().remove(finished, &i);
        }
    }
#endif
}

/* static */ void
JitRuntime::Mark(JSTracer *trc)
{
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    Zone *zone = trc->runtime()->atomsCompartment()->zone();
    for (gc::CellIterUnderGC i(zone, gc::FINALIZE_JITCODE); !i.done(); i.next()) {
        JitCode *code = i.get<JitCode>();
        MarkJitCodeRoot(trc, &code, "wrapper");
    }
}

void
JitCompartment::mark(JSTracer *trc, JSCompartment *compartment)
{
    // Cancel any active or pending off thread compilations. Note that the
    // MIR graph does not hold any nursery pointers, so there's no need to
    // do this for minor GCs.
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    CancelOffThreadIonCompile(compartment, nullptr);
    FinishAllOffThreadCompilations(compartment);

    // Free temporary OSR buffer.
    trc->runtime()->jitRuntime()->freeOsrTempData();

    // Mark scripts with parallel IonScripts if we should preserve them.
    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();

            // If the script has since been invalidated or was attached by an
            // off-thread worker too late (i.e., the ForkJoin finished with
            // warmup doing all the work), remove it.
            if (!script->hasParallelIonScript() ||
                !script->parallelIonScript()->isParallelEntryScript())
            {
                e.removeFront();
                continue;
            }

            // Check and increment the age. If the script is below the max
            // age, mark it.
            //
            // Subtlety: We depend on the tracing of the parallel IonScript's
            // callTargetEntries to propagate the parallel age to the entire
            // call graph.
            if (ShouldPreserveParallelJITCode(trc->runtime(), script, /* increase = */ true)) {
                MarkScript(trc, const_cast<PreBarrieredScript *>(&e.front()), "par-script");
                MOZ_ASSERT(script == e.front());
            }
        }
    }
}

void
JitCompartment::sweep(FreeOp *fop)
{
    stubCodes_->sweep(fop);

    // If the sweep removed the ICCall_Fallback stub, null out the baseline
    // Call return-address fields.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::Call_Fallback))) {
        baselineCallReturnFromIonAddr_ = nullptr;
        baselineCallReturnFromStubAddr_ = nullptr;
    }
    // Similarly for the ICGetProp_Fallback stub.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::GetProp_Fallback))) {
        baselineGetPropReturnFromIonAddr_ = nullptr;
        baselineGetPropReturnFromStubAddr_ = nullptr;
    }
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::SetProp_Fallback))) {
        baselineSetPropReturnFromIonAddr_ = nullptr;
        baselineSetPropReturnFromStubAddr_ = nullptr;
    }

    if (stringConcatStub_ && !IsJitCodeMarked(stringConcatStub_.unsafeGet()))
        stringConcatStub_ = nullptr;

    if (parallelStringConcatStub_ && !IsJitCodeMarked(parallelStringConcatStub_.unsafeGet()))
        parallelStringConcatStub_ = nullptr;

    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();
            if (!IsScriptMarked(&script))
                e.removeFront();
            else
                MOZ_ASSERT(script == e.front());
        }
    }
}

JitCode *
JitRuntime::getBailoutTable(const FrameSizeClass &frameClass) const
{
    JS_ASSERT(frameClass != FrameSizeClass::None());
    return bailoutTables_[frameClass.classId()];
}

JitCode *
JitRuntime::getVMWrapper(const VMFunction &f) const
{
    JS_ASSERT(functionWrappers_);
    JS_ASSERT(functionWrappers_->initialized());
    JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
    JS_ASSERT(p);

    return p->value();
}

template <AllowGC allowGC>
JitCode *
JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
             JSC::ExecutablePool *pool, JSC::CodeKind kind)
{
    JitCode *codeObj = js::NewJitCode<allowGC>(cx);
    if (!codeObj) {
        pool->release(headerSize + bufferSize, kind);
        return nullptr;
    }

    new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
    return codeObj;
}

template
JitCode *
JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                    JSC::ExecutablePool *pool, JSC::CodeKind kind);

template
JitCode *
JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                   JSC::ExecutablePool *pool, JSC::CodeKind kind);

void
JitCode::copyFrom(MacroAssembler &masm)
{
    // Store the JitCode pointer right before the code buffer, so we can
    // recover the gcthing from relocation tables.
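    // A sketch of the resulting buffer layout, inferred from the offset
    // accessors used below rather than quoted from a header:
    //
    //   ... | JitCode *this | instructions | jump reloc table |
    //   data reloc table | pre-barrier table
    //
    // code_ points at the first instruction byte, so the owning JitCode
    // can be recovered from code_ - sizeof(JitCode *).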
    *(JitCode **)(code_ - sizeof(JitCode *)) = this;
    insnSize_ = masm.instructionsSize();
    masm.executableCopy(code_);

    jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
    masm.copyJumpRelocationTable(code_ + jumpRelocTableOffset());

    dataRelocTableBytes_ = masm.dataRelocationTableBytes();
    masm.copyDataRelocationTable(code_ + dataRelocTableOffset());

    preBarrierTableBytes_ = masm.preBarrierTableBytes();
    masm.copyPreBarrierTable(code_ + preBarrierTableOffset());

    masm.processCodeLabels(code_);
}

void
JitCode::trace(JSTracer *trc)
{
    // Note that we cannot mark invalidated scripts, since we've basically
    // corrupted the code stream by injecting bailouts.
    if (invalidated())
        return;

    if (jumpRelocTableBytes_) {
        uint8_t *start = code_ + jumpRelocTableOffset();
        CompactBufferReader reader(start, start + jumpRelocTableBytes_);
        MacroAssembler::TraceJumpRelocations(trc, this, reader);
    }
    if (dataRelocTableBytes_) {
        uint8_t *start = code_ + dataRelocTableOffset();
        CompactBufferReader reader(start, start + dataRelocTableBytes_);
        MacroAssembler::TraceDataRelocations(trc, this, reader);
    }
}

void
JitCode::finalize(FreeOp *fop)
{
    // Make sure this can't race with an interrupting thread, which may try
    // to read the contents of the pool we are releasing references in.
    JS_ASSERT(fop->runtime()->currentThreadOwnsInterruptLock());

    // Buffer can be freed at any time hereafter. Catch use-after-free bugs.
    // Don't do this if the Ion code is protected, as the signal handler will
    // deadlock trying to reacquire the interrupt lock.
    if (fop->runtime()->jitRuntime() && !fop->runtime()->jitRuntime()->ionCodeProtected())
        memset(code_, JS_SWEPT_CODE_PATTERN, bufferSize_);
    code_ = nullptr;

    // Code buffers are stored inside JSC pools.
    // Pools are refcounted. Releasing the pool may free it.
    if (pool_) {
        // Horrible hack: if we are using perf integration, we don't
        // want to reuse code addresses, so we just leak the memory instead.
        if (!PerfEnabled())
            pool_->release(headerSize_ + bufferSize_, JSC::CodeKind(kind_));
        pool_ = nullptr;
    }
}

void
JitCode::togglePreBarriers(bool enabled)
{
    uint8_t *start = code_ + preBarrierTableOffset();
    CompactBufferReader reader(start, start + preBarrierTableBytes_);

    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        CodeLocationLabel loc(this, offset);
        if (enabled)
            Assembler::ToggleToCmp(loc);
        else
            Assembler::ToggleToJmp(loc);
    }
}

IonScript::IonScript()
  : method_(nullptr),
    deoptTable_(nullptr),
    osrPc_(nullptr),
    osrEntryOffset_(0),
    skipArgCheckEntryOffset_(0),
    invalidateEpilogueOffset_(0),
    invalidateEpilogueDataOffset_(0),
    numBailouts_(0),
    hasUncompiledCallTarget_(false),
    isParallelEntryScript_(false),
    hasSPSInstrumentation_(false),
    recompiling_(false),
    runtimeData_(0),
    runtimeSize_(0),
    cacheIndex_(0),
    cacheEntries_(0),
    safepointIndexOffset_(0),
    safepointIndexEntries_(0),
    safepointsStart_(0),
    safepointsSize_(0),
    frameSlots_(0),
    frameSize_(0),
    bailoutTable_(0),
    bailoutEntries_(0),
    osiIndexOffset_(0),
    osiIndexEntries_(0),
    snapshots_(0),
    snapshotsListSize_(0),
    snapshotsRVATableSize_(0),
    constantTable_(0),
    constantEntries_(0),
    callTargetList_(0),
    callTargetEntries_(0),
    backedgeList_(0),
    backedgeEntries_(0),
    refcount_(0),
    parallelAge_(0),
    recompileInfo_(),
    osrPcMismatchCounter_(0),
    dependentAsmJSModules(nullptr)
{
}

IonScript *
IonScript::New(JSContext *cx, types::RecompileInfo recompileInfo,
               uint32_t frameSlots, uint32_t frameSize,
               size_t snapshotsListSize, size_t snapshotsRVATableSize,
               size_t recoversSize, size_t bailoutEntries,
               size_t constants, size_t safepointIndices,
               size_t osiIndices, size_t cacheEntries,
               size_t runtimeSize, size_t safepointsSize,
               size_t callTargetEntries, size_t backedgeEntries,
               OptimizationLevel optimizationLevel)
{
    static const int DataAlignment = sizeof(void *);

    if (snapshotsListSize >= MAX_BUFFER_SIZE ||
        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
    {
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    // This should not overflow on x86, because the memory is already allocated
    // *somewhere* and if the total overflowed there would be no memory left
    // at all.
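    // All variable-length tables are carved out of a single trailing
    // allocation; the order of the padded regions must match the
    // offsetCursor bookkeeping below: runtime data, cache entries,
    // safepoint indices, safepoints, bailout table, OSI indices,
    // snapshots, recovers, constants, call targets, patchable backedges.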
    size_t paddedSnapshotsSize = AlignBytes(snapshotsListSize + snapshotsRVATableSize, DataAlignment);
    size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
    size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
    size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
    size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
    size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
    size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
    size_t paddedCallTargetSize = AlignBytes(callTargetEntries * sizeof(JSScript *), DataAlignment);
    size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
    size_t bytes = paddedSnapshotsSize +
                   paddedRecoversSize +
                   paddedBailoutSize +
                   paddedConstantsSize +
                   paddedSafepointIndicesSize +
                   paddedOsiIndicesSize +
                   paddedCacheEntriesSize +
                   paddedRuntimeSize +
                   paddedSafepointSize +
                   paddedCallTargetSize +
                   paddedBackedgeSize;
    uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
    if (!buffer)
        return nullptr;

    IonScript *script = reinterpret_cast<IonScript *>(buffer);
    new (script) IonScript();

    uint32_t offsetCursor = sizeof(IonScript);

    script->runtimeData_ = offsetCursor;
    script->runtimeSize_ = runtimeSize;
    offsetCursor += paddedRuntimeSize;

    script->cacheIndex_ = offsetCursor;
    script->cacheEntries_ = cacheEntries;
    offsetCursor += paddedCacheEntriesSize;

    script->safepointIndexOffset_ = offsetCursor;
    script->safepointIndexEntries_ = safepointIndices;
    offsetCursor += paddedSafepointIndicesSize;

    script->safepointsStart_ = offsetCursor;
    script->safepointsSize_ = safepointsSize;
    offsetCursor += paddedSafepointSize;

    script->bailoutTable_ = offsetCursor;
    script->bailoutEntries_ = bailoutEntries;
    offsetCursor += paddedBailoutSize;

    script->osiIndexOffset_ = offsetCursor;
    script->osiIndexEntries_ = osiIndices;
    offsetCursor += paddedOsiIndicesSize;

    script->snapshots_ = offsetCursor;
    script->snapshotsListSize_ = snapshotsListSize;
    script->snapshotsRVATableSize_ = snapshotsRVATableSize;
    offsetCursor += paddedSnapshotsSize;

    script->recovers_ = offsetCursor;
    script->recoversSize_ = recoversSize;
    offsetCursor += paddedRecoversSize;

    script->constantTable_ = offsetCursor;
    script->constantEntries_ = constants;
    offsetCursor += paddedConstantsSize;

    script->callTargetList_ = offsetCursor;
    script->callTargetEntries_ = callTargetEntries;
    offsetCursor += paddedCallTargetSize;

    script->backedgeList_ = offsetCursor;
    script->backedgeEntries_ = backedgeEntries;
    offsetCursor += paddedBackedgeSize;

    script->frameSlots_ = frameSlots;
    script->frameSize_ = frameSize;

    script->recompileInfo_ = recompileInfo;
    script->optimizationLevel_ = optimizationLevel;

    return script;
}

void
IonScript::trace(JSTracer *trc)
{
    if (method_)
        MarkJitCode(trc, &method_, "method");

    if (deoptTable_)
        MarkJitCode(trc, &deoptTable_, "deoptimizationTable");

    for (size_t i = 0; i < numConstants(); i++)
        gc::MarkValue(trc, &getConstant(i), "constant");

    // No write barrier is needed for the call target list, as it's attached
    // at compilation time and is read only.
    for (size_t i = 0; i < callTargetEntries(); i++) {
        // Propagate the parallelAge to the call targets.
        if (callTargetList()[i]->hasParallelIonScript())
            callTargetList()[i]->parallelIonScript()->parallelAge_ = parallelAge_;

        gc::MarkScriptUnbarriered(trc, &callTargetList()[i], "callTarget");
    }
}

/* static */ void
IonScript::writeBarrierPre(Zone *zone, IonScript *ionScript)
{
#ifdef JSGC_INCREMENTAL
    if (zone->needsBarrier())
        ionScript->trace(zone->barrierTracer());
#endif
}

void
IonScript::copySnapshots(const SnapshotWriter *writer)
{
    MOZ_ASSERT(writer->listSize() == snapshotsListSize_);
    memcpy((uint8_t *)this + snapshots_,
           writer->listBuffer(), snapshotsListSize_);

    MOZ_ASSERT(snapshotsRVATableSize_);
    MOZ_ASSERT(writer->RVATableSize() == snapshotsRVATableSize_);
    memcpy((uint8_t *)this + snapshots_ + snapshotsListSize_,
           writer->RVATableBuffer(), snapshotsRVATableSize_);
}

void
IonScript::copyRecovers(const RecoverWriter *writer)
{
    MOZ_ASSERT(writer->size() == recoversSize_);
    memcpy((uint8_t *)this + recovers_, writer->buffer(), recoversSize_);
}

void
IonScript::copySafepoints(const SafepointWriter *writer)
{
    JS_ASSERT(writer->size() == safepointsSize_);
    memcpy((uint8_t *)this + safepointsStart_, writer->buffer(), safepointsSize_);
}

void
IonScript::copyBailoutTable(const SnapshotOffset *table)
{
    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32_t));
}

void
IonScript::copyConstants(const Value *vp)
{
    for (size_t i = 0; i < constantEntries_; i++)
        constants()[i].init(vp[i]);
}

void
IonScript::copyCallTargetEntries(JSScript **callTargets)
{
    for (size_t i = 0; i < callTargetEntries_; i++)
        callTargetList()[i] = callTargets[i];
}

void
IonScript::copyPatchableBackedges(JSContext *cx, JitCode *code,
                                  PatchableBackedgeInfo *backedges)
{
    for (size_t i = 0; i < backedgeEntries_; i++) {
        const PatchableBackedgeInfo &info = backedges[i];
        PatchableBackedge *patchableBackedge = &backedgeList()[i];
        CodeLocationJump backedge(code, info.backedge);
        CodeLocationLabel loopHeader(code, CodeOffsetLabel(info.loopHeader->offset()));
        CodeLocationLabel interruptCheck(code, CodeOffsetLabel(info.interruptCheck->offset()));
        new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);

        // Point the backedge to either of its possible targets, according to
        // whether an interrupt is currently desired, matching the targets
        // established by ensureIonCodeAccessible() above. We don't handle the
        // interrupt immediately as the interrupt lock is held here.
        PatchJump(backedge, cx->runtime()->interrupt ? interruptCheck : loopHeader);

        cx->runtime()->jitRuntime()->addPatchableBackedge(patchableBackedge);
    }
}

void
IonScript::copySafepointIndices(const SafepointIndex *si, MacroAssembler &masm)
{
    // Jumps in the caches reflect the offset of those jumps in the compiled
    // code, not the absolute positions of the jumps. Update according to the
    // final code address now.
    SafepointIndex *table = safepointIndices();
    memcpy(table, si, safepointIndexEntries_ * sizeof(SafepointIndex));
    for (size_t i = 0; i < safepointIndexEntries_; i++)
        table[i].adjustDisplacement(masm.actualOffset(table[i].displacement()));
}

void
IonScript::copyOsiIndices(const OsiIndex *oi, MacroAssembler &masm)
{
    memcpy(osiIndices(), oi, osiIndexEntries_ * sizeof(OsiIndex));
    for (unsigned i = 0; i < osiIndexEntries_; i++)
        osiIndices()[i].fixUpOffset(masm);
}

void
IonScript::copyRuntimeData(const uint8_t *data)
{
    memcpy(runtimeData(), data, runtimeSize());
}

void
IonScript::copyCacheEntries(const uint32_t *caches, MacroAssembler &masm)
{
    memcpy(cacheIndex(), caches, numCaches() * sizeof(uint32_t));

    // Jumps in the caches reflect the offset of those jumps in the compiled
    // code, not the absolute positions of the jumps. Update according to the
    // final code address now.
    for (size_t i = 0; i < numCaches(); i++)
        getCacheFromIndex(i).updateBaseAddress(method_, masm);
}

const SafepointIndex *
IonScript::getSafepointIndex(uint32_t disp) const
{
    JS_ASSERT(safepointIndexEntries_ > 0);

    const SafepointIndex *table = safepointIndices();
    if (safepointIndexEntries_ == 1) {
        JS_ASSERT(disp == table[0].displacement());
        return &table[0];
    }

    size_t minEntry = 0;
    size_t maxEntry = safepointIndexEntries_ - 1;
    uint32_t min = table[minEntry].displacement();
    uint32_t max = table[maxEntry].displacement();

    // Assert if the element is not in the list.
    JS_ASSERT(min <= disp && disp <= max);

    // Approximate the location of the FrameInfo.
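    // The probe below is an interpolation search: assuming displacements
    // are roughly evenly spaced between min and max, the first guess is
    //
    //   guess = minEntry + (disp - min) * (maxEntry - minEntry) / (max - min)
    //
    // For example (illustrative numbers only): with min = 0, max = 1000 and
    // eleven entries, disp = 500 probes entry 5 directly, and the linear
    // scan below corrects for any unevenness.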
    size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry;
    uint32_t guessDisp = table[guess].displacement();

    if (guessDisp == disp)
        return &table[guess];

    // Doing a linear scan from the guess should be more efficient in the
    // common case of small groups which are evenly distributed across the
    // code.
    //
    // such as: <... ... ... ... . ... ...>
    if (guessDisp > disp) {
        while (--guess >= minEntry) {
            guessDisp = table[guess].displacement();
            JS_ASSERT(guessDisp >= disp);
            if (guessDisp == disp)
                return &table[guess];
        }
    } else {
        while (++guess <= maxEntry) {
            guessDisp = table[guess].displacement();
            JS_ASSERT(guessDisp <= disp);
            if (guessDisp == disp)
                return &table[guess];
        }
    }

    MOZ_ASSUME_UNREACHABLE("displacement not found.");
}

const OsiIndex *
IonScript::getOsiIndex(uint32_t disp) const
{
    for (const OsiIndex *it = osiIndices(), *end = osiIndices() + osiIndexEntries_;
         it != end;
         ++it)
    {
        if (it->returnPointDisplacement() == disp)
            return it;
    }

    MOZ_ASSUME_UNREACHABLE("Failed to find OSI point return address");
}

const OsiIndex *
IonScript::getOsiIndex(uint8_t *retAddr) const
{
    IonSpew(IonSpew_Invalidate, "IonScript %p has method %p raw %p", (void *) this, (void *)
            method(), method()->raw());

    JS_ASSERT(containsCodeAddress(retAddr));
    uint32_t disp = retAddr - method()->raw();
    return getOsiIndex(disp);
}

void
IonScript::Trace(JSTracer *trc, IonScript *script)
{
    if (script != ION_DISABLED_SCRIPT)
        script->trace(trc);
}

void
IonScript::Destroy(FreeOp *fop, IonScript *script)
{
    script->destroyCaches();
    script->unlinkFromRuntime(fop);
    fop->free_(script);
}

void
IonScript::toggleBarriers(bool enabled)
{
    method()->togglePreBarriers(enabled);
}

void
IonScript::purgeCaches()
{
    // Don't reset any ICs if we're invalidated; otherwise, repointing the
    // inline jump could overwrite an invalidation marker. These ICs can
    // no longer run, but the IC slow paths may still be active on the stack.
    // ICs therefore are required to check for invalidation before patching,
    // to preserve this invariant.
    if (invalidated())
        return;

    for (size_t i = 0; i < numCaches(); i++)
        getCacheFromIndex(i).reset();
}

void
IonScript::destroyCaches()
{
    for (size_t i = 0; i < numCaches(); i++)
        getCacheFromIndex(i).destroy();
}

bool
IonScript::addDependentAsmJSModule(JSContext *cx, DependentAsmJSModuleExit exit)
{
    if (!dependentAsmJSModules) {
        dependentAsmJSModules = cx->new_<Vector<DependentAsmJSModuleExit> >(cx);
        if (!dependentAsmJSModules)
            return false;
    }
    return dependentAsmJSModules->append(exit);
}

void
IonScript::unlinkFromRuntime(FreeOp *fop)
{
    // Remove any links from AsmJSModules that contain optimized FFI calls into
    // this IonScript.
    if (dependentAsmJSModules) {
        for (size_t i = 0; i < dependentAsmJSModules->length(); i++) {
            DependentAsmJSModuleExit exit = dependentAsmJSModules->begin()[i];
            exit.module->detachIonCompilation(exit.exitIndex);
        }

        fop->delete_(dependentAsmJSModules);
        dependentAsmJSModules = nullptr;
    }

    // The writes to the executable buffer below may clobber backedge jumps, so
    // make sure that those backedges are unlinked from the runtime and not
    // reclobbered with garbage if an interrupt is requested.
    JSRuntime *rt = fop->runtime();
    for (size_t i = 0; i < backedgeEntries_; i++) {
        PatchableBackedge *backedge = &backedgeList()[i];
        rt->jitRuntime()->removePatchableBackedge(backedge);
    }

    // Clear the list of backedges, so that this method is idempotent. It is
    // called during destruction, and may be additionally called when the
    // script is invalidated.
    backedgeEntries_ = 0;
}

void
jit::ToggleBarriers(JS::Zone *zone, bool needs)
{
    JSRuntime *rt = zone->runtimeFromMainThread();
    if (!rt->hasJitRuntime())
        return;

    for (gc::CellIterUnderGC i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        if (script->hasIonScript())
            script->ionScript()->toggleBarriers(needs);
        if (script->hasBaselineScript())
            script->baselineScript()->toggleBarriers(needs);
    }

    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
        if (comp->jitCompartment())
            comp->jitCompartment()->toggleBaselineStubBarriers(needs);
    }
}

namespace js {
namespace jit {

bool
OptimizeMIR(MIRGenerator *mir)
{
    MIRGraph &graph = mir->graph();
    TraceLogger *logger;
    if (GetIonContext()->runtime->onMainThread())
        logger = TraceLoggerForMainThread(GetIonContext()->runtime);
    else
        logger = TraceLoggerForCurrentThread();

    if (!mir->compilingAsmJS()) {
        if (!MakeMRegExpHoistable(graph))
            return false;
    }

    IonSpewPass("BuildSSA");
    AssertBasicGraphCoherency(graph);

    if (mir->shouldCancel("Start"))
        return false;

    {
        AutoTraceLog log(logger, TraceLogger::SplitCriticalEdges);
        if (!SplitCriticalEdges(graph))
            return false;
        IonSpewPass("Split Critical Edges");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Split Critical Edges"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::RenumberBlocks);
        if (!RenumberBlocks(graph))
            return false;
        IonSpewPass("Renumber Blocks");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Renumber Blocks"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::DominatorTree);
        if (!BuildDominatorTree(graph))
            return false;
        // No spew: graph not changed.

        if (mir->shouldCancel("Dominator Tree"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::PhiAnalysis);
        // Aggressive phi elimination must occur before any code elimination. If the
        // script contains a try-statement, we only compiled the try block and not
        // the catch or finally blocks, so in this case it's also invalid to use
        // aggressive phi elimination.
        Observability observability = graph.hasTryBlock()
                                      ? ConservativeObservability
                                      : AggressiveObservability;
        if (!EliminatePhis(mir, graph, observability))
            return false;
        IonSpewPass("Eliminate phis");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Eliminate phis"))
            return false;

        if (!BuildPhiReverseMapping(graph))
            return false;
        AssertExtendedGraphCoherency(graph);
        // No spew: graph not changed.
        if (mir->shouldCancel("Phi reverse mapping"))
            return false;
    }

    if (!mir->compilingAsmJS()) {
        AutoTraceLog log(logger, TraceLogger::ApplyTypes);
        if (!ApplyTypeInformation(mir, graph))
            return false;
        IonSpewPass("Apply types");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Apply types"))
            return false;
    }

    if (graph.entryBlock()->info().executionMode() == ParallelExecution) {
        AutoTraceLog log(logger, TraceLogger::ParallelSafetyAnalysis);
        ParallelSafetyAnalysis analysis(mir, graph);
        if (!analysis.analyze())
            return false;
    }

    // Alias analysis is required for LICM and GVN so that we don't move
    // loads across stores.
    if (mir->optimizationInfo().licmEnabled() ||
        mir->optimizationInfo().gvnEnabled())
    {
        AutoTraceLog log(logger, TraceLogger::AliasAnalysis);
        AliasAnalysis analysis(mir, graph);
        if (!analysis.analyze())
            return false;
        IonSpewPass("Alias analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Alias analysis"))
            return false;

        // Eliminating dead resume point operands requires basic block
        // instructions to be numbered. Reuse the numbering computed during
        // alias analysis.
        if (!EliminateDeadResumePointOperands(mir, graph))
            return false;

        if (mir->shouldCancel("Eliminate dead resume point operands"))
            return false;
    }

    if (mir->optimizationInfo().gvnEnabled()) {
        AutoTraceLog log(logger, TraceLogger::GVN);
        ValueNumberer gvn(mir, graph, mir->optimizationInfo().gvnKind() == GVN_Optimistic);
        if (!gvn.analyze())
            return false;
        IonSpewPass("GVN");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("GVN"))
            return false;
    }

    if (mir->optimizationInfo().uceEnabled()) {
        AutoTraceLog log(logger, TraceLogger::UCE);
        UnreachableCodeElimination uce(mir, graph);
        if (!uce.analyze())
            return false;
        IonSpewPass("UCE");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("UCE"))
            return false;
    }

    if (mir->optimizationInfo().licmEnabled()) {
        AutoTraceLog log(logger, TraceLogger::LICM);
        // LICM can hoist instructions from conditional branches and trigger
        // repeated bailouts. Disable it if this script is known to bailout
        // frequently.
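        // (As far as we can tell, hadFrequentBailouts is set once repeated
        // bailouts have already forced an invalidation, so speculative
        // hoisting is unlikely to pay off a second time.)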
        JSScript *script = mir->info().script();
        if (!script || !script->hadFrequentBailouts()) {
            LICM licm(mir, graph);
            if (!licm.analyze())
                return false;
            IonSpewPass("LICM");
            AssertExtendedGraphCoherency(graph);

            if (mir->shouldCancel("LICM"))
                return false;
        }
    }

    if (mir->optimizationInfo().rangeAnalysisEnabled()) {
        AutoTraceLog log(logger, TraceLogger::RangeAnalysis);
        RangeAnalysis r(mir, graph);
        if (!r.addBetaNodes())
            return false;
        IonSpewPass("Beta");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("RA Beta"))
            return false;

        if (!r.analyze() || !r.addRangeAssertions())
            return false;
        IonSpewPass("Range Analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Range Analysis"))
            return false;

        if (!r.removeBetaNodes())
            return false;
        IonSpewPass("De-Beta");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("RA De-Beta"))
            return false;

        if (mir->optimizationInfo().uceEnabled()) {
            bool shouldRunUCE = false;
            if (!r.prepareForUCE(&shouldRunUCE))
                return false;
            IonSpewPass("RA check UCE");
            AssertExtendedGraphCoherency(graph);

            if (mir->shouldCancel("RA check UCE"))
                return false;

            if (shouldRunUCE) {
                UnreachableCodeElimination uce(mir, graph);
                uce.disableAliasAnalysis();
                if (!uce.analyze())
                    return false;
                IonSpewPass("UCE After RA");
                AssertExtendedGraphCoherency(graph);

                if (mir->shouldCancel("UCE After RA"))
                    return false;
            }
        }

        if (!r.truncate())
            return false;
        IonSpewPass("Truncate Doubles");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Truncate Doubles"))
            return false;
    }

    if (mir->optimizationInfo().eaaEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EffectiveAddressAnalysis);
        EffectiveAddressAnalysis eaa(graph);
        if (!eaa.analyze())
            return false;
        IonSpewPass("Effective Address Analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Effective Address Analysis"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::EliminateDeadCode);
        if (!EliminateDeadCode(mir, graph))
            return false;
        IonSpewPass("DCE");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("DCE"))
            return false;
    }

    // Passes after this point must not move instructions; these analyses
    // depend on knowing the final order in which instructions will execute.

    if (mir->optimizationInfo().edgeCaseAnalysisEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EdgeCaseAnalysis);
        EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
        if (!edgeCaseAnalysis.analyzeLate())
            return false;
        IonSpewPass("Edge Case Analysis (Late)");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Edge Case Analysis (Late)"))
            return false;
    }

    if (mir->optimizationInfo().eliminateRedundantChecksEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EliminateRedundantChecks);
        // Note: check elimination has to run after all other passes that move
        // instructions. Since check uses are replaced with the actual index,
        // code motion after this pass could incorrectly move a load or store
        // before its bounds check.
        if (!EliminateRedundantChecks(graph))
            return false;
        IonSpewPass("Bounds Check Elimination");
        AssertGraphCoherency(graph);
    }

    return true;
}

LIRGraph *
GenerateLIR(MIRGenerator *mir)
{
    MIRGraph &graph = mir->graph();

    LIRGraph *lir = mir->alloc().lifoAlloc()->new_<LIRGraph>(&graph);
    if (!lir || !lir->init())
        return nullptr;

    LIRGenerator lirgen(mir, graph, *lir);
    if (!lirgen.generate())
        return nullptr;
    IonSpewPass("Generate LIR");

    if (mir->shouldCancel("Generate LIR"))
        return nullptr;

    AllocationIntegrityState integrity(*lir);

    switch (mir->optimizationInfo().registerAllocator()) {
      case RegisterAllocator_LSRA: {
#ifdef DEBUG
        if (!integrity.record())
            return nullptr;
#endif

        LinearScanAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        IonSpewPass("Allocate Registers [LSRA]", &regalloc);
        break;
      }

      case RegisterAllocator_Backtracking: {
#ifdef DEBUG
        if (!integrity.record())
            return nullptr;
#endif

        BacktrackingAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        IonSpewPass("Allocate Registers [Backtracking]");
        break;
      }

      case RegisterAllocator_Stupid: {
        // Use the integrity checker to populate safepoint information, so
        // run it in all builds.
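        // record()/check() bracket each allocator above and below: record()
        // snapshots the virtual-register assignments before allocation, and
        // check(populateSafepoints) verifies the result afterwards. Only
        // this allocator passes |true|, which has the checker fill in
        // safepoint data as a side effect -- hence no #ifdef DEBUG here.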
        if (!integrity.record())
            return nullptr;

        StupidAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;
        if (!integrity.check(true))
            return nullptr;
        IonSpewPass("Allocate Registers [Stupid]");
        break;
      }

      default:
        MOZ_ASSUME_UNREACHABLE("Bad regalloc");
    }

    if (mir->shouldCancel("Allocate Registers"))
        return nullptr;

    // Now that all optimization and register allocation is done, re-introduce
    // critical edges to avoid unnecessary jumps.
    if (!UnsplitEdges(lir))
        return nullptr;
    IonSpewPass("Unsplit Critical Edges");
    AssertBasicGraphCoherency(graph);

    return lir;
}

CodeGenerator *
GenerateCode(MIRGenerator *mir, LIRGraph *lir)
{
    CodeGenerator *codegen = js_new<CodeGenerator>(mir, lir);
    if (!codegen)
        return nullptr;

    if (!codegen->generate()) {
        js_delete(codegen);
        return nullptr;
    }

    return codegen;
}

CodeGenerator *
CompileBackEnd(MIRGenerator *mir)
{
    if (!OptimizeMIR(mir))
        return nullptr;

    LIRGraph *lir = GenerateLIR(mir);
    if (!lir)
        return nullptr;

    return GenerateCode(mir, lir);
}

void
AttachFinishedCompilations(JSContext *cx)
{
#ifdef JS_THREADSAFE
    JitCompartment *ion = cx->compartment()->jitCompartment();
    if (!ion)
        return;

    types::AutoEnterAnalysis enterTypes(cx);
    AutoLockWorkerThreadState lock;

    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());

    // Incorporate any off thread compilations for the compartment which have
    // finished, failed or have been cancelled.
    while (true) {
        IonBuilder *builder = nullptr;

        // Find a finished builder for the compartment.
        for (size_t i = 0; i < finished.length(); i++) {
            IonBuilder *testBuilder = finished[i];
            if (testBuilder->compartment == CompileCompartment::get(cx->compartment())) {
                builder = testBuilder;
                WorkerThreadState().remove(finished, &i);
                break;
            }
        }
        if (!builder)
            break;

        if (CodeGenerator *codegen = builder->backgroundCodegen()) {
            RootedScript script(cx, builder->script());
            IonContext ictx(cx, &builder->alloc());
            AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
            AutoTraceLog logLink(logger, TraceLogger::IonLinking);

            // Root the assembler until the builder is finished below. As it
            // was constructed off thread, the assembler has not been rooted
            // previously, though any GC activity would discard the builder.
            codegen->masm.constructRoot(cx);

            bool success;
            {
                // Release the worker thread lock and root the compiler for GC.
                AutoTempAllocatorRooter root(cx, &builder->alloc());
                AutoUnlockWorkerThreadState unlock;
                success = codegen->link(cx, builder->constraints());
            }

            if (!success) {
                // Silently ignore OOM during code generation. The caller is
                // InvokeInterruptCallback, which always runs at a
                // nondeterministic time. It's not OK to throw a catchable
                // exception from there.
                cx->clearPendingException();
            }
        }

        FinishOffThreadBuilder(builder);
    }
#endif
}

static const size_t BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;

static inline bool
OffThreadCompilationAvailable(JSContext *cx)
{
#ifdef JS_THREADSAFE
    // Even if off thread compilation is enabled, compilation must still occur
    // on the main thread in some cases. Do not compile off thread during an
    // incremental GC, as this may trip incremental read barriers.
    //
    // Require cpuCount > 1 so that Ion compilation jobs and main-thread
    // execution are not competing for the same resources.
    //
    // Skip off thread compilation if PC count profiling is enabled, as
    // CodeGenerator::maybeCreateScriptCounts will not attach script profiles
    // when running off thread.
    return cx->runtime()->canUseParallelIonCompilation()
        && WorkerThreadState().cpuCount > 1
        && cx->runtime()->gcIncrementalState == gc::NO_INCREMENTAL
        && !cx->runtime()->profilingScripts;
#else
    return false;
#endif
}

static void
TrackAllProperties(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(obj->hasSingletonType());

    for (Shape::Range<NoGC> range(obj->lastProperty()); !range.empty(); range.popFront())
        types::EnsureTrackPropertyTypes(cx, obj, range.front().propid());
}

static void
TrackPropertiesForSingletonScopes(JSContext *cx, JSScript *script, BaselineFrame *baselineFrame)
{
    // Ensure that all properties of singleton call objects which the script
    // could access are tracked. These are generally accessed through
    // ALIASEDVAR operations in baseline and will not be tracked even if they
    // have been accessed in baseline code.
    JSObject *environment = script->functionNonDelazifying()
                            ? script->functionNonDelazifying()->environment()
                            : nullptr;

    while (environment && !environment->is<GlobalObject>()) {
        if (environment->is<CallObject>() && environment->hasSingletonType())
            TrackAllProperties(cx, environment);
        environment = environment->enclosingScope();
    }

    if (baselineFrame) {
        JSObject *scope = baselineFrame->scopeChain();
        if (scope->is<CallObject>() && scope->hasSingletonType())
            TrackAllProperties(cx, scope);
    }
}

static AbortReason
IonCompile(JSContext *cx, JSScript *script,
           BaselineFrame *baselineFrame, jsbytecode *osrPc, bool constructing,
           ExecutionMode executionMode, bool recompile,
           OptimizationLevel optimizationLevel)
{
    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
    AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
    AutoTraceLog logCompile(logger, TraceLogger::IonCompilation);

    JS_ASSERT(optimizationLevel > Optimization_DontCompile);

    // Make sure the script's canonical function isn't lazy. We can't de-lazify
    // it in a worker thread.
    script->ensureNonLazyCanonicalFunction(cx);

    TrackPropertiesForSingletonScopes(cx, script, baselineFrame);

    LifoAlloc *alloc = cx->new_<LifoAlloc>(BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
    if (!alloc)
        return AbortReason_Alloc;

    ScopedJSDeletePtr<LifoAlloc> autoDelete(alloc);

    TempAllocator *temp = alloc->new_<TempAllocator>(alloc);
    if (!temp)
        return AbortReason_Alloc;

    IonContext ictx(cx, temp);

    types::AutoEnterAnalysis enter(cx);

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return AbortReason_Alloc;

    if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
        return AbortReason_Alloc;

    if (executionMode == ParallelExecution &&
        LIRGenerator::allowInlineForkJoinGetSlice() &&
        !cx->runtime()->jitRuntime()->ensureForkJoinGetSliceStubExists(cx))
    {
        return AbortReason_Alloc;
    }

    MIRGraph *graph = alloc->new_<MIRGraph>(temp);
    if (!graph)
        return AbortReason_Alloc;

    CompileInfo *info = alloc->new_<CompileInfo>(script, script->functionNonDelazifying(), osrPc,
                                                 constructing, executionMode,
                                                 script->needsArgsObj());
    if (!info)
        return AbortReason_Alloc;

    BaselineInspector *inspector = alloc->new_<BaselineInspector>(script);
    if (!inspector)
        return AbortReason_Alloc;

    BaselineFrameInspector *baselineFrameInspector = nullptr;
    if (baselineFrame) {
        baselineFrameInspector = NewBaselineFrameInspector(temp, baselineFrame, info);
        if (!baselineFrameInspector)
            return AbortReason_Alloc;
    }

    AutoTempAllocatorRooter root(cx, temp);
    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(*temp);
    if (!constraints)
        return AbortReason_Alloc;

    const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(optimizationLevel);
    const JitCompileOptions options(cx);
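    // The builder below is handed a null JSContext on purpose: if the build
    // finishes on a worker thread, no JSContext may be touched there, and
    // everything the back end needs has already been captured in the
    // CompileCompartment, CompileInfo and constraint list gathered above.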
    IonBuilder *builder = alloc->new_<IonBuilder>((JSContext *) nullptr,
                                                  CompileCompartment::get(cx->compartment()),
                                                  options, temp, graph, constraints,
                                                  inspector, info, optimizationInfo,
                                                  baselineFrameInspector);
    if (!builder)
        return AbortReason_Alloc;

    JS_ASSERT(recompile == HasIonScript(builder->script(), executionMode));
    JS_ASSERT(CanIonCompile(builder->script(), executionMode));

    RootedScript builderScript(cx, builder->script());

    if (recompile) {
        JS_ASSERT(executionMode == SequentialExecution);
        builderScript->ionScript()->setRecompiling();
    }

    IonSpewNewFunction(graph, builderScript);

    bool succeeded = builder->build();
    builder->clearForBackEnd();

    if (!succeeded)
        return builder->abortReason();

    // If possible, compile the script off thread.
    if (OffThreadCompilationAvailable(cx)) {
        if (!recompile)
            SetIonScript(builderScript, executionMode, ION_COMPILING_SCRIPT);

        IonSpew(IonSpew_Logs, "Can't log script %s:%d. (Compiled on background thread.)",
                builderScript->filename(), builderScript->lineno());

        if (!StartOffThreadIonCompile(cx, builder)) {
            IonSpew(IonSpew_Abort, "Unable to start off-thread ion compilation.");
            return AbortReason_Alloc;
        }

        // The allocator and associated data will be destroyed after being
        // processed in the finishedOffThreadCompilations list.
        autoDelete.forget();

        return AbortReason_NoAbort;
    }

    ScopedJSDeletePtr<CodeGenerator> codegen(CompileBackEnd(builder));
    if (!codegen) {
        IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
        return AbortReason_Disable;
    }

    bool success = codegen->link(cx, builder->constraints());

    IonSpewEndFunction();

    return success ? AbortReason_NoAbort : AbortReason_Disable;
}

static bool
CheckFrame(BaselineFrame *frame)
{
    JS_ASSERT(!frame->isGeneratorFrame());
    JS_ASSERT(!frame->isDebuggerFrame());

    // This check is to not overrun the stack.
    if (frame->isFunctionFrame() && TooManyArguments(frame->numActualArgs())) {
        IonSpew(IonSpew_Abort, "too many actual args");
        return false;
    }

    return true;
}

static bool
CheckScript(JSContext *cx, JSScript *script, bool osr)
{
    if (script->isForEval()) {
        // Eval frames are not yet supported. Supporting this will require new
        // logic in pushBailoutFrame to deal with linking prev.
        // Additionally, JSOP_DEFVAR support will require baking in isEvalFrame().
        IonSpew(IonSpew_Abort, "eval script");
        return false;
    }

    if (!script->compileAndGo()) {
        IonSpew(IonSpew_Abort, "not compile-and-go");
        return false;
    }

    return true;
}

static MethodStatus
CheckScriptSize(JSContext *cx, JSScript *script)
{
    if (!js_JitOptions.limitScriptSize)
        return Method_Compiled;

    if (script->length() > MAX_OFF_THREAD_SCRIPT_SIZE) {
        // Some scripts are so large we never try to Ion compile them.
        IonSpew(IonSpew_Abort, "Script too large (%u bytes)", script->length());
        return Method_CantCompile;
    }

    uint32_t numLocalsAndArgs = analyze::TotalSlots(script);
    if (cx->runtime()->isWorkerRuntime()) {
        // DOM Workers don't have off thread compilation enabled. Since workers
        // don't block the browser's event loop, allow them to compile larger
        // scripts.
        JS_ASSERT(!cx->runtime()->canUseParallelIonCompilation());

        if (script->length() > MAX_DOM_WORKER_SCRIPT_SIZE ||
            numLocalsAndArgs > MAX_DOM_WORKER_LOCALS_AND_ARGS)
        {
            return Method_CantCompile;
        }

        return Method_Compiled;
    }

    if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE ||
        numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
    {
#ifdef JS_THREADSAFE
        size_t cpuCount = WorkerThreadState().cpuCount;
#else
        size_t cpuCount = 1;
#endif
        if (cx->runtime()->canUseParallelIonCompilation() && cpuCount > 1) {
            // Even if off thread compilation is enabled, there are cases where
            // compilation must still occur on the main thread. Don't compile
            // in these cases (except when profiling scripts, as compilations
            // occurring with profiling should reflect those without), but do
            // not forbid compilation so that the script may be compiled later.
            if (!OffThreadCompilationAvailable(cx) && !cx->runtime()->profilingScripts) {
                IonSpew(IonSpew_Abort,
                        "Script too large for main thread, skipping (%u bytes) (%u locals/args)",
                        script->length(), numLocalsAndArgs);
                return Method_Skipped;
            }
        } else {
            IonSpew(IonSpew_Abort, "Script too large (%u bytes) (%u locals/args)",
                    script->length(), numLocalsAndArgs);
            return Method_CantCompile;
        }
    }

    return Method_Compiled;
}

bool
CanIonCompileScript(JSContext *cx, JSScript *script, bool osr)
{
    if (!script->canIonCompile() || !CheckScript(cx, script, osr))
        return false;

    return CheckScriptSize(cx, script) == Method_Compiled;
}

static OptimizationLevel
GetOptimizationLevel(HandleScript script, jsbytecode *pc, ExecutionMode executionMode)
{
    if (executionMode == ParallelExecution)
        return Optimization_Normal;

    JS_ASSERT(executionMode == SequentialExecution);

    return js_IonOptimizations.levelForScript(script, pc);
}

static MethodStatus
Compile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
        bool constructing, ExecutionMode executionMode)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(jit::IsBaselineEnabled(cx));
    JS_ASSERT_IF(osrPc != nullptr, LoopEntryCanIonOsr(osrPc));
    JS_ASSERT_IF(executionMode == ParallelExecution, !osrFrame && !osrPc);
    JS_ASSERT_IF(executionMode == ParallelExecution, !HasIonScript(script, executionMode));

    if (!script->hasBaselineScript())
        return Method_Skipped;

    if (cx->compartment()->debugMode()) {
        IonSpew(IonSpew_Abort, "debugging");
        return Method_CantCompile;
    }

    if (!CheckScript(cx, script, bool(osrPc))) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return Method_CantCompile;
    }

    MethodStatus status = CheckScriptSize(cx, script);
    if (status != Method_Compiled) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return status;
    }

    bool recompile = false;
    OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc, executionMode);
    if (optimizationLevel == Optimization_DontCompile)
        return Method_Skipped;

    IonScript *scriptIon = GetIonScript(script, executionMode);
    if (scriptIon) {
        if (!scriptIon->method())
            return Method_CantCompile;

        MethodStatus failedState = Method_Compiled;

        // If we keep failing to enter the script due to an OSR pc mismatch,
        // recompile with the right pc.
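        // Concretely: an IonScript whose OSR entry was compiled for loop A
        // cannot be entered at loop B. Each mismatched entry attempt bumps a
        // per-script counter below, and once it exceeds
        // js_JitOptions.osrPcMismatchesBeforeRecompile the script is
        // recompiled with the requested pc as its OSR entry point.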
        if (osrPc && script->ionScript()->osrPc() != osrPc) {
            uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
            if (count <= js_JitOptions.osrPcMismatchesBeforeRecompile)
                return Method_Skipped;

            failedState = Method_Skipped;
        }

        // Don't recompile or overwrite code compiled at a higher optimization
        // level with code at a lower one.
        if (optimizationLevel < scriptIon->optimizationLevel())
            return failedState;

        if (optimizationLevel == scriptIon->optimizationLevel() &&
            (!osrPc || script->ionScript()->osrPc() == osrPc))
        {
            return failedState;
        }

        // Don't start compiling if already compiling.
        if (scriptIon->isRecompiling())
            return failedState;

        if (osrPc)
            script->ionScript()->resetOsrPcMismatchCounter();

        recompile = true;
    }

    AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing, executionMode,
                                    recompile, optimizationLevel);
    if (reason == AbortReason_Error)
        return Method_Error;

    if (reason == AbortReason_Disable)
        return Method_CantCompile;

    if (reason == AbortReason_Alloc) {
        js_ReportOutOfMemory(cx);
        return Method_Error;
    }

    // Compilation succeeded, or we invalidated right away, or an inlining/alloc abort occurred.
    if (HasIonScript(script, executionMode)) {
        if (osrPc && script->ionScript()->osrPc() != osrPc)
            return Method_Skipped;
        return Method_Compiled;
    }
    return Method_Skipped;
}

} // namespace jit
} // namespace js

// Decide if a transition from interpreter execution to Ion code should occur.
// May compile or recompile the target JSScript.
MethodStatus
jit::CanEnterAtBranch(JSContext *cx, JSScript *script, BaselineFrame *osrFrame,
                      jsbytecode *pc, bool isConstructing)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT((JSOp)*pc == JSOP_LOOPENTRY);
    JS_ASSERT(LoopEntryCanIonOsr(pc));

    // Skip if the script has been disabled.
    if (!script->canIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isIonCompilingOffThread())
        return Method_Skipped;

    // Skip if the code is expected to result in a bailout.
    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // Optionally ignore on user request.
    if (!js_JitOptions.osr)
        return Method_Skipped;

    // Mark as forbidden if frame can't be handled.
    if (!CheckFrame(osrFrame)) {
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // Attempt compilation.
    // - Returns Method_Compiled if the right ionscript is present
    //   (meaning it was already present or a sequential compile finished).
    // - Returns Method_Skipped if the pc doesn't match
    //   (a background thread compilation with that pc may or may not have started).
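    // An illustrative call site (hypothetical; the real ones live in the
    // interpreter and baseline JIT) would look like:
    //
    //     if (jit::CanEnterAtBranch(cx, script, frame, pc, constructing)
    //         == Method_Compiled)
    //     {
    //         // perform OSR into the script's Ion code
    //     }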
    RootedScript rscript(cx, script);
    MethodStatus status = Compile(cx, rscript, osrFrame, pc, isConstructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script);
        return status;
    }

    return Method_Compiled;
}

MethodStatus
jit::CanEnter(JSContext *cx, RunState &state)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    JSScript *script = state.script();

    // Skip if the script has been disabled.
    if (!script->canIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isIonCompilingOffThread())
        return Method_Skipped;

    // Skip if the code is expected to result in a bailout.
    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // If constructing, allocate a new |this| object before building Ion.
    // Creating |this| is done before building Ion because it may change the
    // type information and invalidate compilation results.
    if (state.isInvoke()) {
        InvokeState &invoke = *state.asInvoke();

        if (TooManyArguments(invoke.args().length())) {
            IonSpew(IonSpew_Abort, "too many actual args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        if (TooManyArguments(invoke.args().callee().as<JSFunction>().nargs())) {
            IonSpew(IonSpew_Abort, "too many args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        if (invoke.constructing() && invoke.args().thisv().isPrimitive()) {
            RootedScript scriptRoot(cx, script);
            RootedObject callee(cx, &invoke.args().callee());
            RootedObject obj(cx, CreateThisForFunction(cx, callee,
                                                       invoke.useNewType()
                                                       ? SingletonObject
                                                       : GenericObject));
            if (!obj || !jit::IsIonEnabled(cx)) // Note: OOM under CreateThis can disable TI.
                return Method_Skipped;
            invoke.args().setThis(ObjectValue(*obj));
            script = scriptRoot;
        }
    } else if (state.isGenerator()) {
        IonSpew(IonSpew_Abort, "generator frame");
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // If --ion-eager is used, compile with Baseline first, so that we
    // can directly enter IonMonkey.
    RootedScript rscript(cx, script);
    if (js_JitOptions.eagerCompilation && !rscript->hasBaselineScript()) {
        MethodStatus status = CanEnterBaselineMethod(cx, state);
        if (status != Method_Compiled)
            return status;
    }

    // Attempt compilation. Returns Method_Compiled if already compiled.
    bool constructing = state.isInvoke() && state.asInvoke()->constructing();
    MethodStatus status =
        Compile(cx, rscript, nullptr, nullptr, constructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, rscript);
        return status;
    }

    return Method_Compiled;
}

MethodStatus
jit::CompileFunctionForBaseline(JSContext *cx, HandleScript script, BaselineFrame *frame,
                                bool isConstructing)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(frame->fun()->nonLazyScript()->canIonCompile());
    JS_ASSERT(!frame->fun()->nonLazyScript()->isIonCompilingOffThread());
    JS_ASSERT(!frame->fun()->nonLazyScript()->hasIonScript());
    JS_ASSERT(frame->isFunctionFrame());

    // Mark as forbidden if frame can't be handled.
    if (!CheckFrame(frame)) {
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // Attempt compilation. Returns Method_Compiled if already compiled.
    MethodStatus status =
        Compile(cx, script, frame, nullptr, isConstructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script);
        return status;
    }

    return Method_Compiled;
}

MethodStatus
jit::Recompile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
               bool constructing)
{
    JS_ASSERT(script->hasIonScript());
    if (script->ionScript()->isRecompiling())
        return Method_Compiled;

    MethodStatus status =
        Compile(cx, script, osrFrame, osrPc, constructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script);
        return status;
    }

    return Method_Compiled;
}

MethodStatus
jit::CanEnterInParallel(JSContext *cx, HandleScript script)
{
    // Skip if the script has been disabled.
    //
    // Note: We return Method_Skipped in this case because the other
    // CanEnter() methods do so. However, ForkJoin.cpp detects this
    // condition differently and treats it more like an error.
    if (!script->canParallelIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isParallelIonCompilingOffThread())
        return Method_Skipped;

    MethodStatus status = Compile(cx, script, nullptr, nullptr, false, ParallelExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script, ParallelExecution);
        return status;
    }

    // This can GC, so afterward, script->parallelIon is
    // not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    // Subtle: it is possible for GC to occur during
    // compilation of one of the invoked functions, which
    // would cause the earlier functions (such as the
    // kernel itself) to be collected. In this event, we
    // give up and fall back to sequential for now.
    if (!script->hasParallelIonScript()) {
        parallel::Spew(
            parallel::SpewCompile,
            "Script %p:%s:%u was garbage-collected or invalidated",
            script.get(), script->filename(), script->lineno());
        return Method_Skipped;
    }

    return Method_Compiled;
}

MethodStatus
jit::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    // Skip if the code is expected to result in a bailout.
    if (!script->hasIonScript() || script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // Don't handle arguments underflow; to make this work we would have to pad
    // missing arguments with |undefined|.
    if (numActualArgs < script->functionNonDelazifying()->nargs())
        return Method_Skipped;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return Method_Error;

    // This can GC, so afterward, script->ion is not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    if (!script->hasIonScript())
        return Method_Skipped;

    return Method_Compiled;
}

static IonExecStatus
EnterIon(JSContext *cx, EnterJitData &data)
{
    JS_CHECK_RECURSION(cx, return IonExec_Aborted);
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!data.osrFrame);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();

    // Caller must construct |this| before invoking the Ion function.
    JS_ASSERT_IF(data.constructing, data.maxArgv[0].isObject());

    data.result.setInt32(data.numActualArgs);
    {
        AssertCompartmentUnchanged pcc(cx);
        JitActivation activation(cx, data.constructing);

        CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv,
                            /* osrFrame = */ nullptr, data.calleeToken,
                            /* scopeChain = */ nullptr, 0, data.result.address());
    }

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    // Jit callers wrap primitive constructor return.
    if (!data.result.isMagic() && data.constructing && data.result.isPrimitive())
        data.result = data.maxArgv[0];

    // Release temporary buffer used for OSR into Ion.
    cx->runtime()->getJitRuntime(cx)->freeOsrTempData();

    JS_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
    return data.result.isMagic() ? IonExec_Error : IonExec_Ok;
}

bool
jit::SetEnterJitData(JSContext *cx, EnterJitData &data, RunState &state, AutoValueVector &vals)
{
    data.osrFrame = nullptr;

    if (state.isInvoke()) {
        CallArgs &args = state.asInvoke()->args();
        unsigned numFormals = state.script()->functionNonDelazifying()->nargs();
        data.constructing = state.asInvoke()->constructing();
        data.numActualArgs = args.length();
        data.maxArgc = Max(args.length(), numFormals) + 1;
        data.scopeChain = nullptr;
        data.calleeToken = CalleeToToken(&args.callee().as<JSFunction>());

        if (data.numActualArgs >= numFormals) {
            data.maxArgv = args.base() + 1;
        } else {
            // Pad missing arguments with |undefined|.
            for (size_t i = 1; i < args.length() + 2; i++) {
                if (!vals.append(args.base()[i]))
                    return false;
            }

            while (vals.length() < numFormals + 1) {
                if (!vals.append(UndefinedValue()))
                    return false;
            }

            JS_ASSERT(vals.length() >= numFormals + 1);
            data.maxArgv = vals.begin();
        }
    } else {
        data.constructing = false;
        data.numActualArgs = 0;
        data.maxArgc = 1;
        data.maxArgv = state.asExecute()->addressOfThisv();
        data.scopeChain = state.asExecute()->scopeChain();

        data.calleeToken = CalleeToToken(state.script());

        if (state.script()->isForEval() &&
            !(state.asExecute()->type() & InterpreterFrame::GLOBAL))
        {
            ScriptFrameIter iter(cx);
            if (iter.isFunctionFrame())
                data.calleeToken = CalleeToToken(iter.callee());
        }
    }

    return true;
}

IonExecStatus
jit::IonCannon(JSContext *cx, RunState &state)
{
    IonScript *ion = state.script()->ionScript();

    EnterJitData data(cx);
    data.jitcode = ion->method()->raw();

    AutoValueVector vals(cx);
    if (!SetEnterJitData(cx, data, state, vals))
        return IonExec_Error;

    IonExecStatus status = EnterIon(cx, data);

    if (status == IonExec_Ok)
        state.setReturnValue(data.result);

    return status;
}

IonExecStatus
jit::FastInvoke(JSContext *cx, HandleFunction fun, CallArgs &args)
{
    JS_CHECK_RECURSION(cx, return IonExec_Error);

    IonScript *ion = fun->nonLazyScript()->ionScript();
    JitCode *code = ion->method();
    void *jitcode = code->raw();

    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!ion->bailoutExpected());

    JitActivation activation(cx, /* firstFrameIsConstructing = */ false);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
    void *calleeToken = CalleeToToken(fun);

    RootedValue result(cx, Int32Value(args.length()));
    JS_ASSERT(args.length() >= fun->nargs());

    CALL_GENERATED_CODE(enter, jitcode, args.length() + 1, args.array() - 1,
                        /* osrFrame = */ nullptr, calleeToken,
                        /* scopeChain = */ nullptr, 0, result.address());
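    // The call above passes args.array() - 1, which points at |this|, so the
    // generated code sees |this| followed by the actual arguments --
    // args.length() + 1 values in all, the same layout SetEnterJitData builds
    // for the ordinary entry path.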

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    args.rval().set(result);

    JS_ASSERT_IF(result.isMagic(), result.isMagic(JS_ION_ERROR));
    return result.isMagic() ? IonExec_Error : IonExec_Ok;
}

static void
InvalidateActivation(FreeOp *fop, uint8_t *ionTop, bool invalidateAll)
{
    IonSpew(IonSpew_Invalidate, "BEGIN invalidating activation");

    size_t frameno = 1;

    for (JitFrameIterator it(ionTop, SequentialExecution); !it.done(); ++it, ++frameno) {
        JS_ASSERT_IF(frameno == 1, it.type() == JitFrame_Exit);

#ifdef DEBUG
        switch (it.type()) {
          case JitFrame_Exit:
            IonSpew(IonSpew_Invalidate, "#%d exit frame @ %p", frameno, it.fp());
            break;
          case JitFrame_BaselineJS:
          case JitFrame_IonJS:
          {
            JS_ASSERT(it.isScripted());
            const char *type = it.isIonJS() ? "Optimized" : "Baseline";
            IonSpew(IonSpew_Invalidate, "#%d %s JS frame @ %p, %s:%d (fun: %p, script: %p, pc %p)",
                    frameno, type, it.fp(), it.script()->filename(), it.script()->lineno(),
                    it.maybeCallee(), (JSScript *)it.script(), it.returnAddressToFp());
            break;
          }
          case JitFrame_BaselineStub:
            IonSpew(IonSpew_Invalidate, "#%d baseline stub frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Unwound_IonJS:
          case JitFrame_Unwound_BaselineStub:
            MOZ_ASSUME_UNREACHABLE("invalid");
          case JitFrame_Unwound_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d unwound rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Entry:
            IonSpew(IonSpew_Invalidate, "#%d entry frame @ %p", frameno, it.fp());
            break;
        }
#endif

        if (!it.isIonJS())
            continue;

        // See if the frame has already been invalidated.
        if (it.checkInvalidation())
            continue;

        JSScript *script = it.script();
        if (!script->hasIonScript())
            continue;

        if (!invalidateAll && !script->ionScript()->invalidated())
            continue;

        IonScript *ionScript = script->ionScript();

        // Purge ICs before we mark this script as invalidated. This will
        // prevent lastJump_ from appearing to be a bogus pointer, just
        // in case anyone tries to read it.
        ionScript->purgeCaches();

        // Clean up any pointers from elsewhere in the runtime to this IonScript
        // which is about to become disconnected from its JSScript.
        ionScript->unlinkFromRuntime(fop);

        // This frame needs to be invalidated. We do the following:
        //
        // 1. Increment the reference counter to keep the ionScript alive
        //    for the invalidation bailout or for the exception handler.
        // 2. Determine safepoint that corresponds to the current call.
        // 3. From safepoint, get distance to the OSI-patchable offset.
        // 4. From the IonScript, determine the distance between the
        //    call-patchable offset and the invalidation epilogue.
        // 5. Patch the OSI point with a call-relative to the
        //    invalidation epilogue.
        //
        // The code generator ensures that there's enough space for us
        // to patch in a call-relative operation at each invalidation
        // point.
        //
        // Note: you can't simplify this mechanism to "just patch the
        // instruction immediately after the call" because things may
        // need to move into a well-defined register state (using move
        // instructions after the call) in order to capture an appropriate
        // snapshot after the call occurs.

        ionScript->incref();

        const SafepointIndex *si = ionScript->getSafepointIndex(it.returnAddressToFp());
        JitCode *ionCode = ionScript->method();

        JS::Zone *zone = script->zone();
        if (zone->needsBarrier()) {
            // We're about to remove edges from the JSScript to gcthings
            // embedded in the JitCode. Perform one final trace of the
            // JitCode for the incremental GC, as it must know about
            // those edges.
            ionCode->trace(zone->barrierTracer());
        }
        ionCode->setInvalidated();

        // Write the delta (from the return address offset to the
        // IonScript pointer embedded into the invalidation epilogue)
        // where the safepointed call instruction used to be. We rely on
        // the call sequence causing the safepoint being >= the size of
        // a uint32, which is checked during safepoint index
        // construction.
        CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
        ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                          (it.returnAddressToFp() - ionCode->raw());
        Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));

        CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
        CodeLocationLabel invalidateEpilogue(ionCode, ionScript->invalidateEpilogueOffset());
        IonSpew(IonSpew_Invalidate, "   ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
                ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
        Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
    }

    IonSpew(IonSpew_Invalidate, "END invalidating activation");
}

void
jit::StopAllOffThreadCompilations(JSCompartment *comp)
{
    if (!comp->jitCompartment())
        return;
    CancelOffThreadIonCompile(comp, nullptr);
    FinishAllOffThreadCompilations(comp);
}

void
jit::InvalidateAll(FreeOp *fop, Zone *zone)
{
    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
        StopAllOffThreadCompilations(comp);

    for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter) {
        if (iter->compartment()->zone() == zone) {
            IonSpew(IonSpew_Invalidate, "Invalidating all frames for GC");
            InvalidateActivation(fop, iter.jitTop(), true);
        }
    }
}

void
jit::Invalidate(types::TypeZone &types, FreeOp *fop,
                const Vector<types::RecompileInfo> &invalid, bool resetUses,
                bool cancelOffThread)
{
    IonSpew(IonSpew_Invalidate, "Start invalidation.");

    // Add an invalidation reference to all invalidated IonScripts to indicate
    // to the traversal which frames have been invalidated.
    size_t numInvalidations = 0;
    for (size_t i = 0; i < invalid.length(); i++) {
        const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        if (!co.isValid())
            continue;

        if (cancelOffThread)
            CancelOffThreadIonCompile(co.script()->compartment(), co.script());

        if (!co.ion())
            continue;

        IonSpew(IonSpew_Invalidate, " Invalidate %s:%u, IonScript %p",
                co.script()->filename(), co.script()->lineno(), co.ion());

        // Keep the ion script alive during the invalidation and flag this
        // ionScript as being invalidated. This increment is removed by the
        // loop after the calls to InvalidateActivation.
        co.ion()->incref();
        numInvalidations++;
    }

    if (!numInvalidations) {
        IonSpew(IonSpew_Invalidate, " No IonScript invalidation.");
        return;
    }

    for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter)
        InvalidateActivation(fop, iter.jitTop(), false);

    // Drop the references added above. If a script was never active, its
    // IonScript will be immediately destroyed. Otherwise, it will be held live
    // until its last invalidated frame is destroyed.
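    // The reference-counting protocol in miniature: the marking loop above
    // did co.ion()->incref() to keep each IonScript alive while frames are
    // patched, the loop below balances it with ionScript->decref(fop), and
    // InvalidateActivation() takes one further reference per on-stack frame
    // that is released once the invalidation bailout or exception handler is
    // done with that frame.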
    for (size_t i = 0; i < invalid.length(); i++) {
        types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        if (!co.isValid())
            continue;

        ExecutionMode executionMode = co.mode();
        JSScript *script = co.script();
        IonScript *ionScript = co.ion();
        if (!ionScript)
            continue;

        SetIonScript(script, executionMode, nullptr);
        ionScript->decref(fop);
        co.invalidate();
        numInvalidations--;

        // Wait for the scripts to get warm again before doing another
        // compile, unless either:
        // (1) we are recompiling *because* a script got hot
        //     (resetUses is false); or,
        // (2) we are invalidating a parallel script. This is because
        //     the useCount only applies to sequential uses. Parallel
        //     execution *requires* ion, and so we don't limit it to
        //     methods with a high usage count (though we do check that
        //     the useCount is at least 1 when compiling the transitive
        //     closure of potential callees, to avoid compiling things
        //     that are never run at all).
        if (resetUses && executionMode != ParallelExecution)
            script->resetUseCount();
    }

    // Make sure we didn't leak references by invalidating the same IonScript
    // multiple times in the above loop.
    JS_ASSERT(!numInvalidations);
}

void
jit::Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool resetUses,
                bool cancelOffThread)
{
    jit::Invalidate(cx->zone()->types, cx->runtime()->defaultFreeOp(), invalid, resetUses,
                    cancelOffThread);
}

bool
jit::Invalidate(JSContext *cx, JSScript *script, ExecutionMode mode, bool resetUses,
                bool cancelOffThread)
{
    JS_ASSERT(script->hasIonScript());

    if (cx->runtime()->spsProfiler.enabled()) {
        // Register invalidation with profiler.
        // Format of event payload string:
        //     "Invalidate <filename>:<lineno>"

        // Get the script filename, if any, and its length.
        const char *filename = script->filename();
        if (filename == nullptr)
            filename = "<unknown>";

        size_t len = strlen(filename) + 20;
        char *buf = js_pod_malloc<char>(len);
        if (!buf)
            return false;

        // Construct the descriptive string.
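        // The extra 20 bytes are assumed to budget the "Invalidate " prefix,
        // the ':' separator and the decimal digits of the line number printed
        // below.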
        JS_snprintf(buf, len, "Invalidate %s:%llu", filename, script->lineno());
        cx->runtime()->spsProfiler.markEvent(buf);
        js_free(buf);
    }

    Vector<types::RecompileInfo> scripts(cx);

    switch (mode) {
      case SequentialExecution:
        JS_ASSERT(script->hasIonScript());
        if (!scripts.append(script->ionScript()->recompileInfo()))
            return false;
        break;
      case ParallelExecution:
        JS_ASSERT(script->hasParallelIonScript());
        if (!scripts.append(script->parallelIonScript()->recompileInfo()))
            return false;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    Invalidate(cx, scripts, resetUses, cancelOffThread);
    return true;
}

bool
jit::Invalidate(JSContext *cx, JSScript *script, bool resetUses, bool cancelOffThread)
{
    return Invalidate(cx, script, SequentialExecution, resetUses, cancelOffThread);
}

static void
FinishInvalidationOf(FreeOp *fop, JSScript *script, IonScript *ionScript)
{
    types::TypeZone &types = script->zone()->types;

    // Note: If the script is about to be swept, the compiler output may have
    // already been destroyed.
    if (types::CompilerOutput *output = ionScript->recompileInfo().compilerOutput(types))
        output->invalidate();

    // If this script has Ion code on the stack, invalidated() will return
    // true. In this case we have to wait until destroying it.
    if (!ionScript->invalidated())
        jit::IonScript::Destroy(fop, ionScript);
}

template <ExecutionMode mode>
void
jit::FinishInvalidation(FreeOp *fop, JSScript *script)
{
    // In all cases, nullptr out script->ion or script->parallelIon to avoid
    // re-entry.
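    // Clearing the script's pointer before tearing the IonScript down means
    // that any code reached during teardown sees hasIonScript() /
    // hasParallelIonScript() as false, so the same IonScript cannot be
    // finished or destroyed twice.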
    switch (mode) {
      case SequentialExecution:
        if (script->hasIonScript()) {
            IonScript *ion = script->ionScript();
            script->setIonScript(nullptr);
            FinishInvalidationOf(fop, script, ion);
        }
        return;

      case ParallelExecution:
        if (script->hasParallelIonScript()) {
            IonScript *parallelIon = script->parallelIonScript();
            script->setParallelIonScript(nullptr);
            FinishInvalidationOf(fop, script, parallelIon);
        }
        return;

      default:
        MOZ_ASSUME_UNREACHABLE("bad execution mode");
    }
}

template void
jit::FinishInvalidation<SequentialExecution>(FreeOp *fop, JSScript *script);

template void
jit::FinishInvalidation<ParallelExecution>(FreeOp *fop, JSScript *script);

void
jit::MarkValueFromIon(JSRuntime *rt, Value *vp)
{
    gc::MarkValueUnbarriered(&rt->gcMarker, vp, "write barrier");
}

void
jit::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
{
    gc::MarkShapeUnbarriered(&rt->gcMarker, shapep, "write barrier");
}

void
jit::ForbidCompilation(JSContext *cx, JSScript *script)
{
    ForbidCompilation(cx, script, SequentialExecution);
}

void
jit::ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode)
{
    IonSpew(IonSpew_Abort, "Disabling Ion mode %d compilation of script %s:%d",
            mode, script->filename(), script->lineno());

    CancelOffThreadIonCompile(cx->compartment(), script);

    switch (mode) {
      case SequentialExecution:
        if (script->hasIonScript()) {
            // It is only safe to modify script->ion if the script is not currently
            // running, because JitFrameIterator needs to tell what ionScript to
            // use (either the one on the JSScript, or the one hidden in the
            // breadcrumbs Invalidation() leaves). Therefore, if invalidation
            // fails, we cannot disable the script.
            if (!Invalidate(cx, script, mode, false))
                return;
        }

        script->setIonScript(ION_DISABLED_SCRIPT);
        return;

      case ParallelExecution:
        if (script->hasParallelIonScript()) {
            if (!Invalidate(cx, script, mode, false))
                return;
        }

        script->setParallelIonScript(ION_DISABLED_SCRIPT);
        return;

      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    MOZ_ASSUME_UNREACHABLE("No such execution mode");
}

AutoFlushICache *
PerThreadData::autoFlushICache() const
{
    return autoFlushICache_;
}

void
PerThreadData::setAutoFlushICache(AutoFlushICache *afc)
{
    autoFlushICache_ = afc;
}

// Set the range for the merging of flushes. The flushing is deferred until the end of
// the AutoFlushICache context. Subsequent flushing within this range is also
// deferred. This is only expected to be defined once for each AutoFlushICache
// context. It assumes the range to be flushed is required to be within an
// AutoFlushICache context.
void
AutoFlushICache::setRange(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    JS_ASSERT(afc);
    JS_ASSERT(!afc->start_);
    IonSpewCont(IonSpew_CacheFlush, "(%x %x):", start, len);

    uintptr_t stop = start + len;
    afc->start_ = start;
    afc->stop_ = stop;
#endif
}

// Flush the instruction cache.
//
// If called within a dynamic AutoFlushICache context and if the range is already pending
// flushing for this AutoFlushICache context then the request is ignored with the
// understanding that it will be flushed on exit from the AutoFlushICache context.
// Otherwise the range is flushed immediately.
//
// Updates outside the current code object are typically the exception so they are flushed
// immediately rather than attempting to merge them.
//
// For efficiency it is expected that all large ranges will be flushed within an
// AutoFlushICache, so check. If this assertion is hit then it does not necessarily
// indicate a program fault but it might indicate a lost opportunity to merge cache
// flushing. It can be corrected by wrapping the call in an AutoFlushICache context.
void
AutoFlushICache::flush(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    if (!afc) {
        IonSpewCont(IonSpew_CacheFlush, "#");
        JSC::ExecutableAllocator::cacheFlush((void *)start, len);
        JS_ASSERT(len <= 16);
        return;
    }

    uintptr_t stop = start + len;
    if (start >= afc->start_ && stop <= afc->stop_) {
        // Update is within the pending flush range, so defer to the end of the context.
        IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
        return;
    }

    IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
    JSC::ExecutableAllocator::cacheFlush((void *)start, len);
#endif
}

// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in error paths
// where the changes are being abandoned.
void
AutoFlushICache::setInhibit()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    JS_ASSERT(afc);
    JS_ASSERT(afc->start_);
    IonSpewCont(IonSpew_CacheFlush, "I");
    afc->inhibit_ = true;
#endif
}
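// An illustrative use of the class described below (assumed, not an actual
// call site in this file): linking a code object under a dynamic context so
// that the many small patches merge into a single flush on scope exit.
//
//     {
//         AutoFlushICache afc("Example");
//         AutoFlushICache::setRange(uintptr_t(code->raw()), codeSize);
//         // ... assemble and patch within [start, stop) ...
//     }   // one cache flush of the whole range happens here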
void
jit::PurgeCaches(JSScript *script)
{
    if (script->hasIonScript())
        script->ionScript()->purgeCaches();

    if (script->hasParallelIonScript())
        script->parallelIonScript()->purgeCaches();
}

size_t
jit::SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf)
{
    size_t result = 0;

    if (script->hasIonScript())
        result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);

    if (script->hasParallelIonScript())
        result += script->parallelIonScript()->sizeOfIncludingThis(mallocSizeOf);

    return result;
}

void
jit::DestroyIonScripts(FreeOp *fop, JSScript *script)
{
    if (script->hasIonScript())
        jit::IonScript::Destroy(fop, script->ionScript());

    if (script->hasParallelIonScript())
        jit::IonScript::Destroy(fop, script->parallelIonScript());

    if (script->hasBaselineScript())
        jit::BaselineScript::Destroy(fop, script->baselineScript());
}

void
jit::TraceIonScripts(JSTracer *trc, JSScript *script)
{
    if (script->hasIonScript())
        jit::IonScript::Trace(trc, script->ionScript());

    if (script->hasParallelIonScript())
        jit::IonScript::Trace(trc, script->parallelIonScript());

    if (script->hasBaselineScript())
        jit::BaselineScript::Trace(trc, script->baselineScript());
}

bool
jit::RematerializeAllFrames(JSContext *cx, JSCompartment *comp)
{
    for (JitActivationIterator iter(comp->runtimeFromMainThread()); !iter.done(); ++iter) {
        if (iter.activation()->compartment() == comp) {
            for (JitFrameIterator frameIter(iter); !frameIter.done(); ++frameIter) {
                if (!frameIter.isIonJS())
                    continue;
                if (!iter.activation()->asJit()->getRematerializedFrame(cx, frameIter))
                    return false;
            }
        }
    }
    return true;
}

bool
jit::UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp,
                        AutoDebugModeInvalidation &invalidate)
{
    MOZ_ASSERT(invalidate.isFor(comp));

    // Schedule invalidation of all optimized JIT code since debug mode
    // invalidates assumptions.
    invalidate.scheduleInvalidation(comp->debugMode());

    // Recompile on-stack baseline scripts if we have a cx.
    if (maybecx) {
        IonContext ictx(maybecx, nullptr);
        if (!RecompileOnStackBaselineScriptsForDebugMode(maybecx, comp)) {
            js_ReportOutOfMemory(maybecx);
            return false;
        }
    }

    return true;
}
AutoDebugModeInvalidation::~AutoDebugModeInvalidation()
{
    MOZ_ASSERT(!!comp_ != !!zone_);

    if (needInvalidation_ == NoNeed)
        return;

    Zone *zone = zone_ ? zone_ : comp_->zone();
    JSRuntime *rt = zone->runtimeFromMainThread();
    FreeOp *fop = rt->defaultFreeOp();

    if (comp_) {
        StopAllOffThreadCompilations(comp_);
    } else {
        for (CompartmentsInZoneIter comp(zone_); !comp.done(); comp.next())
            StopAllOffThreadCompilations(comp);
    }

    // Don't discard active baseline scripts. They are recompiled for debug
    // mode.
    jit::MarkActiveBaselineScripts(zone);

    for (JitActivationIterator iter(rt); !iter.done(); ++iter) {
        JSCompartment *comp = iter->compartment();
        if (comp_ == comp || zone_ == comp->zone()) {
            IonContext ictx(CompileRuntime::get(rt));
            IonSpew(IonSpew_Invalidate, "Invalidating frames for debug mode toggle");
            InvalidateActivation(fop, iter.jitTop(), true);
        }
    }

    for (gc::CellIter i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        if (script->compartment() == comp_ || zone_) {
            FinishInvalidation<SequentialExecution>(fop, script);
            FinishInvalidation<ParallelExecution>(fop, script);
            FinishDiscardBaselineScript(fop, script);
            script->resetUseCount();
        } else if (script->hasBaselineScript()) {
            script->baselineScript()->resetActive();
        }
    }
}
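// A hedged sketch of how UpdateForDebugMode (above) pairs with
// AutoDebugModeInvalidation (illustration only; the caller and the debug-mode
// toggle itself are hypothetical and not shown in this file):
//
//   AutoDebugModeInvalidation invalidate(comp);
//   // ... toggle the compartment's debug mode (not shown) ...
//   if (!jit::UpdateForDebugMode(cx, comp, invalidate))
//       return false;  // OOM was reported
//   // When 'invalidate' goes out of scope, the destructor above invalidates
//   // on-stack Ion activations and discards the affected scripts' JIT code.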