Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
with some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/Ion.h"
9 #include "mozilla/MemoryReporting.h"
10 #include "mozilla/ThreadLocal.h"
12 #include "jscompartment.h"
13 #include "jsprf.h"
14 #include "jsworkers.h"
16 #include "gc/Marking.h"
17 #include "jit/AliasAnalysis.h"
18 #include "jit/AsmJSModule.h"
19 #include "jit/BacktrackingAllocator.h"
20 #include "jit/BaselineDebugModeOSR.h"
21 #include "jit/BaselineFrame.h"
22 #include "jit/BaselineInspector.h"
23 #include "jit/BaselineJIT.h"
24 #include "jit/CodeGenerator.h"
25 #include "jit/EdgeCaseAnalysis.h"
26 #include "jit/EffectiveAddressAnalysis.h"
27 #include "jit/IonAnalysis.h"
28 #include "jit/IonBuilder.h"
29 #include "jit/IonOptimizationLevels.h"
30 #include "jit/IonSpewer.h"
31 #include "jit/JitCommon.h"
32 #include "jit/JitCompartment.h"
33 #include "jit/LICM.h"
34 #include "jit/LinearScan.h"
35 #include "jit/LIR.h"
36 #include "jit/Lowering.h"
37 #include "jit/ParallelSafetyAnalysis.h"
38 #include "jit/PerfSpewer.h"
39 #include "jit/RangeAnalysis.h"
40 #include "jit/StupidAllocator.h"
41 #include "jit/UnreachableCodeElimination.h"
42 #include "jit/ValueNumbering.h"
43 #include "vm/ForkJoin.h"
44 #include "vm/TraceLogging.h"
46 #include "jscompartmentinlines.h"
47 #include "jsgcinlines.h"
48 #include "jsinferinlines.h"
49 #include "jsobjinlines.h"
51 #include "jit/ExecutionMode-inl.h"
53 using namespace js;
54 using namespace js::jit;
56 using mozilla::ThreadLocal;
// Assert that JitCode is gc::Cell aligned.
JS_STATIC_ASSERT(sizeof(JitCode) % gc::CellSize == 0);

// Per-thread pointer to the innermost active IonContext. IonContext
// instances form a stack through their prev_ links (see the constructors
// and destructor below).
static ThreadLocal<IonContext*> TlsIonContext;
63 static IonContext *
64 CurrentIonContext()
65 {
66 if (!TlsIonContext.initialized())
67 return nullptr;
68 return TlsIonContext.get();
69 }
// Install |ctx| as this thread's current IonContext. Callers are expected
// to have initialized the TLS slot already (see jit::InitializeIon).
void
jit::SetIonContext(IonContext *ctx)
{
    TlsIonContext.set(ctx);
}
77 IonContext *
78 jit::GetIonContext()
79 {
80 MOZ_ASSERT(CurrentIonContext());
81 return CurrentIonContext();
82 }
// Like GetIonContext, but returns nullptr instead of asserting when no
// IonContext is active on this thread.
IonContext *
jit::MaybeGetIonContext()
{
    return CurrentIonContext();
}
// Main-thread constructor: derives the compile runtime/compartment from the
// JSContext and pushes this context onto the thread-local IonContext stack.
IonContext::IonContext(JSContext *cx, TempAllocator *temp)
  : cx(cx),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime())),
    compartment(CompileCompartment::get(cx->compartment())),
    prev_(CurrentIonContext()),  // link to the previously active context
    assemblerCount_(0)
{
    SetIonContext(this);
}
// Off-main-thread constructor: there is no JSContext or compartment here,
// only the runtime reached through the ExclusiveContext.
IonContext::IonContext(ExclusiveContext *cx, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime_)),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
// Compilation-thread constructor: runtime and compartment are supplied
// directly, with no JSContext available.
IonContext::IonContext(CompileRuntime *rt, CompileCompartment *comp, TempAllocator *temp)
  : cx(nullptr),
    temp(temp),
    runtime(rt),
    compartment(comp),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
// Minimal constructor: runtime only, no allocator or compartment. Used where
// an IonContext merely needs to exist on the stack.
IonContext::IonContext(CompileRuntime *rt)
  : cx(nullptr),
    temp(nullptr),
    runtime(rt),
    compartment(nullptr),
    prev_(CurrentIonContext()),
    assemblerCount_(0)
{
    SetIonContext(this);
}
// Pop this context off the thread-local stack, restoring whatever context
// was active when it was constructed.
IonContext::~IonContext()
{
    SetIonContext(prev_);
}
139 bool
140 jit::InitializeIon()
141 {
142 if (!TlsIonContext.initialized() && !TlsIonContext.init())
143 return false;
144 CheckLogging();
145 CheckPerf();
146 return true;
147 }
// All trampolines and allocators start out null; they are created lazily or
// by JitRuntime::initialize().
JitRuntime::JitRuntime()
  : execAlloc_(nullptr),
    ionAlloc_(nullptr),
    exceptionTail_(nullptr),
    bailoutTail_(nullptr),
    enterJIT_(nullptr),
    bailoutHandler_(nullptr),
    argumentsRectifier_(nullptr),
    argumentsRectifierReturnAddr_(nullptr),
    parallelArgumentsRectifier_(nullptr),
    invalidator_(nullptr),
    debugTrapHandler_(nullptr),
    forkJoinGetSliceStub_(nullptr),
    baselineDebugModeOSRHandler_(nullptr),
    functionWrappers_(nullptr),
    osrTempData_(nullptr),
    ionCodeProtected_(false)
{
}
// Release the VM wrapper map, the OSR scratch buffer, and the Ion code
// allocator.
JitRuntime::~JitRuntime()
{
    js_delete(functionWrappers_);
    freeOsrTempData();

    // Note: The interrupt lock is not taken here, as JitRuntime is only
    // destroyed along with its containing JSRuntime.
    js_delete(ionAlloc_);
}
// Generate the runtime-wide trampolines and stubs: exception/bailout tails,
// bailout tables and handler, invalidator, arguments rectifiers, EnterJIT
// sequences, pre-barriers, and one wrapper per VM function. Any failure
// aborts initialization (returns false). Requires exclusive access and the
// interrupt lock, since the generated code is shared runtime-wide.
bool
JitRuntime::initialize(JSContext *cx)
{
    JS_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
    JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());

    // Runtime-wide stubs live in the atoms compartment.
    AutoCompartment ac(cx, cx->atomsCompartment());

    IonContext ictx(cx, nullptr);

    execAlloc_ = cx->runtime()->getExecAlloc(cx);
    if (!execAlloc_)
        return false;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    functionWrappers_ = cx->new_<VMWrapperMap>(cx);
    if (!functionWrappers_ || !functionWrappers_->init())
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting exception tail stub");
    exceptionTail_ = generateExceptionTailStub(cx);
    if (!exceptionTail_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting bailout tail stub");
    bailoutTail_ = generateBailoutTailStub(cx);
    if (!bailoutTail_)
        return false;

    if (cx->runtime()->jitSupportsFloatingPoint) {
        IonSpew(IonSpew_Codegen, "# Emitting bailout tables");

        // Initialize some Ion-only stubs that require floating-point support.
        if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
            return false;

        // One bailout table per frame size class. Append a null placeholder
        // first so the indexed write below stays within the vector's length.
        for (uint32_t id = 0;; id++) {
            FrameSizeClass class_ = FrameSizeClass::FromClass(id);
            if (class_ == FrameSizeClass::ClassLimit())
                break;
            bailoutTables_.infallibleAppend((JitCode *)nullptr);
            bailoutTables_[id] = generateBailoutTable(cx, id);
            if (!bailoutTables_[id])
                return false;
        }

        IonSpew(IonSpew_Codegen, "# Emitting bailout handler");
        bailoutHandler_ = generateBailoutHandler(cx);
        if (!bailoutHandler_)
            return false;

        IonSpew(IonSpew_Codegen, "# Emitting invalidator");
        invalidator_ = generateInvalidator(cx);
        if (!invalidator_)
            return false;
    }

    IonSpew(IonSpew_Codegen, "# Emitting sequential arguments rectifier");
    argumentsRectifier_ = generateArgumentsRectifier(cx, SequentialExecution, &argumentsRectifierReturnAddr_);
    if (!argumentsRectifier_)
        return false;

#ifdef JS_THREADSAFE
    IonSpew(IonSpew_Codegen, "# Emitting parallel arguments rectifier");
    parallelArgumentsRectifier_ = generateArgumentsRectifier(cx, ParallelExecution, nullptr);
    if (!parallelArgumentsRectifier_)
        return false;
#endif

    IonSpew(IonSpew_Codegen, "# Emitting EnterJIT sequence");
    enterJIT_ = generateEnterJIT(cx, EnterJitOptimized);
    if (!enterJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting EnterBaselineJIT sequence");
    enterBaselineJIT_ = generateEnterJIT(cx, EnterJitBaseline);
    if (!enterBaselineJIT_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Value");
    valuePreBarrier_ = generatePreBarrier(cx, MIRType_Value);
    if (!valuePreBarrier_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting Pre Barrier for Shape");
    shapePreBarrier_ = generatePreBarrier(cx, MIRType_Shape);
    if (!shapePreBarrier_)
        return false;

    IonSpew(IonSpew_Codegen, "# Emitting VM function wrappers");
    // VMFunction::functions is a linked list of every registered VM function.
    for (VMFunction *fun = VMFunction::functions; fun; fun = fun->next) {
        if (!generateVMWrapper(cx, *fun))
            return false;
    }

    return true;
}
// Lazily generate the debug trap handler stub. Returns nullptr if generation
// fails; a subsequent call will retry.
JitCode *
JitRuntime::debugTrapHandler(JSContext *cx)
{
    if (!debugTrapHandler_) {
        // JitRuntime code stubs are shared across compartments and have to
        // be allocated in the atoms compartment.
        AutoLockForExclusiveAccess lock(cx);
        AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
        debugTrapHandler_ = generateDebugTrapHandler(cx);
    }
    return debugTrapHandler_;
}
// Lazily generate the ForkJoinGetSlice stub, following the same pattern as
// debugTrapHandler: shared runtime-wide, so allocated in the atoms
// compartment under the exclusive-access lock. Returns false on failure.
bool
JitRuntime::ensureForkJoinGetSliceStubExists(JSContext *cx)
{
    if (!forkJoinGetSliceStub_) {
        IonSpew(IonSpew_Codegen, "# Emitting ForkJoinGetSlice stub");
        AutoLockForExclusiveAccess lock(cx);
        AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
        forkJoinGetSliceStub_ = generateForkJoinGetSliceStub(cx);
    }
    return !!forkJoinGetSliceStub_;
}
304 uint8_t *
305 JitRuntime::allocateOsrTempData(size_t size)
306 {
307 osrTempData_ = (uint8_t *)js_realloc(osrTempData_, size);
308 return osrTempData_;
309 }
// Release the OSR scratch buffer; safe to call when none is allocated.
void
JitRuntime::freeOsrTempData()
{
    js_free(osrTempData_);
    osrTempData_ = nullptr;
}
// Create the executable allocator used for Ion code. Requires the interrupt
// lock, since ionAlloc_ is also toggled by the interrupt machinery. Returns
// nullptr (after reporting OOM) on failure.
JSC::ExecutableAllocator *
JitRuntime::createIonAlloc(JSContext *cx)
{
    JS_ASSERT(cx->runtime()->currentThreadOwnsInterruptLock());

    ionAlloc_ = js_new<JSC::ExecutableAllocator>();
    if (!ionAlloc_)
        js_ReportOutOfMemory(cx);
    return ionAlloc_;
}
// Make all Ion code inaccessible so the main thread faults (and enters the
// signal handler) the next time it tries to execute any of it. No-op when
// signal handlers are unavailable, code is already protected, or no Ion
// allocator exists yet.
void
JitRuntime::ensureIonCodeProtected(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    if (!rt->signalHandlersInstalled() || ionCodeProtected_ || !ionAlloc_)
        return;

    // Protect all Ion code in the runtime to trigger an access violation the
    // next time any of it runs on the main thread.
    ionAlloc_->toggleAllCodeAsAccessible(false);
    ionCodeProtected_ = true;
}
// Called from the signal handler when a fault may have been caused by
// protected Ion code. Returns true (after restoring access and patching
// backedges) if the faulting address lies within Ion code, false if the
// fault is unrelated and should be handled elsewhere.
bool
JitRuntime::handleAccessViolation(JSRuntime *rt, void *faultingAddress)
{
    if (!rt->signalHandlersInstalled() || !ionAlloc_ || !ionAlloc_->codeContains((char *) faultingAddress))
        return false;

#ifdef JS_THREADSAFE
    // All places where the interrupt lock is taken must either ensure that Ion
    // code memory won't be accessed within, or call ensureIonCodeAccessible to
    // render the memory safe for accessing. Otherwise taking the lock below
    // will deadlock the process.
    JS_ASSERT(!rt->currentThreadOwnsInterruptLock());
#endif

    // Taking this lock is necessary to prevent the interrupting thread from marking
    // the memory as inaccessible while we are patching backedges. This will cause us
    // to SEGV while still inside the signal handler, and the process will terminate.
    JSRuntime::AutoLockForInterrupt lock(rt);

    // Ion code in the runtime faulted after it was made inaccessible. Reset
    // the code privileges and patch all loop backedges to perform an interrupt
    // check instead.
    ensureIonCodeAccessible(rt);
    return true;
}
// Undo ensureIonCodeProtected(): restore access to Ion code and, if an
// interrupt is pending, redirect all loop backedges at the interrupt check.
void
JitRuntime::ensureIonCodeAccessible(JSRuntime *rt)
{
    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // This can only be called on the main thread and while handling signals,
    // which happens on a separate thread in OS X.
#ifndef XP_MACOSX
    JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
#endif

    if (ionCodeProtected_) {
        ionAlloc_->toggleAllCodeAsAccessible(true);
        ionCodeProtected_ = false;
    }

    if (rt->interrupt) {
        // The interrupt handler needs to be invoked by this thread, but we may
        // be inside a signal handler and have no idea what is above us on the
        // stack (probably we are executing Ion code at an arbitrary point, but
        // we could be elsewhere, say repatching a jump for an IonCache).
        // Patch all backedges in the runtime so they will invoke the interrupt
        // handler the next time they execute.
        patchIonBackedges(rt, BackedgeInterruptCheck);
    }
}
396 void
397 JitRuntime::patchIonBackedges(JSRuntime *rt, BackedgeTarget target)
398 {
399 #ifndef XP_MACOSX
400 JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
401 #endif
403 // Patch all loop backedges in Ion code so that they either jump to the
404 // normal loop header or to an interrupt handler each time they run.
405 for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin());
406 iter != backedgeList_.end();
407 iter++)
408 {
409 PatchableBackedge *patchableBackedge = *iter;
410 PatchJump(patchableBackedge->backedge, target == BackedgeLoopHeader
411 ? patchableBackedge->loopHeader
412 : patchableBackedge->interruptCheck);
413 }
414 }
// Arrange for running Ion code to notice a requested interrupt. The strategy
// depends on which thread is asking: the main thread patches backedges
// directly, while other threads protect the code so the main thread faults
// into the signal handler. No-op if the JitRuntime does not exist yet.
void
jit::RequestInterruptForIonCode(JSRuntime *rt, JSRuntime::InterruptMode mode)
{
    JitRuntime *jitRuntime = rt->jitRuntime();
    if (!jitRuntime)
        return;

    JS_ASSERT(rt->currentThreadOwnsInterruptLock());

    // The mechanism for interrupting normal ion code varies depending on how
    // the interrupt is being requested.
    switch (mode) {
      case JSRuntime::RequestInterruptMainThread:
        // When requesting an interrupt from the main thread, Ion loop
        // backedges can be patched directly. Make sure we don't segv while
        // patching the backedges, to avoid deadlocking inside the signal
        // handler.
        JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
        jitRuntime->ensureIonCodeAccessible(rt);
        break;

      case JSRuntime::RequestInterruptAnyThread:
        // When requesting an interrupt from off the main thread, protect
        // Ion code memory so that the main thread will fault and enter a
        // signal handler when trying to execute the code. The signal
        // handler will unprotect the code and patch loop backedges so
        // that the interrupt handler is invoked afterwards.
        jitRuntime->ensureIonCodeProtected(rt);
        break;

      case JSRuntime::RequestInterruptAnyThreadDontStopIon:
      case JSRuntime::RequestInterruptAnyThreadForkJoin:
        // The caller does not require Ion code to be interrupted.
        // Nothing more needs to be done.
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("Bad interrupt mode");
    }
}
// All per-compartment stub state starts null; stubCodes_ is created by
// initialize() and the concat stubs by ensureIonStubsExist().
JitCompartment::JitCompartment()
  : stubCodes_(nullptr),
    baselineCallReturnFromIonAddr_(nullptr),
    baselineGetPropReturnFromIonAddr_(nullptr),
    baselineSetPropReturnFromIonAddr_(nullptr),
    baselineCallReturnFromStubAddr_(nullptr),
    baselineGetPropReturnFromStubAddr_(nullptr),
    baselineSetPropReturnFromStubAddr_(nullptr),
    stringConcatStub_(nullptr),
    parallelStringConcatStub_(nullptr),
    activeParallelEntryScripts_(nullptr)
{
}
// Release the owned stub-code map and parallel entry-script set (js_delete
// tolerates nullptr).
JitCompartment::~JitCompartment()
{
    js_delete(stubCodes_);
    js_delete(activeParallelEntryScripts_);
}
477 bool
478 JitCompartment::initialize(JSContext *cx)
479 {
480 stubCodes_ = cx->new_<ICStubCodeMap>(cx);
481 if (!stubCodes_ || !stubCodes_->init())
482 return false;
484 return true;
485 }
// Lazily generate the string-concatenation stubs this compartment needs:
// the sequential one always, and the parallel one in threadsafe builds.
// Returns false if any required stub fails to generate.
bool
JitCompartment::ensureIonStubsExist(JSContext *cx)
{
    if (!stringConcatStub_) {
        stringConcatStub_ = generateStringConcatStub(cx, SequentialExecution);
        if (!stringConcatStub_)
            return false;
    }

#ifdef JS_THREADSAFE
    if (!parallelStringConcatStub_) {
        parallelStringConcatStub_ = generateStringConcatStub(cx, ParallelExecution);
        if (!parallelStringConcatStub_)
            return false;
    }
#endif

    return true;
}
// Record |script| as an active parallel entry script so its parallel
// IonScript can be preserved across GCs (see mark()/sweep() below).
// Also resets the script's parallel age. Returns false on OOM.
bool
JitCompartment::notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script)
{
    // Fast path. The isParallelEntryScript bit guarantees that the script is
    // already in the set.
    if (script->parallelIonScript()->isParallelEntryScript()) {
        MOZ_ASSERT(activeParallelEntryScripts_ && activeParallelEntryScripts_->has(script));
        script->parallelIonScript()->resetParallelAge();
        return true;
    }

    // Lazily create the set on first use.
    if (!activeParallelEntryScripts_) {
        activeParallelEntryScripts_ = cx->new_<ScriptSet>(cx);
        if (!activeParallelEntryScripts_ || !activeParallelEntryScripts_->init())
            return false;
    }

    script->parallelIonScript()->setIsParallelEntryScript();
    ScriptSet::AddPtr p = activeParallelEntryScripts_->lookupForAdd(script);
    return p || activeParallelEntryScripts_->add(p, script);
}
// Tear down an off-thread IonBuilder after compilation finished or was
// cancelled: clear any recompiling flag, detach the in-progress script
// marker, and free everything the builder allocated.
void
jit::FinishOffThreadBuilder(IonBuilder *builder)
{
    ExecutionMode executionMode = builder->info().executionMode();

    // Clear the recompiling flag of the old ionScript, since we continue to
    // use the old ionScript if recompiling fails.
    if (executionMode == SequentialExecution && builder->script()->hasIonScript())
        builder->script()->ionScript()->clearRecompiling();

    // Clean up if compilation did not succeed.
    if (CompilingOffThread(builder->script(), executionMode))
        SetIonScript(builder->script(), executionMode, nullptr);

    // The builder is allocated into its LifoAlloc, so destroying that will
    // destroy the builder and all other data accumulated during compilation,
    // except any final codegen (which includes an assembler and needs to be
    // explicitly destroyed).
    js_delete(builder->backgroundCodegen());
    js_delete(builder->alloc().lifoAlloc());
}
// Drain the worker-thread "finished" list of every IonBuilder belonging to
// |comp|, tearing each one down. A no-op in non-threadsafe builds.
static inline void
FinishAllOffThreadCompilations(JSCompartment *comp)
{
#ifdef JS_THREADSAFE
    AutoLockWorkerThreadState lock;
    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    for (size_t i = 0; i < finished.length(); i++) {
        IonBuilder *builder = finished[i];
        if (builder->compartment == CompileCompartment::get(comp)) {
            FinishOffThreadBuilder(builder);
            // remove() compacts the vector; it is expected to adjust |i| so
            // the element shifted into this slot is not skipped — NOTE(review):
            // confirm remove()'s index semantics against its definition.
            WorkerThreadState().remove(finished, &i);
        }
    }
#endif
}
// Root all JitCode cells in the atoms zone (where runtime-wide stubs and
// wrappers live) during a major GC. Never called during minor GC.
/* static */ void
JitRuntime::Mark(JSTracer *trc)
{
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    Zone *zone = trc->runtime()->atomsCompartment()->zone();
    for (gc::CellIterUnderGC i(zone, gc::FINALIZE_JITCODE); !i.done(); i.next()) {
        JitCode *code = i.get<JitCode>();
        MarkJitCodeRoot(trc, &code, "wrapper");
    }
}
// GC mark hook for a compartment's JIT state: cancels/finishes off-thread
// compilations, drops the OSR scratch buffer, and keeps alive (or prunes)
// scripts whose parallel IonScripts should be preserved.
void
JitCompartment::mark(JSTracer *trc, JSCompartment *compartment)
{
    // Cancel any active or pending off thread compilations. Note that the
    // MIR graph does not hold any nursery pointers, so there's no need to
    // do this for minor GCs.
    JS_ASSERT(!trc->runtime()->isHeapMinorCollecting());
    CancelOffThreadIonCompile(compartment, nullptr);
    FinishAllOffThreadCompilations(compartment);

    // Free temporary OSR buffer.
    trc->runtime()->jitRuntime()->freeOsrTempData();

    // Mark scripts with parallel IonScripts if we should preserve them.
    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();

            // If the script has since been invalidated or was attached by an
            // off-thread worker too late (i.e., the ForkJoin finished with
            // warmup doing all the work), remove it.
            if (!script->hasParallelIonScript() ||
                !script->parallelIonScript()->isParallelEntryScript())
            {
                e.removeFront();
                continue;
            }

            // Check and increment the age. If the script is below the max
            // age, mark it.
            //
            // Subtlety: We depend on the tracing of the parallel IonScript's
            // callTargetEntries to propagate the parallel age to the entire
            // call graph.
            if (ShouldPreserveParallelJITCode(trc->runtime(), script, /* increase = */ true)) {
                MarkScript(trc, const_cast<EncapsulatedPtrScript *>(&e.front()), "par-script");
                // Marking must not have moved/replaced the set entry.
                MOZ_ASSERT(script == e.front());
            }
        }
    }
}
// GC sweep hook: drop dead IC stub code, null out cached return addresses
// whose fallback stubs died, clear unmarked concat stubs, and prune dead
// scripts from the parallel entry-script set.
void
JitCompartment::sweep(FreeOp *fop)
{
    stubCodes_->sweep(fop);

    // If the sweep removed the ICCall_Fallback stub, nullptr the baselineCallReturnAddr_ field.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::Call_Fallback))) {
        baselineCallReturnFromIonAddr_ = nullptr;
        baselineCallReturnFromStubAddr_ = nullptr;
    }
    // Similarly for the ICGetProp_Fallback stub.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::GetProp_Fallback))) {
        baselineGetPropReturnFromIonAddr_ = nullptr;
        baselineGetPropReturnFromStubAddr_ = nullptr;
    }
    // And the ICSetProp_Fallback stub.
    if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::SetProp_Fallback))) {
        baselineSetPropReturnFromIonAddr_ = nullptr;
        baselineSetPropReturnFromStubAddr_ = nullptr;
    }

    if (stringConcatStub_ && !IsJitCodeMarked(stringConcatStub_.unsafeGet()))
        stringConcatStub_ = nullptr;

    if (parallelStringConcatStub_ && !IsJitCodeMarked(parallelStringConcatStub_.unsafeGet()))
        parallelStringConcatStub_ = nullptr;

    if (activeParallelEntryScripts_) {
        for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
            JSScript *script = e.front();
            if (!IsScriptMarked(&script))
                e.removeFront();
            else
                MOZ_ASSERT(script == e.front());
        }
    }
}
658 JitCode *
659 JitRuntime::getBailoutTable(const FrameSizeClass &frameClass) const
660 {
661 JS_ASSERT(frameClass != FrameSizeClass::None());
662 return bailoutTables_[frameClass.classId()];
663 }
// Look up the trampoline generated for VM function |f|. The wrapper map is
// populated during initialize(), so the lookup is expected to succeed and
// uses the read-only threadsafe path.
JitCode *
JitRuntime::getVMWrapper(const VMFunction &f) const
{
    JS_ASSERT(functionWrappers_);
    JS_ASSERT(functionWrappers_->initialized());
    JitRuntime::VMWrapperMap::Ptr p = functionWrappers_->readonlyThreadsafeLookup(&f);
    JS_ASSERT(p);

    return p->value();
}
// Allocate a JitCode GC cell wrapping an already-allocated code buffer.
// On allocation failure the executable pool reservation is released so the
// buffer is not leaked, and nullptr is returned.
template <AllowGC allowGC>
JitCode *
JitCode::New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
             JSC::ExecutablePool *pool, JSC::CodeKind kind)
{
    JitCode *codeObj = js::NewJitCode<allowGC>(cx);
    if (!codeObj) {
        pool->release(headerSize + bufferSize, kind);
        return nullptr;
    }

    // Placement-construct the header over the freshly allocated cell.
    new (codeObj) JitCode(code, bufferSize, headerSize, pool, kind);
    return codeObj;
}
// Explicit instantiations for both GC-allowance modes.
template
JitCode *
JitCode::New<CanGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                    JSC::ExecutablePool *pool, JSC::CodeKind kind);

template
JitCode *
JitCode::New<NoGC>(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize,
                   JSC::ExecutablePool *pool, JSC::CodeKind kind);
// Copy the assembled instructions and all relocation/barrier tables from
// |masm| into this JitCode's buffer, then resolve code labels.
void
JitCode::copyFrom(MacroAssembler &masm)
{
    // Store the JitCode pointer right before the code buffer, so we can
    // recover the gcthing from relocation tables.
    *(JitCode **)(code_ - sizeof(JitCode *)) = this;
    insnSize_ = masm.instructionsSize();
    masm.executableCopy(code_);

    jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
    masm.copyJumpRelocationTable(code_ + jumpRelocTableOffset());

    dataRelocTableBytes_ = masm.dataRelocationTableBytes();
    masm.copyDataRelocationTable(code_ + dataRelocTableOffset());

    preBarrierTableBytes_ = masm.preBarrierTableBytes();
    masm.copyPreBarrierTable(code_ + preBarrierTableOffset());

    // Patch code labels now that the final code address is known.
    masm.processCodeLabels(code_);
}
722 void
723 JitCode::trace(JSTracer *trc)
724 {
725 // Note that we cannot mark invalidated scripts, since we've basically
726 // corrupted the code stream by injecting bailouts.
727 if (invalidated())
728 return;
730 if (jumpRelocTableBytes_) {
731 uint8_t *start = code_ + jumpRelocTableOffset();
732 CompactBufferReader reader(start, start + jumpRelocTableBytes_);
733 MacroAssembler::TraceJumpRelocations(trc, this, reader);
734 }
735 if (dataRelocTableBytes_) {
736 uint8_t *start = code_ + dataRelocTableOffset();
737 CompactBufferReader reader(start, start + dataRelocTableBytes_);
738 MacroAssembler::TraceDataRelocations(trc, this, reader);
739 }
740 }
// GC finalizer: poison the code buffer (when safe) and release the
// executable pool reference backing it.
void
JitCode::finalize(FreeOp *fop)
{
    // Make sure this can't race with an interrupting thread, which may try
    // to read the contents of the pool we are releasing references in.
    JS_ASSERT(fop->runtime()->currentThreadOwnsInterruptLock());

    // Buffer can be freed at any time hereafter. Catch use-after-free bugs.
    // Don't do this if the Ion code is protected, as the signal handler will
    // deadlock trying to reacquire the interrupt lock.
    if (fop->runtime()->jitRuntime() && !fop->runtime()->jitRuntime()->ionCodeProtected())
        memset(code_, JS_SWEPT_CODE_PATTERN, bufferSize_);
    code_ = nullptr;

    // Code buffers are stored inside JSC pools.
    // Pools are refcounted. Releasing the pool may free it.
    if (pool_) {
        // Horrible hack: if we are using perf integration, we don't
        // want to reuse code addresses, so we just leak the memory instead.
        if (!PerfEnabled())
            pool_->release(headerSize_ + bufferSize_, JSC::CodeKind(kind_));
        pool_ = nullptr;
    }
}
767 void
768 JitCode::togglePreBarriers(bool enabled)
769 {
770 uint8_t *start = code_ + preBarrierTableOffset();
771 CompactBufferReader reader(start, start + preBarrierTableBytes_);
773 while (reader.more()) {
774 size_t offset = reader.readUnsigned();
775 CodeLocationLabel loc(this, offset);
776 if (enabled)
777 Assembler::ToggleToCmp(loc);
778 else
779 Assembler::ToggleToJmp(loc);
780 }
781 }
// Zero-initialize all offsets, counts and flags; the real layout offsets are
// filled in by IonScript::New() after the trailing buffer is sized.
IonScript::IonScript()
  : method_(nullptr),
    deoptTable_(nullptr),
    osrPc_(nullptr),
    osrEntryOffset_(0),
    skipArgCheckEntryOffset_(0),
    invalidateEpilogueOffset_(0),
    invalidateEpilogueDataOffset_(0),
    numBailouts_(0),
    hasUncompiledCallTarget_(false),
    isParallelEntryScript_(false),
    hasSPSInstrumentation_(false),
    recompiling_(false),
    runtimeData_(0),
    runtimeSize_(0),
    cacheIndex_(0),
    cacheEntries_(0),
    safepointIndexOffset_(0),
    safepointIndexEntries_(0),
    safepointsStart_(0),
    safepointsSize_(0),
    frameSlots_(0),
    frameSize_(0),
    bailoutTable_(0),
    bailoutEntries_(0),
    osiIndexOffset_(0),
    osiIndexEntries_(0),
    snapshots_(0),
    snapshotsListSize_(0),
    snapshotsRVATableSize_(0),
    constantTable_(0),
    constantEntries_(0),
    callTargetList_(0),
    callTargetEntries_(0),
    backedgeList_(0),
    backedgeEntries_(0),
    refcount_(0),
    parallelAge_(0),
    recompileInfo_(),
    osrPcMismatchCounter_(0),
    dependentAsmJSModules(nullptr)
{
}
// Allocate an IonScript together with all of its variable-length side tables
// in one contiguous malloc'd buffer: the IonScript header is followed by the
// runtime data, caches, safepoint indices, safepoints, bailout table, OSI
// indices, snapshots, recovers, constants, call targets and patchable
// backedges, each padded to pointer alignment. The offset fields written
// below locate each section relative to the start of the IonScript.
// Returns nullptr on OOM (reported).
IonScript *
IonScript::New(JSContext *cx, types::RecompileInfo recompileInfo,
               uint32_t frameSlots, uint32_t frameSize,
               size_t snapshotsListSize, size_t snapshotsRVATableSize,
               size_t recoversSize, size_t bailoutEntries,
               size_t constants, size_t safepointIndices,
               size_t osiIndices, size_t cacheEntries,
               size_t runtimeSize, size_t safepointsSize,
               size_t callTargetEntries, size_t backedgeEntries,
               OptimizationLevel optimizationLevel)
{
    static const int DataAlignment = sizeof(void *);

    // NOTE(review): only the snapshot list and bailout-entry sizes are
    // range-checked here; the other sizes rely on caller-side limits.
    if (snapshotsListSize >= MAX_BUFFER_SIZE ||
        (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
    {
        js_ReportOutOfMemory(cx);
        return nullptr;
    }

    // This should not overflow on x86, because the memory is already allocated
    // *somewhere* and if their total overflowed there would be no memory left
    // at all.
    size_t paddedSnapshotsSize = AlignBytes(snapshotsListSize + snapshotsRVATableSize, DataAlignment);
    size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
    size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
    size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
    size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
    size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(uint32_t), DataAlignment);
    size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
    size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
    size_t paddedCallTargetSize = AlignBytes(callTargetEntries * sizeof(JSScript *), DataAlignment);
    size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
    size_t bytes = paddedSnapshotsSize +
                   paddedRecoversSize +
                   paddedBailoutSize +
                   paddedConstantsSize +
                   paddedSafepointIndicesSize+
                   paddedOsiIndicesSize +
                   paddedCacheEntriesSize +
                   paddedRuntimeSize +
                   paddedSafepointSize +
                   paddedCallTargetSize +
                   paddedBackedgeSize;
    uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
    if (!buffer)
        return nullptr;

    // Placement-construct the header at the start of the buffer.
    IonScript *script = reinterpret_cast<IonScript *>(buffer);
    new (script) IonScript();

    // Lay out each section in turn, recording its offset and entry count.
    uint32_t offsetCursor = sizeof(IonScript);

    script->runtimeData_ = offsetCursor;
    script->runtimeSize_ = runtimeSize;
    offsetCursor += paddedRuntimeSize;

    script->cacheIndex_ = offsetCursor;
    script->cacheEntries_ = cacheEntries;
    offsetCursor += paddedCacheEntriesSize;

    script->safepointIndexOffset_ = offsetCursor;
    script->safepointIndexEntries_ = safepointIndices;
    offsetCursor += paddedSafepointIndicesSize;

    script->safepointsStart_ = offsetCursor;
    script->safepointsSize_ = safepointsSize;
    offsetCursor += paddedSafepointSize;

    script->bailoutTable_ = offsetCursor;
    script->bailoutEntries_ = bailoutEntries;
    offsetCursor += paddedBailoutSize;

    script->osiIndexOffset_ = offsetCursor;
    script->osiIndexEntries_ = osiIndices;
    offsetCursor += paddedOsiIndicesSize;

    script->snapshots_ = offsetCursor;
    script->snapshotsListSize_ = snapshotsListSize;
    script->snapshotsRVATableSize_ = snapshotsRVATableSize;
    offsetCursor += paddedSnapshotsSize;

    script->recovers_ = offsetCursor;
    script->recoversSize_ = recoversSize;
    offsetCursor += paddedRecoversSize;

    script->constantTable_ = offsetCursor;
    script->constantEntries_ = constants;
    offsetCursor += paddedConstantsSize;

    script->callTargetList_ = offsetCursor;
    script->callTargetEntries_ = callTargetEntries;
    offsetCursor += paddedCallTargetSize;

    script->backedgeList_ = offsetCursor;
    script->backedgeEntries_ = backedgeEntries;
    offsetCursor += paddedBackedgeSize;

    script->frameSlots_ = frameSlots;
    script->frameSize_ = frameSize;

    script->recompileInfo_ = recompileInfo;
    script->optimizationLevel_ = optimizationLevel;

    return script;
}
// Trace the GC things this IonScript holds: its code, deopt table, constant
// pool, and call-target scripts. Also piggybacks parallel-age propagation
// onto the call-target walk.
void
IonScript::trace(JSTracer *trc)
{
    if (method_)
        MarkJitCode(trc, &method_, "method");

    if (deoptTable_)
        MarkJitCode(trc, &deoptTable_, "deoptimizationTable");

    for (size_t i = 0; i < numConstants(); i++)
        gc::MarkValue(trc, &getConstant(i), "constant");

    // No write barrier is needed for the call target list, as it's attached
    // at compilation time and is read only.
    for (size_t i = 0; i < callTargetEntries(); i++) {
        // Propagate the parallelAge to the call targets.
        if (callTargetList()[i]->hasParallelIonScript())
            callTargetList()[i]->parallelIonScript()->parallelAge_ = parallelAge_;

        gc::MarkScriptUnbarriered(trc, &callTargetList()[i], "callTarget");
    }
}
// Incremental-GC pre-barrier: when the zone needs barriers, trace the
// IonScript's contents so nothing it references is lost mid-collection.
/* static */ void
IonScript::writeBarrierPre(Zone *zone, IonScript *ionScript)
{
#ifdef JSGC_INCREMENTAL
    if (zone->needsBarrier())
        ionScript->trace(zone->barrierTracer());
#endif
}
// Copy the snapshot list and the RVA (recover-value-association) table from
// the writer into this script's snapshot section; sizes must match the
// values passed to IonScript::New.
void
IonScript::copySnapshots(const SnapshotWriter *writer)
{
    MOZ_ASSERT(writer->listSize() == snapshotsListSize_);
    memcpy((uint8_t *)this + snapshots_,
           writer->listBuffer(), snapshotsListSize_);

    // The RVA table follows the list immediately and must be non-empty.
    MOZ_ASSERT(snapshotsRVATableSize_);
    MOZ_ASSERT(writer->RVATableSize() == snapshotsRVATableSize_);
    memcpy((uint8_t *)this + snapshots_ + snapshotsListSize_,
           writer->RVATableBuffer(), snapshotsRVATableSize_);
}
// Copy the recover instructions from the writer into this script's recovers
// section; size must match the value passed to IonScript::New.
void
IonScript::copyRecovers(const RecoverWriter *writer)
{
    MOZ_ASSERT(writer->size() == recoversSize_);
    memcpy((uint8_t *)this + recovers_, writer->buffer(), recoversSize_);
}
// Copy the encoded safepoints from the writer into this script's safepoint
// section; size must match the value passed to IonScript::New.
void
IonScript::copySafepoints(const SafepointWriter *writer)
{
    JS_ASSERT(writer->size() == safepointsSize_);
    memcpy((uint8_t *)this + safepointsStart_, writer->buffer(), safepointsSize_);
}
// Copy the bailout snapshot-offset table (bailoutEntries_ uint32 entries)
// into this script's bailout section.
void
IonScript::copyBailoutTable(const SnapshotOffset *table)
{
    memcpy(bailoutTable(), table, bailoutEntries_ * sizeof(uint32_t));
}
1000 void
1001 IonScript::copyConstants(const Value *vp)
1002 {
1003 for (size_t i = 0; i < constantEntries_; i++)
1004 constants()[i].init(vp[i]);
1005 }
1007 void
1008 IonScript::copyCallTargetEntries(JSScript **callTargets)
1009 {
1010 for (size_t i = 0; i < callTargetEntries_; i++)
1011 callTargetList()[i] = callTargets[i];
1012 }
// Materialize the PatchableBackedge list from the compile-time |backedges|
// descriptions, point each backedge at its initial target, and register it
// with the runtime so interrupt requests can repoint it later.
void
IonScript::copyPatchableBackedges(JSContext *cx, JitCode *code,
                                  PatchableBackedgeInfo *backedges)
{
    for (size_t i = 0; i < backedgeEntries_; i++) {
        const PatchableBackedgeInfo &info = backedges[i];
        PatchableBackedge *patchableBackedge = &backedgeList()[i];

        CodeLocationJump backedge(code, info.backedge);
        CodeLocationLabel loopHeader(code, CodeOffsetLabel(info.loopHeader->offset()));
        CodeLocationLabel interruptCheck(code, CodeOffsetLabel(info.interruptCheck->offset()));
        // The backedge storage is raw trailing memory of the IonScript, so
        // construct the entry in place with placement new.
        new(patchableBackedge) PatchableBackedge(backedge, loopHeader, interruptCheck);

        // Point the backedge to either of its possible targets, according to
        // whether an interrupt is currently desired, matching the targets
        // established by ensureIonCodeAccessible() above. We don't handle the
        // interrupt immediately as the interrupt lock is held here.
        PatchJump(backedge, cx->runtime()->interrupt ? interruptCheck : loopHeader);

        cx->runtime()->jitRuntime()->addPatchableBackedge(patchableBackedge);
    }
}
// Copy the safepoint index table, then rewrite each entry's displacement from
// its pre-link assembler-buffer offset to its actual offset in the final code.
void
IonScript::copySafepointIndices(const SafepointIndex *si, MacroAssembler &masm)
{
    // NOTE(review): the comment that used to sit here described cache jumps
    // and appears to have been copied from copyCacheEntries below; what this
    // loop actually does is remap safepoint displacements via actualOffset().
    SafepointIndex *table = safepointIndices();
    memcpy(table, si, safepointIndexEntries_ * sizeof(SafepointIndex));
    for (size_t i = 0; i < safepointIndexEntries_; i++)
        table[i].adjustDisplacement(masm.actualOffset(table[i].displacement()));
}
1049 void
1050 IonScript::copyOsiIndices(const OsiIndex *oi, MacroAssembler &masm)
1051 {
1052 memcpy(osiIndices(), oi, osiIndexEntries_ * sizeof(OsiIndex));
1053 for (unsigned i = 0; i < osiIndexEntries_; i++)
1054 osiIndices()[i].fixUpOffset(masm);
1055 }
// Copy the runtime data segment (IC structures and other mutable per-script
// data) verbatim into this IonScript's trailing storage.
void
IonScript::copyRuntimeData(const uint8_t *data)
{
    memcpy(runtimeData(), data, runtimeSize());
}
1063 void
1064 IonScript::copyCacheEntries(const uint32_t *caches, MacroAssembler &masm)
1065 {
1066 memcpy(cacheIndex(), caches, numCaches() * sizeof(uint32_t));
1068 // Jumps in the caches reflect the offset of those jumps in the compiled
1069 // code, not the absolute positions of the jumps. Update according to the
1070 // final code address now.
1071 for (size_t i = 0; i < numCaches(); i++)
1072 getCacheFromIndex(i).updateBaseAddress(method_, masm);
1073 }
// Find the SafepointIndex whose displacement equals |disp| using an
// interpolation-search guess followed by a linear scan toward the target.
// The entry must exist; release builds reach MOZ_ASSUME_UNREACHABLE otherwise.
const SafepointIndex *
IonScript::getSafepointIndex(uint32_t disp) const
{
    JS_ASSERT(safepointIndexEntries_ > 0);

    const SafepointIndex *table = safepointIndices();
    if (safepointIndexEntries_ == 1) {
        JS_ASSERT(disp == table[0].displacement());
        return &table[0];
    }

    size_t minEntry = 0;
    size_t maxEntry = safepointIndexEntries_ - 1;
    uint32_t min = table[minEntry].displacement();
    uint32_t max = table[maxEntry].displacement();

    // Raise if the element is not in the list.
    JS_ASSERT(min <= disp && disp <= max);

    // Approximate the location of the FrameInfo.
    size_t guess = (disp - min) * (maxEntry - minEntry) / (max - min) + minEntry;
    uint32_t guessDisp = table[guess].displacement();

    if (table[guess].displacement() == disp)
        return &table[guess];

    // Doing a linear scan from the guess should be more efficient in case of
    // small group which are equally distributed on the code.
    //
    // such as: <... ... ... ... . ... ...>
    if (guessDisp > disp) {
        // NOTE(review): |guess| is unsigned and |minEntry| is 0, so the
        // condition below is also true after --guess wraps; termination
        // relies on the invariant (asserted above) that table[0] holds the
        // minimum displacement, which forces a return at or before index 0.
        while (--guess >= minEntry) {
            guessDisp = table[guess].displacement();
            JS_ASSERT(guessDisp >= disp);
            if (guessDisp == disp)
                return &table[guess];
        }
    } else {
        while (++guess <= maxEntry) {
            guessDisp = table[guess].displacement();
            JS_ASSERT(guessDisp <= disp);
            if (guessDisp == disp)
                return &table[guess];
        }
    }

    MOZ_ASSUME_UNREACHABLE("displacement not found.");
}
1124 const OsiIndex *
1125 IonScript::getOsiIndex(uint32_t disp) const
1126 {
1127 for (const OsiIndex *it = osiIndices(), *end = osiIndices() + osiIndexEntries_;
1128 it != end;
1129 ++it)
1130 {
1131 if (it->returnPointDisplacement() == disp)
1132 return it;
1133 }
1135 MOZ_ASSUME_UNREACHABLE("Failed to find OSI point return address");
1136 }
1138 const OsiIndex *
1139 IonScript::getOsiIndex(uint8_t *retAddr) const
1140 {
1141 IonSpew(IonSpew_Invalidate, "IonScript %p has method %p raw %p", (void *) this, (void *)
1142 method(), method()->raw());
1144 JS_ASSERT(containsCodeAddress(retAddr));
1145 uint32_t disp = retAddr - method()->raw();
1146 return getOsiIndex(disp);
1147 }
1149 void
1150 IonScript::Trace(JSTracer *trc, IonScript *script)
1151 {
1152 if (script != ION_DISABLED_SCRIPT)
1153 script->trace(trc);
1154 }
// Static destruction entry point. Teardown order matters: caches are
// destroyed and runtime links (patchable backedges, asm.js dependencies)
// severed before the IonScript memory itself is released.
void
IonScript::Destroy(FreeOp *fop, IonScript *script)
{
    script->destroyCaches();
    script->unlinkFromRuntime(fop);
    fop->free_(script);
}
// Enable or disable the pre-barrier instrumentation baked into this script's
// generated code (used when incremental GC starts/stops marking).
void
IonScript::toggleBarriers(bool enabled)
{
    method()->togglePreBarriers(enabled);
}
1170 void
1171 IonScript::purgeCaches()
1172 {
1173 // Don't reset any ICs if we're invalidated, otherwise, repointing the
1174 // inline jump could overwrite an invalidation marker. These ICs can
1175 // no longer run, however, the IC slow paths may be active on the stack.
1176 // ICs therefore are required to check for invalidation before patching,
1177 // to ensure the same invariant.
1178 if (invalidated())
1179 return;
1181 for (size_t i = 0; i < numCaches(); i++)
1182 getCacheFromIndex(i).reset();
1183 }
1185 void
1186 IonScript::destroyCaches()
1187 {
1188 for (size_t i = 0; i < numCaches(); i++)
1189 getCacheFromIndex(i).destroy();
1190 }
1192 bool
1193 IonScript::addDependentAsmJSModule(JSContext *cx, DependentAsmJSModuleExit exit)
1194 {
1195 if (!dependentAsmJSModules) {
1196 dependentAsmJSModules = cx->new_<Vector<DependentAsmJSModuleExit> >(cx);
1197 if (!dependentAsmJSModules)
1198 return false;
1199 }
1200 return dependentAsmJSModules->append(exit);
1201 }
// Sever every link between this IonScript and runtime-wide structures:
// dependent asm.js FFI exits and registered patchable backedges. Safe to call
// more than once (see the idempotency note at the end).
void
IonScript::unlinkFromRuntime(FreeOp *fop)
{
    // Remove any links from AsmJSModules that contain optimized FFI calls into
    // this IonScript.
    if (dependentAsmJSModules) {
        for (size_t i = 0; i < dependentAsmJSModules->length(); i++) {
            DependentAsmJSModuleExit exit = dependentAsmJSModules->begin()[i];
            exit.module->detachIonCompilation(exit.exitIndex);
        }

        fop->delete_(dependentAsmJSModules);
        dependentAsmJSModules = nullptr;
    }

    // The writes to the executable buffer below may clobber backedge jumps, so
    // make sure that those backedges are unlinked from the runtime and not
    // reclobbered with garbage if an interrupt is requested.
    JSRuntime *rt = fop->runtime();
    for (size_t i = 0; i < backedgeEntries_; i++) {
        PatchableBackedge *backedge = &backedgeList()[i];
        rt->jitRuntime()->removePatchableBackedge(backedge);
    }

    // Clear the list of backedges, so that this method is idempotent. It is
    // called during destruction, and may be additionally called when the
    // script is invalidated.
    backedgeEntries_ = 0;
}
1233 void
1234 jit::ToggleBarriers(JS::Zone *zone, bool needs)
1235 {
1236 JSRuntime *rt = zone->runtimeFromMainThread();
1237 if (!rt->hasJitRuntime())
1238 return;
1240 for (gc::CellIterUnderGC i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
1241 JSScript *script = i.get<JSScript>();
1242 if (script->hasIonScript())
1243 script->ionScript()->toggleBarriers(needs);
1244 if (script->hasBaselineScript())
1245 script->baselineScript()->toggleBarriers(needs);
1246 }
1248 for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
1249 if (comp->jitCompartment())
1250 comp->jitCompartment()->toggleBaselineStubBarriers(needs);
1251 }
1252 }
1254 namespace js {
1255 namespace jit {
// Run the full MIR optimization pipeline over |mir|'s graph. Pass order is
// load-bearing (e.g. phi elimination before code elimination, alias analysis
// before LICM/GVN, and nothing moves instructions after DCE). Returns false
// on OOM or when the compilation is cancelled between passes.
bool
OptimizeMIR(MIRGenerator *mir)
{
    MIRGraph &graph = mir->graph();
    TraceLogger *logger;
    if (GetIonContext()->runtime->onMainThread())
        logger = TraceLoggerForMainThread(GetIonContext()->runtime);
    else
        logger = TraceLoggerForCurrentThread();

    if (!mir->compilingAsmJS()) {
        if (!MakeMRegExpHoistable(graph))
            return false;
    }

    IonSpewPass("BuildSSA");
    AssertBasicGraphCoherency(graph);

    if (mir->shouldCancel("Start"))
        return false;

    {
        AutoTraceLog log(logger, TraceLogger::SplitCriticalEdges);
        if (!SplitCriticalEdges(graph))
            return false;
        IonSpewPass("Split Critical Edges");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Split Critical Edges"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::RenumberBlocks);
        if (!RenumberBlocks(graph))
            return false;
        IonSpewPass("Renumber Blocks");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Renumber Blocks"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::DominatorTree);
        if (!BuildDominatorTree(graph))
            return false;
        // No spew: graph not changed.

        if (mir->shouldCancel("Dominator Tree"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::PhiAnalysis);
        // Aggressive phi elimination must occur before any code elimination. If the
        // script contains a try-statement, we only compiled the try block and not
        // the catch or finally blocks, so in this case it's also invalid to use
        // aggressive phi elimination.
        Observability observability = graph.hasTryBlock()
                                      ? ConservativeObservability
                                      : AggressiveObservability;
        if (!EliminatePhis(mir, graph, observability))
            return false;
        IonSpewPass("Eliminate phis");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Eliminate phis"))
            return false;

        if (!BuildPhiReverseMapping(graph))
            return false;
        AssertExtendedGraphCoherency(graph);
        // No spew: graph not changed.

        if (mir->shouldCancel("Phi reverse mapping"))
            return false;
    }

    if (!mir->compilingAsmJS()) {
        AutoTraceLog log(logger, TraceLogger::ApplyTypes);
        if (!ApplyTypeInformation(mir, graph))
            return false;
        IonSpewPass("Apply types");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Apply types"))
            return false;
    }

    // Parallel (PJS) compilations additionally verify that every instruction
    // in the graph is safe to execute concurrently.
    if (graph.entryBlock()->info().executionMode() == ParallelExecution) {
        AutoTraceLog log(logger, TraceLogger::ParallelSafetyAnalysis);
        ParallelSafetyAnalysis analysis(mir, graph);
        if (!analysis.analyze())
            return false;
    }

    // Alias analysis is required for LICM and GVN so that we don't move
    // loads across stores.
    if (mir->optimizationInfo().licmEnabled() ||
        mir->optimizationInfo().gvnEnabled())
    {
        AutoTraceLog log(logger, TraceLogger::AliasAnalysis);
        AliasAnalysis analysis(mir, graph);
        if (!analysis.analyze())
            return false;
        IonSpewPass("Alias analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Alias analysis"))
            return false;

        // Eliminating dead resume point operands requires basic block
        // instructions to be numbered. Reuse the numbering computed during
        // alias analysis.
        if (!EliminateDeadResumePointOperands(mir, graph))
            return false;

        if (mir->shouldCancel("Eliminate dead resume point operands"))
            return false;
    }

    if (mir->optimizationInfo().gvnEnabled()) {
        AutoTraceLog log(logger, TraceLogger::GVN);
        ValueNumberer gvn(mir, graph, mir->optimizationInfo().gvnKind() == GVN_Optimistic);
        if (!gvn.analyze())
            return false;
        IonSpewPass("GVN");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("GVN"))
            return false;
    }

    if (mir->optimizationInfo().uceEnabled()) {
        AutoTraceLog log(logger, TraceLogger::UCE);
        UnreachableCodeElimination uce(mir, graph);
        if (!uce.analyze())
            return false;
        IonSpewPass("UCE");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("UCE"))
            return false;
    }

    if (mir->optimizationInfo().licmEnabled()) {
        AutoTraceLog log(logger, TraceLogger::LICM);
        // LICM can hoist instructions from conditional branches and trigger
        // repeated bailouts. Disable it if this script is known to bailout
        // frequently.
        JSScript *script = mir->info().script();
        if (!script || !script->hadFrequentBailouts()) {
            LICM licm(mir, graph);
            if (!licm.analyze())
                return false;
            IonSpewPass("LICM");
            AssertExtendedGraphCoherency(graph);

            if (mir->shouldCancel("LICM"))
                return false;
        }
    }

    if (mir->optimizationInfo().rangeAnalysisEnabled()) {
        AutoTraceLog log(logger, TraceLogger::RangeAnalysis);
        RangeAnalysis r(mir, graph);
        if (!r.addBetaNodes())
            return false;
        IonSpewPass("Beta");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("RA Beta"))
            return false;

        if (!r.analyze() || !r.addRangeAssertions())
            return false;
        IonSpewPass("Range Analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Range Analysis"))
            return false;

        if (!r.removeBetaNodes())
            return false;
        IonSpewPass("De-Beta");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("RA De-Beta"))
            return false;

        if (mir->optimizationInfo().uceEnabled()) {
            bool shouldRunUCE = false;
            if (!r.prepareForUCE(&shouldRunUCE))
                return false;
            IonSpewPass("RA check UCE");
            AssertExtendedGraphCoherency(graph);

            if (mir->shouldCancel("RA check UCE"))
                return false;

            if (shouldRunUCE) {
                UnreachableCodeElimination uce(mir, graph);
                uce.disableAliasAnalysis();
                if (!uce.analyze())
                    return false;
                IonSpewPass("UCE After RA");
                AssertExtendedGraphCoherency(graph);

                if (mir->shouldCancel("UCE After RA"))
                    return false;
            }
        }

        if (!r.truncate())
            return false;
        IonSpewPass("Truncate Doubles");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Truncate Doubles"))
            return false;
    }

    if (mir->optimizationInfo().eaaEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EffectiveAddressAnalysis);
        EffectiveAddressAnalysis eaa(graph);
        if (!eaa.analyze())
            return false;
        IonSpewPass("Effective Address Analysis");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("Effective Address Analysis"))
            return false;
    }

    {
        AutoTraceLog log(logger, TraceLogger::EliminateDeadCode);
        if (!EliminateDeadCode(mir, graph))
            return false;
        IonSpewPass("DCE");
        AssertExtendedGraphCoherency(graph);

        if (mir->shouldCancel("DCE"))
            return false;
    }

    // Passes after this point must not move instructions; these analyses
    // depend on knowing the final order in which instructions will execute.

    if (mir->optimizationInfo().edgeCaseAnalysisEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EdgeCaseAnalysis);
        EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
        if (!edgeCaseAnalysis.analyzeLate())
            return false;
        IonSpewPass("Edge Case Analysis (Late)");
        AssertGraphCoherency(graph);

        if (mir->shouldCancel("Edge Case Analysis (Late)"))
            return false;
    }

    if (mir->optimizationInfo().eliminateRedundantChecksEnabled()) {
        AutoTraceLog log(logger, TraceLogger::EliminateRedundantChecks);
        // Note: check elimination has to run after all other passes that move
        // instructions. Since check uses are replaced with the actual index,
        // code motion after this pass could incorrectly move a load or store
        // before its bounds check.
        if (!EliminateRedundantChecks(graph))
            return false;
        IonSpewPass("Bounds Check Elimination");
        AssertGraphCoherency(graph);
    }

    return true;
}
// Lower the optimized MIR graph to LIR, run the configured register
// allocator, and re-merge the critical edges split earlier. Returns nullptr
// on OOM, allocator failure, or cancellation.
LIRGraph *
GenerateLIR(MIRGenerator *mir)
{
    MIRGraph &graph = mir->graph();

    LIRGraph *lir = mir->alloc().lifoAlloc()->new_<LIRGraph>(&graph);
    if (!lir || !lir->init())
        return nullptr;

    LIRGenerator lirgen(mir, graph, *lir);
    if (!lirgen.generate())
        return nullptr;
    IonSpewPass("Generate LIR");

    if (mir->shouldCancel("Generate LIR"))
        return nullptr;

    AllocationIntegrityState integrity(*lir);

    switch (mir->optimizationInfo().registerAllocator()) {
      case RegisterAllocator_LSRA: {
#ifdef DEBUG
        // Record/check allocation integrity in debug builds only; LSRA does
        // not rely on the checker for safepoint data.
        if (!integrity.record())
            return nullptr;
#endif

        LinearScanAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        IonSpewPass("Allocate Registers [LSRA]", &regalloc);
        break;
      }

      case RegisterAllocator_Backtracking: {
#ifdef DEBUG
        if (!integrity.record())
            return nullptr;
#endif

        BacktrackingAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;

#ifdef DEBUG
        if (!integrity.check(false))
            return nullptr;
#endif

        IonSpewPass("Allocate Registers [Backtracking]");
        break;
      }

      case RegisterAllocator_Stupid: {
        // Use the integrity checker to populate safepoint information, so
        // run it in all builds.
        if (!integrity.record())
            return nullptr;

        StupidAllocator regalloc(mir, &lirgen, *lir);
        if (!regalloc.go())
            return nullptr;
        if (!integrity.check(true))
            return nullptr;
        IonSpewPass("Allocate Registers [Stupid]");
        break;
      }

      default:
        MOZ_ASSUME_UNREACHABLE("Bad regalloc");
    }

    if (mir->shouldCancel("Allocate Registers"))
        return nullptr;

    // Now that all optimization and register allocation is done, re-introduce
    // critical edges to avoid unnecessary jumps.
    if (!UnsplitEdges(lir))
        return nullptr;
    IonSpewPass("Unsplit Critical Edges");
    AssertBasicGraphCoherency(graph);

    return lir;
}
1623 CodeGenerator *
1624 GenerateCode(MIRGenerator *mir, LIRGraph *lir)
1625 {
1626 CodeGenerator *codegen = js_new<CodeGenerator>(mir, lir);
1627 if (!codegen)
1628 return nullptr;
1630 if (!codegen->generate()) {
1631 js_delete(codegen);
1632 return nullptr;
1633 }
1635 return codegen;
1636 }
1638 CodeGenerator *
1639 CompileBackEnd(MIRGenerator *mir)
1640 {
1641 if (!OptimizeMIR(mir))
1642 return nullptr;
1644 LIRGraph *lir = GenerateLIR(mir);
1645 if (!lir)
1646 return nullptr;
1648 return GenerateCode(mir, lir);
1649 }
// Drain the worker-thread finished-builder list for |cx|'s compartment,
// linking each background compilation on the main thread. Holds the worker
// thread lock except around the link itself.
void
AttachFinishedCompilations(JSContext *cx)
{
#ifdef JS_THREADSAFE
    JitCompartment *ion = cx->compartment()->jitCompartment();
    if (!ion)
        return;

    types::AutoEnterAnalysis enterTypes(cx);
    AutoLockWorkerThreadState lock;

    GlobalWorkerThreadState::IonBuilderVector &finished = WorkerThreadState().ionFinishedList();

    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());

    // Incorporate any off thread compilations for the compartment which have
    // finished, failed or have been cancelled.
    while (true) {
        IonBuilder *builder = nullptr;

        // Find a finished builder for the compartment.
        for (size_t i = 0; i < finished.length(); i++) {
            IonBuilder *testBuilder = finished[i];
            if (testBuilder->compartment == CompileCompartment::get(cx->compartment())) {
                builder = testBuilder;
                WorkerThreadState().remove(finished, &i);
                break;
            }
        }
        if (!builder)
            break;

        // A builder with no codegen failed or was cancelled off thread; it
        // still needs FinishOffThreadBuilder() below to be reclaimed.
        if (CodeGenerator *codegen = builder->backgroundCodegen()) {
            RootedScript script(cx, builder->script());
            IonContext ictx(cx, &builder->alloc());
            AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
            AutoTraceLog logLink(logger, TraceLogger::IonLinking);

            // Root the assembler until the builder is finished below. As it
            // was constructed off thread, the assembler has not been rooted
            // previously, though any GC activity would discard the builder.
            codegen->masm.constructRoot(cx);

            bool success;
            {
                // Release the worker thread lock and root the compiler for GC.
                AutoTempAllocatorRooter root(cx, &builder->alloc());
                AutoUnlockWorkerThreadState unlock;
                success = codegen->link(cx, builder->constraints());
            }

            if (!success) {
                // Silently ignore OOM during code generation. The caller is
                // InvokeInterruptCallback, which always runs at a
                // nondeterministic time. It's not OK to throw a catchable
                // exception from there.
                cx->clearPendingException();
            }
        }

        FinishOffThreadBuilder(builder);
    }
#endif
}
// Primary chunk size (4 KiB) for the LifoAlloc that backs each IonBuilder.
static const size_t BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
1718 static inline bool
1719 OffThreadCompilationAvailable(JSContext *cx)
1720 {
1721 #ifdef JS_THREADSAFE
1722 // Even if off thread compilation is enabled, compilation must still occur
1723 // on the main thread in some cases. Do not compile off thread during an
1724 // incremental GC, as this may trip incremental read barriers.
1725 //
1726 // Require cpuCount > 1 so that Ion compilation jobs and main-thread
1727 // execution are not competing for the same resources.
1728 //
1729 // Skip off thread compilation if PC count profiling is enabled, as
1730 // CodeGenerator::maybeCreateScriptCounts will not attach script profiles
1731 // when running off thread.
1732 return cx->runtime()->canUseParallelIonCompilation()
1733 && WorkerThreadState().cpuCount > 1
1734 && cx->runtime()->gcIncrementalState == gc::NO_INCREMENTAL
1735 && !cx->runtime()->profilingScripts;
1736 #else
1737 return false;
1738 #endif
1739 }
// Ensure the type system tracks every property reachable through |obj|'s
// shape lineage. Only valid for singleton-typed objects.
static void
TrackAllProperties(JSContext *cx, JSObject *obj)
{
    JS_ASSERT(obj->hasSingletonType());

    for (Shape::Range<NoGC> range(obj->lastProperty()); !range.empty(); range.popFront())
        types::EnsureTrackPropertyTypes(cx, obj, range.front().propid());
}
1750 static void
1751 TrackPropertiesForSingletonScopes(JSContext *cx, JSScript *script, BaselineFrame *baselineFrame)
1752 {
1753 // Ensure that all properties of singleton call objects which the script
1754 // could access are tracked. These are generally accessed through
1755 // ALIASEDVAR operations in baseline and will not be tracked even if they
1756 // have been accessed in baseline code.
1757 JSObject *environment = script->functionNonDelazifying()
1758 ? script->functionNonDelazifying()->environment()
1759 : nullptr;
1761 while (environment && !environment->is<GlobalObject>()) {
1762 if (environment->is<CallObject>() && environment->hasSingletonType())
1763 TrackAllProperties(cx, environment);
1764 environment = environment->enclosingScope();
1765 }
1767 if (baselineFrame) {
1768 JSObject *scope = baselineFrame->scopeChain();
1769 if (scope->is<CallObject>() && scope->hasSingletonType())
1770 TrackAllProperties(cx, scope);
1771 }
1772 }
// Drive one Ion compilation of |script|: set up allocators and compile
// metadata, run the front end (IonBuilder), then either hand the builder to a
// helper thread or run the back end and link immediately.
//
// Returns AbortReason_NoAbort on success (including when compilation was
// handed off), AbortReason_Alloc on OOM, or the builder/back-end abort reason.
static AbortReason
IonCompile(JSContext *cx, JSScript *script,
           BaselineFrame *baselineFrame, jsbytecode *osrPc, bool constructing,
           ExecutionMode executionMode, bool recompile,
           OptimizationLevel optimizationLevel)
{
    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
    AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
    AutoTraceLog logCompile(logger, TraceLogger::IonCompilation);

    JS_ASSERT(optimizationLevel > Optimization_DontCompile);

    // Make sure the script's canonical function isn't lazy. We can't de-lazify
    // it in a worker thread.
    script->ensureNonLazyCanonicalFunction(cx);

    TrackPropertiesForSingletonScopes(cx, script, baselineFrame);

    // The LifoAlloc owns everything built below; autoDelete reclaims it on
    // every early-return path unless the builder is handed off (forget()).
    LifoAlloc *alloc = cx->new_<LifoAlloc>(BUILDER_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
    if (!alloc)
        return AbortReason_Alloc;

    ScopedJSDeletePtr<LifoAlloc> autoDelete(alloc);

    TempAllocator *temp = alloc->new_<TempAllocator>(alloc);
    if (!temp)
        return AbortReason_Alloc;

    IonContext ictx(cx, temp);

    types::AutoEnterAnalysis enter(cx);

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return AbortReason_Alloc;

    if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
        return AbortReason_Alloc;

    if (executionMode == ParallelExecution &&
        LIRGenerator::allowInlineForkJoinGetSlice() &&
        !cx->runtime()->jitRuntime()->ensureForkJoinGetSliceStubExists(cx))
    {
        return AbortReason_Alloc;
    }

    MIRGraph *graph = alloc->new_<MIRGraph>(temp);
    if (!graph)
        return AbortReason_Alloc;

    CompileInfo *info = alloc->new_<CompileInfo>(script, script->functionNonDelazifying(), osrPc,
                                                 constructing, executionMode,
                                                 script->needsArgsObj());
    if (!info)
        return AbortReason_Alloc;

    BaselineInspector *inspector = alloc->new_<BaselineInspector>(script);
    if (!inspector)
        return AbortReason_Alloc;

    BaselineFrameInspector *baselineFrameInspector = nullptr;
    if (baselineFrame) {
        baselineFrameInspector = NewBaselineFrameInspector(temp, baselineFrame, info);
        if (!baselineFrameInspector)
            return AbortReason_Alloc;
    }

    AutoTempAllocatorRooter root(cx, temp);
    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(*temp);
    if (!constraints)
        return AbortReason_Alloc;

    const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(optimizationLevel);
    const JitCompileOptions options(cx);

    // The builder gets a null JSContext: it may run on a helper thread and
    // must not use the main-thread context.
    IonBuilder *builder = alloc->new_<IonBuilder>((JSContext *) nullptr,
                                                  CompileCompartment::get(cx->compartment()),
                                                  options, temp, graph, constraints,
                                                  inspector, info, optimizationInfo,
                                                  baselineFrameInspector);
    if (!builder)
        return AbortReason_Alloc;

    JS_ASSERT(recompile == HasIonScript(builder->script(), executionMode));
    JS_ASSERT(CanIonCompile(builder->script(), executionMode));

    RootedScript builderScript(cx, builder->script());

    if (recompile) {
        JS_ASSERT(executionMode == SequentialExecution);
        builderScript->ionScript()->setRecompiling();
    }

    IonSpewNewFunction(graph, builderScript);

    bool succeeded = builder->build();
    builder->clearForBackEnd();

    if (!succeeded)
        return builder->abortReason();

    // If possible, compile the script off thread.
    if (OffThreadCompilationAvailable(cx)) {
        if (!recompile)
            SetIonScript(builderScript, executionMode, ION_COMPILING_SCRIPT);

        IonSpew(IonSpew_Logs, "Can't log script %s:%d. (Compiled on background thread.)",
                builderScript->filename(), builderScript->lineno());

        if (!StartOffThreadIonCompile(cx, builder)) {
            IonSpew(IonSpew_Abort, "Unable to start off-thread ion compilation.");
            return AbortReason_Alloc;
        }

        // The allocator and associated data will be destroyed after being
        // processed in the finishedOffThreadCompilations list.
        autoDelete.forget();

        return AbortReason_NoAbort;
    }

    ScopedJSDeletePtr<CodeGenerator> codegen(CompileBackEnd(builder));
    if (!codegen) {
        IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
        return AbortReason_Disable;
    }

    bool success = codegen->link(cx, builder->constraints());

    IonSpewEndFunction();

    return success ? AbortReason_NoAbort : AbortReason_Disable;
}
1907 static bool
1908 CheckFrame(BaselineFrame *frame)
1909 {
1910 JS_ASSERT(!frame->isGeneratorFrame());
1911 JS_ASSERT(!frame->isDebuggerFrame());
1913 // This check is to not overrun the stack.
1914 if (frame->isFunctionFrame() && TooManyArguments(frame->numActualArgs())) {
1915 IonSpew(IonSpew_Abort, "too many actual args");
1916 return false;
1917 }
1919 return true;
1920 }
// Check whether |script| is structurally eligible for Ion compilation.
// NOTE(review): |osr| is not consulted in this body; it appears to be kept
// for interface symmetry with callers — confirm before removing.
static bool
CheckScript(JSContext *cx, JSScript *script, bool osr)
{
    if (script->isForEval()) {
        // Eval frames are not yet supported. Supporting this will require new
        // logic in pushBailoutFrame to deal with linking prev.
        // Additionally, JSOP_DEFVAR support will require baking in isEvalFrame().
        IonSpew(IonSpew_Abort, "eval script");
        return false;
    }

    if (!script->compileAndGo()) {
        IonSpew(IonSpew_Abort, "not compile-and-go");
        return false;
    }

    return true;
}
// Apply the size limits for Ion compilation. Returns Method_Compiled when
// the script is within limits, Method_CantCompile when it is permanently too
// large, or Method_Skipped when it is only too large for the main thread
// right now (it may compile off thread later).
static MethodStatus
CheckScriptSize(JSContext *cx, JSScript* script)
{
    if (!js_JitOptions.limitScriptSize)
        return Method_Compiled;

    if (script->length() > MAX_OFF_THREAD_SCRIPT_SIZE) {
        // Some scripts are so large we never try to Ion compile them.
        IonSpew(IonSpew_Abort, "Script too large (%u bytes)", script->length());
        return Method_CantCompile;
    }

    uint32_t numLocalsAndArgs = analyze::TotalSlots(script);
    if (cx->runtime()->isWorkerRuntime()) {
        // DOM Workers don't have off thread compilation enabled. Since workers
        // don't block the browser's event loop, allow them to compile larger
        // scripts.
        JS_ASSERT(!cx->runtime()->canUseParallelIonCompilation());

        if (script->length() > MAX_DOM_WORKER_SCRIPT_SIZE ||
            numLocalsAndArgs > MAX_DOM_WORKER_LOCALS_AND_ARGS)
        {
            return Method_CantCompile;
        }

        return Method_Compiled;
    }

    if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE ||
        numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
    {
#ifdef JS_THREADSAFE
        size_t cpuCount = WorkerThreadState().cpuCount;
#else
        size_t cpuCount = 1;
#endif
        if (cx->runtime()->canUseParallelIonCompilation() && cpuCount > 1) {
            // Even if off thread compilation is enabled, there are cases where
            // compilation must still occur on the main thread. Don't compile
            // in these cases (except when profiling scripts, as compilations
            // occurring with profiling should reflect those without), but do
            // not forbid compilation so that the script may be compiled later.
            if (!OffThreadCompilationAvailable(cx) && !cx->runtime()->profilingScripts) {
                IonSpew(IonSpew_Abort,
                        "Script too large for main thread, skipping (%u bytes) (%u locals/args)",
                        script->length(), numLocalsAndArgs);
                return Method_Skipped;
            }
        } else {
            IonSpew(IonSpew_Abort, "Script too large (%u bytes) (%u locals/args)",
                    script->length(), numLocalsAndArgs);
            return Method_CantCompile;
        }
    }

    return Method_Compiled;
}
1999 bool
2000 CanIonCompileScript(JSContext *cx, JSScript *script, bool osr)
2001 {
2002 if (!script->canIonCompile() || !CheckScript(cx, script, osr))
2003 return false;
2005 return CheckScriptSize(cx, script) == Method_Compiled;
2006 }
2008 static OptimizationLevel
2009 GetOptimizationLevel(HandleScript script, jsbytecode *pc, ExecutionMode executionMode)
2010 {
2011 if (executionMode == ParallelExecution)
2012 return Optimization_Normal;
2014 JS_ASSERT(executionMode == SequentialExecution);
2016 return js_IonOptimizations.levelForScript(script, pc);
2017 }
// Central compilation driver shared by all the CanEnter* entry points.
// Decides whether to (re)compile |script| for |executionMode|, kicks off
// IonCompile, and maps the abort reason to a MethodStatus. |osrFrame| and
// |osrPc| are non-null only for sequential on-stack-replacement entries.
// Returns:
//   Method_Compiled    - a usable IonScript (matching |osrPc|, if given)
//                        is now attached to the script.
//   Method_Skipped     - compilation deferred (off-thread, wrong OSR pc,
//                        not warm enough, ...); caller may retry later.
//   Method_CantCompile - permanent failure; caller should forbid.
//   Method_Error       - OOM or other error was reported on |cx|.
static MethodStatus
Compile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
        bool constructing, ExecutionMode executionMode)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(jit::IsBaselineEnabled(cx));
    JS_ASSERT_IF(osrPc != nullptr, LoopEntryCanIonOsr(osrPc));
    JS_ASSERT_IF(executionMode == ParallelExecution, !osrFrame && !osrPc);
    JS_ASSERT_IF(executionMode == ParallelExecution, !HasIonScript(script, executionMode));

    // Ion builds on top of Baseline's type information.
    if (!script->hasBaselineScript())
        return Method_Skipped;

    if (cx->compartment()->debugMode()) {
        IonSpew(IonSpew_Abort, "debugging");
        return Method_CantCompile;
    }

    if (!CheckScript(cx, script, bool(osrPc))) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return Method_CantCompile;
    }

    MethodStatus status = CheckScriptSize(cx, script);
    if (status != Method_Compiled) {
        IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
        return status;
    }

    bool recompile = false;
    OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc, executionMode);
    if (optimizationLevel == Optimization_DontCompile)
        return Method_Skipped;

    // If an IonScript already exists, decide whether it can be reused as-is
    // or whether a recompilation (e.g. for a different OSR pc or higher
    // optimization level) is warranted.
    IonScript *scriptIon = GetIonScript(script, executionMode);
    if (scriptIon) {
        if (!scriptIon->method())
            return Method_CantCompile;

        MethodStatus failedState = Method_Compiled;

        // If we keep failing to enter the script due to an OSR pc mismatch,
        // recompile with the right pc.
        if (osrPc && script->ionScript()->osrPc() != osrPc) {
            uint32_t count = script->ionScript()->incrOsrPcMismatchCounter();
            if (count <= js_JitOptions.osrPcMismatchesBeforeRecompile)
                return Method_Skipped;

            // From here on, a bail-out path reports Skipped (retry later)
            // rather than Compiled, since the existing code can't be entered.
            failedState = Method_Skipped;
        }

        // Don't recompile/overwrite higher optimized code,
        // with a lower optimization level.
        if (optimizationLevel < scriptIon->optimizationLevel())
            return failedState;

        // Same level and matching (or absent) OSR pc: existing code suffices.
        if (optimizationLevel == scriptIon->optimizationLevel() &&
            (!osrPc || script->ionScript()->osrPc() == osrPc))
        {
            return failedState;
        }

        // Don't start compiling if already compiling
        if (scriptIon->isRecompiling())
            return failedState;

        if (osrPc)
            script->ionScript()->resetOsrPcMismatchCounter();

        recompile = true;
    }

    AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing, executionMode,
                                    recompile, optimizationLevel);
    if (reason == AbortReason_Error)
        return Method_Error;

    if (reason == AbortReason_Disable)
        return Method_CantCompile;

    if (reason == AbortReason_Alloc) {
        js_ReportOutOfMemory(cx);
        return Method_Error;
    }

    // Compilation succeeded or we invalidated right away or an inlining/alloc abort
    if (HasIonScript(script, executionMode)) {
        // The new code must match the requested OSR entry point to be usable.
        if (osrPc && script->ionScript()->osrPc() != osrPc)
            return Method_Skipped;
        return Method_Compiled;
    }
    return Method_Skipped;
}
2113 } // namespace jit
2114 } // namespace js
// Decide if a transition from interpreter execution to Ion code should occur.
// May compile or recompile the target JSScript.
//
// Called at JSOP_LOOPENTRY bytecodes eligible for on-stack replacement
// (OSR): |pc| is the loop entry we would like to jump into.
MethodStatus
jit::CanEnterAtBranch(JSContext *cx, JSScript *script, BaselineFrame *osrFrame,
                      jsbytecode *pc, bool isConstructing)
{
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT((JSOp)*pc == JSOP_LOOPENTRY);
    JS_ASSERT(LoopEntryCanIonOsr(pc));

    // Skip if the script has been disabled.
    if (!script->canIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isIonCompilingOffThread())
        return Method_Skipped;

    // Skip if the code is expected to result in a bailout.
    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // Optionally ignore on user request.
    if (!js_JitOptions.osr)
        return Method_Skipped;

    // Mark as forbidden if frame can't be handled.
    if (!CheckFrame(osrFrame)) {
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // Attempt compilation.
    // - Returns Method_Compiled if the right ionscript is present
    //   (Meaning it was present or a sequential compile finished.)
    // - Returns Method_Skipped if pc doesn't match
    //   (This means a background thread compilation with that pc could have
    //   started or not.)
    RootedScript rscript(cx, script);
    MethodStatus status = Compile(cx, rscript, osrFrame, pc, isConstructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script);
        return status;
    }

    return Method_Compiled;
}
// Decide whether a function-call (or eval) entry from the interpreter
// should go through Ion, compiling the script if needed. For constructing
// calls this may allocate |this| first, which can GC and can even disable
// the JITs entirely (hence the re-check of IsIonEnabled below).
MethodStatus
jit::CanEnter(JSContext *cx, RunState &state)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    JSScript *script = state.script();

    // Skip if the script has been disabled.
    if (!script->canIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isIonCompilingOffThread())
        return Method_Skipped;

    // Skip if the code is expected to result in a bailout.
    if (script->hasIonScript() && script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // If constructing, allocate a new |this| object before building Ion.
    // Creating |this| is done before building Ion because it may change the
    // type information and invalidate compilation results.
    if (state.isInvoke()) {
        InvokeState &invoke = *state.asInvoke();

        // Too many actual arguments: the rectifier/arguments machinery
        // can't represent the call, so permanently forbid compilation.
        if (TooManyArguments(invoke.args().length())) {
            IonSpew(IonSpew_Abort, "too many actual args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        if (TooManyArguments(invoke.args().callee().as<JSFunction>().nargs())) {
            IonSpew(IonSpew_Abort, "too many args");
            ForbidCompilation(cx, script);
            return Method_CantCompile;
        }

        if (invoke.constructing() && invoke.args().thisv().isPrimitive()) {
            // CreateThisForFunction can GC, so keep the script rooted across
            // the call and re-read it afterwards.
            RootedScript scriptRoot(cx, script);
            RootedObject callee(cx, &invoke.args().callee());
            RootedObject obj(cx, CreateThisForFunction(cx, callee,
                                                       invoke.useNewType()
                                                       ? SingletonObject
                                                       : GenericObject));
            if (!obj || !jit::IsIonEnabled(cx)) // Note: OOM under CreateThis can disable TI.
                return Method_Skipped;
            invoke.args().setThis(ObjectValue(*obj));
            script = scriptRoot;
        }
    } else if (state.isGenerator()) {
        IonSpew(IonSpew_Abort, "generator frame");
        ForbidCompilation(cx, script);
        return Method_CantCompile;
    }

    // If --ion-eager is used, compile with Baseline first, so that we
    // can directly enter IonMonkey.
    RootedScript rscript(cx, script);
    if (js_JitOptions.eagerCompilation && !rscript->hasBaselineScript()) {
        MethodStatus status = CanEnterBaselineMethod(cx, state);
        if (status != Method_Compiled)
            return status;
    }

    // Attempt compilation. Returns Method_Compiled if already compiled.
    bool constructing = state.isInvoke() && state.asInvoke()->constructing();
    MethodStatus status =
        Compile(cx, rscript, nullptr, nullptr, constructing, SequentialExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, rscript);
        return status;
    }

    return Method_Compiled;
}
2241 MethodStatus
2242 jit::CompileFunctionForBaseline(JSContext *cx, HandleScript script, BaselineFrame *frame,
2243 bool isConstructing)
2244 {
2245 JS_ASSERT(jit::IsIonEnabled(cx));
2246 JS_ASSERT(frame->fun()->nonLazyScript()->canIonCompile());
2247 JS_ASSERT(!frame->fun()->nonLazyScript()->isIonCompilingOffThread());
2248 JS_ASSERT(!frame->fun()->nonLazyScript()->hasIonScript());
2249 JS_ASSERT(frame->isFunctionFrame());
2251 // Mark as forbidden if frame can't be handled.
2252 if (!CheckFrame(frame)) {
2253 ForbidCompilation(cx, script);
2254 return Method_CantCompile;
2255 }
2257 // Attempt compilation. Returns Method_Compiled if already compiled.
2258 MethodStatus status =
2259 Compile(cx, script, frame, nullptr, isConstructing, SequentialExecution);
2260 if (status != Method_Compiled) {
2261 if (status == Method_CantCompile)
2262 ForbidCompilation(cx, script);
2263 return status;
2264 }
2266 return Method_Compiled;
2267 }
2269 MethodStatus
2270 jit::Recompile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
2271 bool constructing)
2272 {
2273 JS_ASSERT(script->hasIonScript());
2274 if (script->ionScript()->isRecompiling())
2275 return Method_Compiled;
2277 MethodStatus status =
2278 Compile(cx, script, osrFrame, osrPc, constructing, SequentialExecution);
2279 if (status != Method_Compiled) {
2280 if (status == Method_CantCompile)
2281 ForbidCompilation(cx, script);
2282 return status;
2283 }
2285 return Method_Compiled;
2286 }
// Decide whether |script| can be entered in parallel (ForkJoin) mode,
// compiling a parallel IonScript if needed. May GC; see the subtleties
// noted inline.
MethodStatus
jit::CanEnterInParallel(JSContext *cx, HandleScript script)
{
    // Skip if the script has been disabled.
    //
    // Note: We return Method_Skipped in this case because the other
    // CanEnter() methods do so. However, ForkJoin.cpp detects this
    // condition differently and treats it more like an error.
    if (!script->canParallelIonCompile())
        return Method_Skipped;

    // Skip if the script is being compiled off thread.
    if (script->isParallelIonCompilingOffThread())
        return Method_Skipped;

    MethodStatus status = Compile(cx, script, nullptr, nullptr, false, ParallelExecution);
    if (status != Method_Compiled) {
        if (status == Method_CantCompile)
            ForbidCompilation(cx, script, ParallelExecution);
        return status;
    }

    // This can GC, so afterward, script->parallelIon is
    // not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    // Subtle: it is possible for GC to occur during
    // compilation of one of the invoked functions, which
    // would cause the earlier functions (such as the
    // kernel itself) to be collected. In this event, we
    // give up and fallback to sequential for now.
    if (!script->hasParallelIonScript()) {
        parallel::Spew(
            parallel::SpewCompile,
            "Script %p:%s:%u was garbage-collected or invalidated",
            script.get(), script->filename(), script->lineno());
        return Method_Skipped;
    }

    return Method_Compiled;
}
// Decide whether |script| can be called through the FastInvoke path
// (direct jump into existing Ion code, bypassing the usual entry checks).
// Requires an existing, healthy IonScript and no argument underflow.
MethodStatus
jit::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
{
    JS_ASSERT(jit::IsIonEnabled(cx));

    // Skip if the code is expected to result in a bailout.
    if (!script->hasIonScript() || script->ionScript()->bailoutExpected())
        return Method_Skipped;

    // Don't handle arguments underflow, to make this work we would have to pad
    // missing arguments with |undefined|.
    if (numActualArgs < script->functionNonDelazifying()->nargs())
        return Method_Skipped;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return Method_Error;

    // This can GC, so afterward, script->ion is not guaranteed to be valid.
    if (!cx->runtime()->jitRuntime()->enterIon())
        return Method_Error;

    // Re-check after the potential GC above.
    if (!script->hasIonScript())
        return Method_Skipped;

    return Method_Compiled;
}
// Transfer control into compiled Ion code described by |data| (jitcode,
// argv layout, callee token). Returns IonExec_Ok with data.result set on
// success, IonExec_Error if the code returned the magic error sentinel,
// or IonExec_Aborted on over-recursion before entering.
static IonExecStatus
EnterIon(JSContext *cx, EnterJitData &data)
{
    JS_CHECK_RECURSION(cx, return IonExec_Aborted);
    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!data.osrFrame);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();

    // Caller must construct |this| before invoking the Ion function.
    JS_ASSERT_IF(data.constructing, data.maxArgv[0].isObject());

    data.result.setInt32(data.numActualArgs);
    {
        // The JitActivation must cover exactly the generated-code call; it
        // is torn down before we inspect the result.
        AssertCompartmentUnchanged pcc(cx);
        JitActivation activation(cx, data.constructing);

        CALL_GENERATED_CODE(enter, data.jitcode, data.maxArgc, data.maxArgv, /* osrFrame = */nullptr, data.calleeToken,
                            /* scopeChain = */ nullptr, 0, data.result.address());
    }

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    // Jit callers wrap primitive constructor return.
    if (!data.result.isMagic() && data.constructing && data.result.isPrimitive())
        data.result = data.maxArgv[0];

    // Release temporary buffer used for OSR into Ion.
    cx->runtime()->getJitRuntime(cx)->freeOsrTempData();

    JS_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
    return data.result.isMagic() ? IonExec_Error : IonExec_Ok;
}
// Populate |data| (argc/argv layout, callee token, scope chain) for a jit
// entry from |state|. For invoke entries, maxArgv points at |this|
// followed by the arguments; if there are fewer actuals than formals the
// values are copied into |vals| and padded with |undefined|, and |vals|
// must therefore outlive the use of |data|. Returns false on OOM.
bool
jit::SetEnterJitData(JSContext *cx, EnterJitData &data, RunState &state, AutoValueVector &vals)
{
    data.osrFrame = nullptr;

    if (state.isInvoke()) {
        CallArgs &args = state.asInvoke()->args();
        unsigned numFormals = state.script()->functionNonDelazifying()->nargs();
        data.constructing = state.asInvoke()->constructing();
        data.numActualArgs = args.length();
        // +1 slot for |this| in front of the arguments.
        data.maxArgc = Max(args.length(), numFormals) + 1;
        data.scopeChain = nullptr;
        data.calleeToken = CalleeToToken(&args.callee().as<JSFunction>());

        if (data.numActualArgs >= numFormals) {
            // args.base() points at the callee; skip it so maxArgv starts
            // at |this|.
            data.maxArgv = args.base() + 1;
        } else {
            // Pad missing arguments with |undefined|.
            // Indices 1..length+1 of args.base() are |this| plus the
            // length actual arguments.
            for (size_t i = 1; i < args.length() + 2; i++) {
                if (!vals.append(args.base()[i]))
                    return false;
            }

            while (vals.length() < numFormals + 1) {
                if (!vals.append(UndefinedValue()))
                    return false;
            }

            JS_ASSERT(vals.length() >= numFormals + 1);
            data.maxArgv = vals.begin();
        }
    } else {
        data.constructing = false;
        data.numActualArgs = 0;
        data.maxArgc = 1;
        // Execute entries pass only |this|.
        data.maxArgv = state.asExecute()->addressOfThisv();
        data.scopeChain = state.asExecute()->scopeChain();

        data.calleeToken = CalleeToToken(state.script());

        // A non-global eval runs with the calling function's callee token so
        // frame iteration attributes it correctly.
        if (state.script()->isForEval() &&
            !(state.asExecute()->type() & InterpreterFrame::GLOBAL))
        {
            ScriptFrameIter iter(cx);
            if (iter.isFunctionFrame())
                data.calleeToken = CalleeToToken(iter.callee());
        }
    }

    return true;
}
2444 IonExecStatus
2445 jit::IonCannon(JSContext *cx, RunState &state)
2446 {
2447 IonScript *ion = state.script()->ionScript();
2449 EnterJitData data(cx);
2450 data.jitcode = ion->method()->raw();
2452 AutoValueVector vals(cx);
2453 if (!SetEnterJitData(cx, data, state, vals))
2454 return IonExec_Error;
2456 IonExecStatus status = EnterIon(cx, data);
2458 if (status == IonExec_Ok)
2459 state.setReturnValue(data.result);
2461 return status;
2462 }
// Fast re-entry into |fun|'s existing Ion code for an internal call with
// already-laid-out CallArgs (no argument rectification; the caller must
// have verified this via CanEnterUsingFastInvoke). The result is written
// into args.rval().
IonExecStatus
jit::FastInvoke(JSContext *cx, HandleFunction fun, CallArgs &args)
{
    JS_CHECK_RECURSION(cx, return IonExec_Error);

    IonScript *ion = fun->nonLazyScript()->ionScript();
    JitCode *code = ion->method();
    void *jitcode = code->raw();

    JS_ASSERT(jit::IsIonEnabled(cx));
    JS_ASSERT(!ion->bailoutExpected());

    JitActivation activation(cx, /* firstFrameIsConstructing = */false);

    EnterJitCode enter = cx->runtime()->jitRuntime()->enterIon();
    void *calleeToken = CalleeToToken(fun);

    // Seed the result with argc, matching the trampoline's expectations.
    RootedValue result(cx, Int32Value(args.length()));
    JS_ASSERT(args.length() >= fun->nargs());

    // args.array() - 1 points at |this|; +1 on argc accounts for it.
    CALL_GENERATED_CODE(enter, jitcode, args.length() + 1, args.array() - 1, /* osrFrame = */nullptr,
                        calleeToken, /* scopeChain = */ nullptr, 0, result.address());

    JS_ASSERT(!cx->runtime()->hasIonReturnOverride());

    args.rval().set(result);

    // The magic sentinel signals an exception/error path in the jitcode.
    JS_ASSERT_IF(result.isMagic(), result.isMagic(JS_ION_ERROR));
    return result.isMagic() ? IonExec_Error : IonExec_Ok;
}
// Walk one jit activation (starting at |ionTop|) and patch every Ion frame
// that must be invalidated so that, when control returns to it, it runs
// the invalidation epilogue instead of the original code. If
// |invalidateAll| is false, only frames whose IonScript is already flagged
// as invalidated are patched.
static void
InvalidateActivation(FreeOp *fop, uint8_t *ionTop, bool invalidateAll)
{
    IonSpew(IonSpew_Invalidate, "BEGIN invalidating activation");

    size_t frameno = 1;

    for (JitFrameIterator it(ionTop, SequentialExecution); !it.done(); ++it, ++frameno) {
        JS_ASSERT_IF(frameno == 1, it.type() == JitFrame_Exit);

#ifdef DEBUG
        // Debug-only spew describing each frame encountered in the walk.
        switch (it.type()) {
          case JitFrame_Exit:
            IonSpew(IonSpew_Invalidate, "#%d exit frame @ %p", frameno, it.fp());
            break;
          case JitFrame_BaselineJS:
          case JitFrame_IonJS:
          {
            JS_ASSERT(it.isScripted());
            const char *type = it.isIonJS() ? "Optimized" : "Baseline";
            IonSpew(IonSpew_Invalidate, "#%d %s JS frame @ %p, %s:%d (fun: %p, script: %p, pc %p)",
                    frameno, type, it.fp(), it.script()->filename(), it.script()->lineno(),
                    it.maybeCallee(), (JSScript *)it.script(), it.returnAddressToFp());
            break;
          }
          case JitFrame_BaselineStub:
            IonSpew(IonSpew_Invalidate, "#%d baseline stub frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Unwound_IonJS:
          case JitFrame_Unwound_BaselineStub:
            MOZ_ASSUME_UNREACHABLE("invalid");
          case JitFrame_Unwound_Rectifier:
            IonSpew(IonSpew_Invalidate, "#%d unwound rectifier frame @ %p", frameno, it.fp());
            break;
          case JitFrame_Entry:
            IonSpew(IonSpew_Invalidate, "#%d entry frame @ %p", frameno, it.fp());
            break;
        }
#endif

        // Only optimized (Ion) JS frames are candidates for patching.
        if (!it.isIonJS())
            continue;

        // See if the frame has already been invalidated.
        if (it.checkInvalidation())
            continue;

        JSScript *script = it.script();
        if (!script->hasIonScript())
            continue;

        if (!invalidateAll && !script->ionScript()->invalidated())
            continue;

        IonScript *ionScript = script->ionScript();

        // Purge ICs before we mark this script as invalidated. This will
        // prevent lastJump_ from appearing to be a bogus pointer, just
        // in case anyone tries to read it.
        ionScript->purgeCaches();

        // Clean up any pointers from elsewhere in the runtime to this IonScript
        // which is about to become disconnected from its JSScript.
        ionScript->unlinkFromRuntime(fop);

        // This frame needs to be invalidated. We do the following:
        //
        // 1. Increment the reference counter to keep the ionScript alive
        //    for the invalidation bailout or for the exception handler.
        // 2. Determine safepoint that corresponds to the current call.
        // 3. From safepoint, get distance to the OSI-patchable offset.
        // 4. From the IonScript, determine the distance between the
        //    call-patchable offset and the invalidation epilogue.
        // 5. Patch the OSI point with a call-relative to the
        //    invalidation epilogue.
        //
        // The code generator ensures that there's enough space for us
        // to patch in a call-relative operation at each invalidation
        // point.
        //
        // Note: you can't simplify this mechanism to "just patch the
        // instruction immediately after the call" because things may
        // need to move into a well-defined register state (using move
        // instructions after the call) in to capture an appropriate
        // snapshot after the call occurs.

        ionScript->incref();

        const SafepointIndex *si = ionScript->getSafepointIndex(it.returnAddressToFp());
        JitCode *ionCode = ionScript->method();

        JS::Zone *zone = script->zone();
        if (zone->needsBarrier()) {
            // We're about to remove edges from the JSScript to gcthings
            // embedded in the JitCode. Perform one final trace of the
            // JitCode for the incremental GC, as it must know about
            // those edges.
            ionCode->trace(zone->barrierTracer());
        }
        ionCode->setInvalidated();

        // Write the delta (from the return address offset to the
        // IonScript pointer embedded into the invalidation epilogue)
        // where the safepointed call instruction used to be. We rely on
        // the call sequence causing the safepoint being >= the size of
        // a uint32, which is checked during safepoint index
        // construction.
        CodeLocationLabel dataLabelToMunge(it.returnAddressToFp());
        ptrdiff_t delta = ionScript->invalidateEpilogueDataOffset() -
                          (it.returnAddressToFp() - ionCode->raw());
        Assembler::patchWrite_Imm32(dataLabelToMunge, Imm32(delta));

        CodeLocationLabel osiPatchPoint = SafepointReader::InvalidationPatchPoint(ionScript, si);
        CodeLocationLabel invalidateEpilogue(ionCode, ionScript->invalidateEpilogueOffset());

        IonSpew(IonSpew_Invalidate, "   ! Invalidate ionScript %p (ref %u) -> patching osipoint %p",
                ionScript, ionScript->refcount(), (void *) osiPatchPoint.raw());
        Assembler::patchWrite_NearCall(osiPatchPoint, invalidateEpilogue);
    }

    IonSpew(IonSpew_Invalidate, "END invalidating activation");
}
2621 void
2622 jit::StopAllOffThreadCompilations(JSCompartment *comp)
2623 {
2624 if (!comp->jitCompartment())
2625 return;
2626 CancelOffThreadIonCompile(comp, nullptr);
2627 FinishAllOffThreadCompilations(comp);
2628 }
2630 void
2631 jit::InvalidateAll(FreeOp *fop, Zone *zone)
2632 {
2633 for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
2634 StopAllOffThreadCompilations(comp);
2636 for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter) {
2637 if (iter->compartment()->zone() == zone) {
2638 IonSpew(IonSpew_Invalidate, "Invalidating all frames for GC");
2639 InvalidateActivation(fop, iter.jitTop(), true);
2640 }
2641 }
2642 }
// Core invalidation routine: flag every IonScript named in |invalid|,
// patch all live frames running them, then detach the IonScripts from
// their JSScripts. Reference counts added in the first loop are dropped
// in the last; the final assert verifies the balance.
void
jit::Invalidate(types::TypeZone &types, FreeOp *fop,
                const Vector<types::RecompileInfo> &invalid, bool resetUses,
                bool cancelOffThread)
{
    IonSpew(IonSpew_Invalidate, "Start invalidation.");

    // Add an invalidation reference to all invalidated IonScripts to indicate
    // to the traversal which frames have been invalidated.
    size_t numInvalidations = 0;
    for (size_t i = 0; i < invalid.length(); i++) {
        const types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        if (!co.isValid())
            continue;

        if (cancelOffThread)
            CancelOffThreadIonCompile(co.script()->compartment(), co.script());

        if (!co.ion())
            continue;

        IonSpew(IonSpew_Invalidate, " Invalidate %s:%u, IonScript %p",
                co.script()->filename(), co.script()->lineno(), co.ion());

        // Keep the ion script alive during the invalidation and flag this
        // ionScript as being invalidated. This increment is removed by the
        // loop after the calls to InvalidateActivation.
        co.ion()->incref();
        numInvalidations++;
    }

    if (!numInvalidations) {
        IonSpew(IonSpew_Invalidate, " No IonScript invalidation.");
        return;
    }

    // Patch every live frame in every activation that is running one of
    // the flagged IonScripts.
    for (JitActivationIterator iter(fop->runtime()); !iter.done(); ++iter)
        InvalidateActivation(fop, iter.jitTop(), false);

    // Drop the references added above. If a script was never active, its
    // IonScript will be immediately destroyed. Otherwise, it will be held live
    // until its last invalidated frame is destroyed.
    for (size_t i = 0; i < invalid.length(); i++) {
        types::CompilerOutput &co = *invalid[i].compilerOutput(types);
        if (!co.isValid())
            continue;

        ExecutionMode executionMode = co.mode();
        JSScript *script = co.script();
        IonScript *ionScript = co.ion();
        if (!ionScript)
            continue;

        SetIonScript(script, executionMode, nullptr);
        ionScript->decref(fop);
        co.invalidate();
        numInvalidations--;

        // Wait for the scripts to get warm again before doing another
        // compile, unless either:
        // (1) we are recompiling *because* a script got hot;
        //     (resetUses is false); or,
        // (2) we are invalidating a parallel script. This is because
        //     the useCount only applies to sequential uses. Parallel
        //     execution *requires* ion, and so we don't limit it to
        //     methods with a high usage count (though we do check that
        //     the useCount is at least 1 when compiling the transitive
        //     closure of potential callees, to avoid compiling things
        //     that are never run at all).
        if (resetUses && executionMode != ParallelExecution)
            script->resetUseCount();
    }

    // Make sure we didn't leak references by invalidating the same IonScript
    // multiple times in the above loop.
    JS_ASSERT(!numInvalidations);
}
2723 void
2724 jit::Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool resetUses,
2725 bool cancelOffThread)
2726 {
2727 jit::Invalidate(cx->zone()->types, cx->runtime()->defaultFreeOp(), invalid, resetUses,
2728 cancelOffThread);
2729 }
2731 bool
2732 jit::Invalidate(JSContext *cx, JSScript *script, ExecutionMode mode, bool resetUses,
2733 bool cancelOffThread)
2734 {
2735 JS_ASSERT(script->hasIonScript());
2737 if (cx->runtime()->spsProfiler.enabled()) {
2738 // Register invalidation with profiler.
2739 // Format of event payload string:
2740 // "<filename>:<lineno>"
2742 // Get the script filename, if any, and its length.
2743 const char *filename = script->filename();
2744 if (filename == nullptr)
2745 filename = "<unknown>";
2747 size_t len = strlen(filename) + 20;
2748 char *buf = js_pod_malloc<char>(len);
2749 if (!buf)
2750 return false;
2752 // Construct the descriptive string.
2753 JS_snprintf(buf, len, "Invalidate %s:%llu", filename, script->lineno());
2754 cx->runtime()->spsProfiler.markEvent(buf);
2755 js_free(buf);
2756 }
2758 Vector<types::RecompileInfo> scripts(cx);
2760 switch (mode) {
2761 case SequentialExecution:
2762 JS_ASSERT(script->hasIonScript());
2763 if (!scripts.append(script->ionScript()->recompileInfo()))
2764 return false;
2765 break;
2766 case ParallelExecution:
2767 JS_ASSERT(script->hasParallelIonScript());
2768 if (!scripts.append(script->parallelIonScript()->recompileInfo()))
2769 return false;
2770 break;
2771 default:
2772 MOZ_ASSUME_UNREACHABLE("No such execution mode");
2773 }
2775 Invalidate(cx, scripts, resetUses, cancelOffThread);
2776 return true;
2777 }
2779 bool
2780 jit::Invalidate(JSContext *cx, JSScript *script, bool resetUses, bool cancelOffThread)
2781 {
2782 return Invalidate(cx, script, SequentialExecution, resetUses, cancelOffThread);
2783 }
// Finish tearing down |ionScript| after it has been detached from
// |script|: invalidate its compiler output and destroy it, unless live
// invalidated frames still reference it (in which case destruction
// happens when the last such frame goes away).
static void
FinishInvalidationOf(FreeOp *fop, JSScript *script, IonScript *ionScript)
{
    types::TypeZone &types = script->zone()->types;

    // Note: If the script is about to be swept, the compiler output may have
    // already been destroyed.
    if (types::CompilerOutput *output = ionScript->recompileInfo().compilerOutput(types))
        output->invalidate();

    // If this script has Ion code on the stack, invalidated() will return
    // true. In this case we have to wait until destroying it.
    if (!ionScript->invalidated())
        jit::IonScript::Destroy(fop, ionScript);
}
// Detach and finish invalidating the IonScript (sequential or parallel,
// selected by the |mode| template parameter) attached to |script|, if any.
template <ExecutionMode mode>
void
jit::FinishInvalidation(FreeOp *fop, JSScript *script)
{
    // In all cases, nullptr out script->ion or script->parallelIon to avoid
    // re-entry.
    switch (mode) {
      case SequentialExecution:
        if (script->hasIonScript()) {
            IonScript *ion = script->ionScript();
            script->setIonScript(nullptr);
            FinishInvalidationOf(fop, script, ion);
        }
        return;

      case ParallelExecution:
        if (script->hasParallelIonScript()) {
            IonScript *parallelIon = script->parallelIonScript();
            script->setParallelIonScript(nullptr);
            FinishInvalidationOf(fop, script, parallelIon);
        }
        return;

      default:
        MOZ_ASSUME_UNREACHABLE("bad execution mode");
    }
}

// Explicit instantiations for the two supported execution modes.
template void
jit::FinishInvalidation<SequentialExecution>(FreeOp *fop, JSScript *script);

template void
jit::FinishInvalidation<ParallelExecution>(FreeOp *fop, JSScript *script);
2835 void
2836 jit::MarkValueFromIon(JSRuntime *rt, Value *vp)
2837 {
2838 gc::MarkValueUnbarriered(&rt->gcMarker, vp, "write barrier");
2839 }
2841 void
2842 jit::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
2843 {
2844 gc::MarkShapeUnbarriered(&rt->gcMarker, shapep, "write barrier");
2845 }
2847 void
2848 jit::ForbidCompilation(JSContext *cx, JSScript *script)
2849 {
2850 ForbidCompilation(cx, script, SequentialExecution);
2851 }
// Permanently disable Ion compilation of |script| for |mode| by first
// invalidating any existing code and then installing the
// ION_DISABLED_SCRIPT sentinel. Any pending off-thread compilation is
// cancelled.
void
jit::ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode)
{
    IonSpew(IonSpew_Abort, "Disabling Ion mode %d compilation of script %s:%d",
            mode, script->filename(), script->lineno());

    CancelOffThreadIonCompile(cx->compartment(), script);

    switch (mode) {
      case SequentialExecution:
        if (script->hasIonScript()) {
            // It is only safe to modify script->ion if the script is not currently
            // running, because JitFrameIterator needs to tell what ionScript to
            // use (either the one on the JSScript, or the one hidden in the
            // breadcrumbs Invalidation() leaves). Therefore, if invalidation
            // fails, we cannot disable the script.
            if (!Invalidate(cx, script, mode, false))
                return;
        }

        script->setIonScript(ION_DISABLED_SCRIPT);
        return;

      case ParallelExecution:
        if (script->hasParallelIonScript()) {
            if (!Invalidate(cx, script, mode, false))
                return;
        }

        script->setParallelIonScript(ION_DISABLED_SCRIPT);
        return;

      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    // Not reachable: every switch arm above returns or is itself
    // unreachable; kept as belt-and-braces.
    MOZ_ASSUME_UNREACHABLE("No such execution mode");
}
2892 AutoFlushICache *
2893 PerThreadData::autoFlushICache() const
2894 {
2895 return autoFlushICache_;
2896 }
2898 void
2899 PerThreadData::setAutoFlushICache(AutoFlushICache *afc)
2900 {
2901 autoFlushICache_ = afc;
2902 }
// Set the range for the merging of flushes. The flushing is deferred until the
// end of the AutoFlushICache context. Subsequent flushing within this range is
// also deferred. This is only expected to be called once for each
// AutoFlushICache context, and the caller is required to be within an
// AutoFlushICache context.
void
AutoFlushICache::setRange(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    JS_ASSERT(afc);
    JS_ASSERT(!afc->start_);
    IonSpewCont(IonSpew_CacheFlush, "(%x %x):", start, len);

    uintptr_t stop = start + len;
    afc->start_ = start;
    afc->stop_ = stop;
#endif
}
// Flush the instruction cache.
//
// If called within a dynamic AutoFlushICache context and if the range is already
// pending flushing for this AutoFlushICache context then the request is ignored
// with the understanding that it will be flushed on exit from the
// AutoFlushICache context. Otherwise the range is flushed immediately.
//
// Updates outside the current code object are typically the exception so they
// are flushed immediately rather than attempting to merge them.
//
// For efficiency it is expected that all large ranges will be flushed within an
// AutoFlushICache, so check. If this assertion is hit then it does not
// necessarily indicate a program fault but it might indicate a lost opportunity
// to merge cache flushing. It can be corrected by wrapping the call in an
// AutoFlushICache context.
void
AutoFlushICache::flush(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    if (!afc) {
        // No context: flush immediately. Only small out-of-context
        // flushes are expected (see comment above).
        IonSpewCont(IonSpew_CacheFlush, "#");
        JSC::ExecutableAllocator::cacheFlush((void*)start, len);
        JS_ASSERT(len <= 16);
        return;
    }

    uintptr_t stop = start + len;
    if (start >= afc->start_ && stop <= afc->stop_) {
        // Update is within the pending flush range, so defer to the end of the context.
        IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "-" : "=");
        return;
    }

    // Outside the pending range: flush immediately.
    IonSpewCont(IonSpew_CacheFlush, afc->inhibit_ ? "x" : "*");
    JSC::ExecutableAllocator::cacheFlush((void *)start, len);
#endif
}
// Flag the current dynamic AutoFlushICache as inhibiting flushing. Useful in
// error paths where the changes are being abandoned.
void
AutoFlushICache::setInhibit()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    AutoFlushICache *afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
    // A flush range must already have been set on the current context.
    JS_ASSERT(afc);
    JS_ASSERT(afc->start_);
    IonSpewCont(IonSpew_CacheFlush, "I");
    afc->inhibit_ = true;
#endif
}
// The common use case is merging cache flushes when preparing a code object.
// In this case the entire range of the code object is being flushed and as the
// code is patched smaller redundant flushes could occur. The design allows an
// AutoFlushICache dynamic thread local context to be declared in which the
// range of the code object can be set which defers flushing until the end of
// this dynamic context. The redundant flushing within this code range is also
// deferred avoiding redundant flushing. Flushing outside this code range is
// not affected and proceeds immediately.
//
// In some cases flushing is not necessary, such as when compiling an asm.js
// module which is flushed again when dynamically linked, and also in error
// paths that abandon the code. Flushing within the set code range can be
// inhibited within the AutoFlushICache dynamic context by setting an inhibit
// flag.
//
// The JS compiler can be re-entered while within an AutoFlushICache dynamic
// context and it is assumed that code being assembled or patched is not
// executed before the exit of the respective AutoFlushICache dynamic context.
//
// The constructor pushes this context onto the per-thread stack; |nonce|
// is a debug label used only for spew output.
AutoFlushICache::AutoFlushICache(const char *nonce, bool inhibit)
  : start_(0),
    stop_(0),
    name_(nonce),
    inhibit_(inhibit)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
    PerThreadData *pt = TlsPerThreadData.get();
    AutoFlushICache *afc = pt->PerThreadData::autoFlushICache();
    if (afc)
        IonSpew(IonSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_, inhibit ? " I" : "");
    else
        IonSpewCont(IonSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");

    // Save the enclosing context and make this one current.
    prev_ = afc;
    pt->PerThreadData::setAutoFlushICache(this);
#endif
}
3012 AutoFlushICache::~AutoFlushICache()
3013 {
3014 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
3015 PerThreadData *pt = TlsPerThreadData.get();
3016 JS_ASSERT(pt->PerThreadData::autoFlushICache() == this);
3018 if (!inhibit_ && start_)
3019 JSC::ExecutableAllocator::cacheFlush((void *)start_, size_t(stop_ - start_));
3021 IonSpewCont(IonSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
3022 IonSpewFin(IonSpew_CacheFlush);
3023 pt->PerThreadData::setAutoFlushICache(prev_);
3024 #endif
3025 }
3027 void
3028 jit::PurgeCaches(JSScript *script)
3029 {
3030 if (script->hasIonScript())
3031 script->ionScript()->purgeCaches();
3033 if (script->hasParallelIonScript())
3034 script->parallelIonScript()->purgeCaches();
3035 }
3037 size_t
3038 jit::SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf)
3039 {
3040 size_t result = 0;
3042 if (script->hasIonScript())
3043 result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);
3045 if (script->hasParallelIonScript())
3046 result += script->parallelIonScript()->sizeOfIncludingThis(mallocSizeOf);
3048 return result;
3049 }
3051 void
3052 jit::DestroyIonScripts(FreeOp *fop, JSScript *script)
3053 {
3054 if (script->hasIonScript())
3055 jit::IonScript::Destroy(fop, script->ionScript());
3057 if (script->hasParallelIonScript())
3058 jit::IonScript::Destroy(fop, script->parallelIonScript());
3060 if (script->hasBaselineScript())
3061 jit::BaselineScript::Destroy(fop, script->baselineScript());
3062 }
3064 void
3065 jit::TraceIonScripts(JSTracer* trc, JSScript *script)
3066 {
3067 if (script->hasIonScript())
3068 jit::IonScript::Trace(trc, script->ionScript());
3070 if (script->hasParallelIonScript())
3071 jit::IonScript::Trace(trc, script->parallelIonScript());
3073 if (script->hasBaselineScript())
3074 jit::BaselineScript::Trace(trc, script->baselineScript());
3075 }
3077 bool
3078 jit::RematerializeAllFrames(JSContext *cx, JSCompartment *comp)
3079 {
3080 for (JitActivationIterator iter(comp->runtimeFromMainThread()); !iter.done(); ++iter) {
3081 if (iter.activation()->compartment() == comp) {
3082 for (JitFrameIterator frameIter(iter); !frameIter.done(); ++frameIter) {
3083 if (!frameIter.isIonJS())
3084 continue;
3085 if (!iter.activation()->asJit()->getRematerializedFrame(cx, frameIter))
3086 return false;
3087 }
3088 }
3089 }
3090 return true;
3091 }
3093 bool
3094 jit::UpdateForDebugMode(JSContext *maybecx, JSCompartment *comp,
3095 AutoDebugModeInvalidation &invalidate)
3096 {
3097 MOZ_ASSERT(invalidate.isFor(comp));
3099 // Schedule invalidation of all optimized JIT code since debug mode
3100 // invalidates assumptions.
3101 invalidate.scheduleInvalidation(comp->debugMode());
3103 // Recompile on-stack baseline scripts if we have a cx.
3104 if (maybecx) {
3105 IonContext ictx(maybecx, nullptr);
3106 if (!RecompileOnStackBaselineScriptsForDebugMode(maybecx, comp)) {
3107 js_ReportOutOfMemory(maybecx);
3108 return false;
3109 }
3110 }
3112 return true;
3113 }
// Carry out the invalidation scheduled during this object's lifetime, for
// either a single compartment (comp_) or a whole zone (zone_) — exactly one
// of the two is set. Steps: stop off-thread compilations, protect active
// baseline scripts, invalidate on-stack Ion activations, then finish
// invalidation and discard JIT data per script.
AutoDebugModeInvalidation::~AutoDebugModeInvalidation()
{
    // Exactly one of comp_ and zone_ must be non-null.
    MOZ_ASSERT(!!comp_ != !!zone_);

    if (needInvalidation_ == NoNeed)
        return;

    Zone *zone = zone_ ? zone_ : comp_->zone();
    JSRuntime *rt = zone->runtimeFromMainThread();
    FreeOp *fop = rt->defaultFreeOp();

    // Stop off-thread Ion compilations for the affected compartment(s) so
    // no new code lands while we invalidate.
    if (comp_) {
        StopAllOffThreadCompilations(comp_);
    } else {
        for (CompartmentsInZoneIter comp(zone_); !comp.done(); comp.next())
            StopAllOffThreadCompilations(comp);
    }

    // Don't discard active baseline scripts. They are recompiled for debug
    // mode.
    jit::MarkActiveBaselineScripts(zone);

    // Invalidate Ion frames of every activation in the affected
    // compartment(s) so execution bails back out of optimized code.
    for (JitActivationIterator iter(rt); !iter.done(); ++iter) {
        JSCompartment *comp = iter->compartment();
        if (comp_ == comp || zone_ == comp->zone()) {
            IonContext ictx(CompileRuntime::get(rt));
            IonSpew(IonSpew_Invalidate, "Invalidating frames for debug mode toggle");
            InvalidateActivation(fop, iter.jitTop(), true);
        }
    }

    // Finish invalidation per script. When zone_ is set, every script
    // iterated below is in the affected zone, so the "|| zone_" test
    // accepts them all; otherwise only scripts of comp_ are torn down and
    // other scripts merely have their baseline active flag reset.
    for (gc::CellIter i(zone, gc::FINALIZE_SCRIPT); !i.done(); i.next()) {
        JSScript *script = i.get<JSScript>();
        if (script->compartment() == comp_ || zone_) {
            FinishInvalidation<SequentialExecution>(fop, script);
            FinishInvalidation<ParallelExecution>(fop, script);
            FinishDiscardBaselineScript(fop, script);
            script->resetUseCount();
        } else if (script->hasBaselineScript()) {
            script->baselineScript()->resetActive();
        }
    }
}