Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
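A minimal sketch of the intended gating (added for illustration; the helper
name and call sites are assumptions based on the description above, not code
from the actual patch -- only the pref name privacy.thirdparty.isolate comes
from the message):

    #include "mozilla/Preferences.h"

    // Hypothetical helper: double-key logic is on in private browsing, or
    // when the privacy.thirdparty.isolate pref is set.
    static bool
    ThirdPartyIsolationEnabled(bool aIsPrivate)
    {
        return aIsPrivate ||
               mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }

Callers of FindCookie would then test this condition and pass a nullptr for
the second (origin) key when double-key logic is disabled.
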
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonAnalysis.h"

#include "jsanalyze.h"

#include "jit/BaselineInspector.h"
#include "jit/BaselineJIT.h"
#include "jit/Ion.h"
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/LIR.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"
#include "jsopcodeinlines.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
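// For example (added illustration): if block A branches to blocks B and C,
// and B also falls through to C, then the edge A->C is critical: A has two
// successors and C has two predecessors. Splitting inserts a new block S so
// that the edge becomes A->S->C, and code can later be placed on S without
// affecting the B->C path.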
bool
jit::SplitCriticalEdges(MIRGraph &graph)
{
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numSuccessors() < 2)
            continue;
        for (size_t i = 0; i < block->numSuccessors(); i++) {
            MBasicBlock *target = block->getSuccessor(i);
            if (target->numPredecessors() < 2)
                continue;

            // Create a new block inheriting from the predecessor.
            MBasicBlock *split = MBasicBlock::NewSplitEdge(graph, block->info(), *block);
            if (!split)
                return false;
            split->setLoopDepth(block->loopDepth());
            graph.insertBlockAfter(*block, split);
            split->end(MGoto::New(graph.alloc(), target));

            block->replaceSuccessor(i, split);
            target->replacePredecessor(*block, split);
        }
    }
    return true;
}

// Operands to a resume point which are dead at the point of the resume can be
// replaced with optimized-out magic values. This analysis supports limited
// detection of dead operands, pruning those which are defined in the resume
// point's basic block and have no uses outside the block or at points later
// than the resume point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
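//
// Illustration (added; schematic, not actual MIR syntax):
//
//   t = add(x, y)
//   resumepoint ... t    ; t still live here
//   call(t)              ; last real use of t
//   resumepoint ... t    ; t is dead at this capture
//
// Only the final capture of t artificially extends its lifetime, so that
// operand is replaced with the JS_OPTIMIZED_OUT magic value.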
bool
jit::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    // If we are compiling try blocks, locals and arguments may be observable
    // from catch or finally blocks (which Ion does not compile). For now just
    // disable the pass in this case.
    if (graph.hasTryBlock())
        return true;

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() || ins->isComputeThis())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isImplicitlyUsed())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->consumer()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->consumer()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // The operand is an uneliminable slot. This currently
                // includes argument slots in non-strict scripts (due to being
                // observable via Function.arguments).
                if (!block->info().canOptimizeOutSlot(uses->index())) {
                    uses++;
                    continue;
                }

                // Store an optimized out magic value in place of all dead
                // resume point operands. Making any such substitution can in
                // general alter the interpreter's behavior, even though the
                // code is dead, as the interpreter will still execute opcodes
                // whose effects cannot be observed. If the magic value
                // were to flow to, say, a dead property access the
                // interpreter could throw an exception; we avoid this problem
                // by removing dead operands before removing dead code.
                MConstant *constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}

// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
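// For example (added illustration): an MAdd whose result is never read is
// discarded here, since it is not effectful, not a guard, and not captured
// by a resume point; an unused MCall in the same position survives because
// it is effectful.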
bool
jit::EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph)
{
    // Traverse in postorder so that we hit uses before definitions.
    // Traverse instruction list backwards for the same reason.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
            return false;

        // Remove unused instructions.
        for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
            if (!inst->isEffectful() && !inst->resumePoint() &&
                !inst->hasUses() && !inst->isGuard() &&
                !inst->isControlInstruction()) {
                inst = block->discardAt(inst);
            } else {
                inst++;
            }
        }
    }

    return true;
}

static inline bool
IsPhiObservable(MPhi *phi, Observability observe)
{
    // If the phi has uses which are not reflected in SSA, then behavior in the
    // interpreter may be affected by removing the phi.
    if (phi->isImplicitlyUsed())
        return true;

    // Check for uses of this phi node outside of other phi nodes.
    // Note that, initially, we skip reading resume points, which we
    // don't count as actual uses. If the only uses are resume points,
    // then the SSA name is never consumed by the program. However,
    // after optimizations have been performed, it's possible that the
    // actual uses in the program have been (incorrectly) optimized
    // away, so we must be more conservative and consider resume
    // points as well.
    switch (observe) {
      case AggressiveObservability:
        for (MUseDefIterator iter(phi); iter; iter++) {
            if (!iter.def()->isPhi())
                return true;
        }
        break;

      case ConservativeObservability:
        for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
            if (!iter->consumer()->isDefinition() ||
                !iter->consumer()->toDefinition()->isPhi())
                return true;
        }
        break;
    }

    uint32_t slot = phi->slot();
    CompileInfo &info = phi->block()->info();
    JSFunction *fun = info.funMaybeLazy();

    // If the Phi is of the |this| value, it must always be observable.
    if (fun && slot == info.thisSlot())
        return true;

    // If the function may need an arguments object, then make sure to
    // preserve the scope chain, because it may be needed to construct the
    // arguments object during bailout. If we've already created an arguments
    // object (or got one via OSR), preserve that as well.
    if (fun && info.hasArguments() &&
        (slot == info.scopeChainSlot() || slot == info.argsObjSlot()))
    {
        return true;
    }

    // The Phi is an uneliminable slot. Currently this includes argument slots
    // in non-strict scripts (due to being observable via Function.arguments).
    if (fun && !info.canOptimizeOutSlot(slot))
        return true;

    return false;
}

// Handles cases like:
//    x is phi(a, x) --> a
//    x is phi(a, a) --> a
static inline MDefinition *
IsPhiRedundant(MPhi *phi)
{
    MDefinition *first = phi->operandIfRedundant();
    if (first == nullptr)
        return nullptr;

    // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
    if (phi->isImplicitlyUsed())
        first->setImplicitlyUsedUnchecked();

    return first;
}

bool
jit::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}

namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
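// For example (added illustration): phi(int32, int32) specializes to int32;
// phi(int32, double) specializes to double, and an MToDouble is inserted on
// the int32 input; phi(int32, string) gets MIRType_Value, and both inputs
// are boxed.
//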
class TypeAnalyzer
{
    MIRGenerator *mir;
    MIRGraph &graph;
    Vector<MPhi *, 0, SystemAllocPolicy> phiWorklist_;

    TempAllocator &alloc() const {
        return graph.alloc();
    }

    bool addPhiToWorklist(MPhi *phi) {
        if (phi->isInWorklist())
            return true;
        if (!phiWorklist_.append(phi))
            return false;
        phi->setInWorklist();
        return true;
    }
    MPhi *popPhi() {
        MPhi *phi = phiWorklist_.popCopy();
        phi->setNotInWorklist();
        return phi;
    }

    bool respecialize(MPhi *phi, MIRType type);
    bool propagateSpecialization(MPhi *phi);
    bool specializePhis();
    void replaceRedundantPhi(MPhi *phi);
    void adjustPhiInputs(MPhi *phi);
    bool adjustInputs(MDefinition *def);
    bool insertConversions();

    bool checkFloatCoherency();
    bool graphContainsFloat32();
    bool markPhiConsumers();
    bool markPhiProducers();
    bool specializeValidFloatOps();
    bool tryEmitFloatOperations();

  public:
    TypeAnalyzer(MIRGenerator *mir, MIRGraph &graph)
      : mir(mir), graph(graph)
    { }

    bool analyze();
};

} /* anonymous namespace */

// Try to specialize this phi based on its non-cyclic inputs.
static MIRType
GuessPhiType(MPhi *phi, bool *hasInputsWithEmptyTypes)
{
#ifdef DEBUG
    // Check that different magic constants aren't flowing together. Ignore
    // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
    // away.
    MIRType magicType = MIRType_None;
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_MagicOptimizedArguments ||
            in->type() == MIRType_MagicHole ||
            in->type() == MIRType_MagicIsConstructing)
        {
            if (magicType == MIRType_None)
                magicType = in->type();
            MOZ_ASSERT(magicType == in->type());
        }
    }
#endif

    *hasInputsWithEmptyTypes = false;

    MIRType type = MIRType_None;
    bool convertibleToFloat32 = false;
    bool hasPhiInputs = false;
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->isPhi()) {
            hasPhiInputs = true;
            if (!in->toPhi()->triedToSpecialize())
                continue;
            if (in->type() == MIRType_None) {
                // The operand is a phi we tried to specialize, but we were
                // unable to guess its type. propagateSpecialization will
                // propagate the type to this phi when it becomes known.
                continue;
            }
        }

        // Ignore operands which we've never observed.
        if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
            *hasInputsWithEmptyTypes = true;
            continue;
        }

        if (type == MIRType_None) {
            type = in->type();
            if (in->canProduceFloat32())
                convertibleToFloat32 = true;
            continue;
        }
        if (type != in->type()) {
            if (convertibleToFloat32 && in->type() == MIRType_Float32) {
                // If we only saw definitions that can be converted into Float32 before and
                // encounter a Float32 value, promote previous values to Float32.
                type = MIRType_Float32;
            } else if (IsNumberType(type) && IsNumberType(in->type())) {
                // Specialize phis with int32 and double operands as double.
                type = MIRType_Double;
                convertibleToFloat32 &= in->canProduceFloat32();
            } else {
                return MIRType_Value;
            }
        }
    }

    if (type == MIRType_None && !hasPhiInputs) {
        // All inputs are non-phis with empty typesets. Use MIRType_Value
        // in this case, as it's impossible to get better type information.
        JS_ASSERT(*hasInputsWithEmptyTypes);
        type = MIRType_Value;
    }

    return type;
}

bool
TypeAnalyzer::respecialize(MPhi *phi, MIRType type)
{
    if (phi->type() == type)
        return true;
    phi->specialize(type);
    return addPhiToWorklist(phi);
}

bool
TypeAnalyzer::propagateSpecialization(MPhi *phi)
{
    JS_ASSERT(phi->type() != MIRType_None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi *use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType_None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 that can be converted to float and float operands as floats.
            if ((use->type() == MIRType_Int32 && use->canProduceFloat32() && phi->type() == MIRType_Float32) ||
                (phi->type() == MIRType_Int32 && phi->canProduceFloat32() && use->type() == MIRType_Float32))
            {
                if (!respecialize(use, MIRType_Float32))
                    return false;
                continue;
            }

            // Specialize phis with int32 and double operands as double.
            if (IsNumberType(use->type()) && IsNumberType(phi->type())) {
                if (!respecialize(use, MIRType_Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType_Value))
                return false;
        }
    }

    return true;
}

bool
TypeAnalyzer::specializePhis()
{
    Vector<MPhi *, 0, SystemAllocPolicy> phisWithEmptyInputTypes;

    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Specialize Phis (main loop)"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            bool hasInputsWithEmptyTypes;
            MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
            phi->specialize(type);
            if (type == MIRType_None) {
                // We tried to guess the type but failed because all operands are
                // phis we still have to visit. Set the triedToSpecialize flag but
                // don't propagate the type to other phis, propagateSpecialization
                // will do that once we know the type of one of the operands.

                // Edge case: when this phi has a non-phi input with an empty
                // typeset, it's possible for two phis to have a cyclic
                // dependency and they will both have MIRType_None. Specialize
                // such phis to MIRType_Value later on.
                if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
                    return false;
                continue;
            }
            if (!propagateSpecialization(*phi))
                return false;
        }
    }

    do {
        while (!phiWorklist_.empty()) {
            if (mir->shouldCancel("Specialize Phis (worklist)"))
                return false;

            MPhi *phi = popPhi();
            if (!propagateSpecialization(phi))
                return false;
        }

        // When two phis have a cyclic dependency and inputs that have an empty
        // typeset (which are ignored by GuessPhiType), we may still have to
        // specialize these to MIRType_Value.
        while (!phisWithEmptyInputTypes.empty()) {
            if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
                return false;

            MPhi *phi = phisWithEmptyInputTypes.popCopy();
            if (phi->type() == MIRType_None) {
                phi->specialize(MIRType_Value);
                if (!propagateSpecialization(phi))
                    return false;
            }
        }
    } while (!phiWorklist_.empty());

    return true;
}

void
TypeAnalyzer::adjustPhiInputs(MPhi *phi)
{
    MIRType phiType = phi->type();
    JS_ASSERT(phiType != MIRType_None);

    // If we specialized a type that's not Value, there are 3 cases:
    // 1. Every input is of that type.
    // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
    // 3. Inputs were doubles and int32s, and the phi was specialized to double.
    if (phiType != MIRType_Value) {
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition *in = phi->getOperand(i);
            if (in->type() == phiType)
                continue;

            if (in->isBox() && in->toBox()->input()->type() == phiType) {
                phi->replaceOperand(i, in->toBox()->input());
            } else {
                MInstruction *replacement;

                if (phiType == MIRType_Double && IsFloatType(in->type())) {
                    // Convert int32 operands to double.
                    replacement = MToDouble::New(alloc(), in);
                } else if (phiType == MIRType_Float32) {
                    if (in->type() == MIRType_Int32 || in->type() == MIRType_Double) {
                        replacement = MToFloat32::New(alloc(), in);
                    } else {
                        // See comment below
                        if (in->type() != MIRType_Value) {
                            MBox *box = MBox::New(alloc(), in);
                            in->block()->insertBefore(in->block()->lastIns(), box);
                            in = box;
                        }

                        MUnbox *unbox = MUnbox::New(alloc(), in, MIRType_Double, MUnbox::Fallible);
                        in->block()->insertBefore(in->block()->lastIns(), unbox);
                        replacement = MToFloat32::New(alloc(), unbox);
                    }
                } else {
                    // If we know this branch will fail to convert to phiType,
                    // insert a box that'll immediately fail in the fallible unbox
                    // below.
                    if (in->type() != MIRType_Value) {
                        MBox *box = MBox::New(alloc(), in);
                        in->block()->insertBefore(in->block()->lastIns(), box);
                        in = box;
                    }

                    // Be optimistic and insert unboxes when the operand is a
                    // value.
                    replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
                }

                in->block()->insertBefore(in->block()->lastIns(), replacement);
                phi->replaceOperand(i, replacement);
            }
        }

        return;
    }

    // Box every typed input.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_Value)
            continue;

        if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input())) {
            // The input is being explicitly unboxed, so sneak past and grab
            // the original box.
            phi->replaceOperand(i, in->toUnbox()->input());
        } else {
            MDefinition *box = BoxInputsPolicy::alwaysBoxAt(alloc(), in->block()->lastIns(), in);
            phi->replaceOperand(i, box);
        }
    }
}

bool
TypeAnalyzer::adjustInputs(MDefinition *def)
{
    TypePolicy *policy = def->typePolicy();
    if (policy && !policy->adjustInputs(alloc(), def->toInstruction()))
        return false;
    return true;
}

void
TypeAnalyzer::replaceRedundantPhi(MPhi *phi)
{
    MBasicBlock *block = phi->block();
    js::Value v;
    switch (phi->type()) {
      case MIRType_Undefined:
        v = UndefinedValue();
        break;
      case MIRType_Null:
        v = NullValue();
        break;
      case MIRType_MagicOptimizedArguments:
        v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
        break;
      case MIRType_MagicOptimizedOut:
        v = MagicValue(JS_OPTIMIZED_OUT);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected type");
    }
    MConstant *c = MConstant::New(alloc(), v);
    // The instruction pass will insert the box.
    block->insertBefore(*(block->begin()), c);
    phi->replaceAllUsesWith(c);
}

bool
TypeAnalyzer::insertConversions()
{
    // Instructions are processed in reverse postorder: all defs are seen
    // before their uses. This ensures that output adjustment (which may
    // rewrite inputs of uses) does not conflict with input adjustment.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("Insert Conversions"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();) {
            if (phi->type() == MIRType_Undefined ||
                phi->type() == MIRType_Null ||
                phi->type() == MIRType_MagicOptimizedArguments ||
                phi->type() == MIRType_MagicOptimizedOut)
            {
                replaceRedundantPhi(*phi);
                phi = block->discardPhiAt(phi);
            } else {
                adjustPhiInputs(*phi);
                phi++;
            }
        }
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            if (!adjustInputs(*iter))
                return false;
        }
    }
    return true;
}

// This function tries to emit Float32 specialized operations whenever it's possible.
// MIR nodes are flagged as:
// - Producers, when they can create Float32 that might need to be coerced into a Double.
//   Loads in Float32 arrays and conversions to Float32 are producers.
// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
//   Stores in Float32 arrays and conversions to Float32 are consumers.
// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
//   does not result in a compound loss of precision. This is the case for +, -, /, * with 2
//   operands, for instance. However, an addition with 3 operands is not commutative anymore,
//   so an intermediate coercion is needed.
// Except for phis, all these flags are known after Ion building, so they cannot change during
// the process.
//
// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
// has only producers as inputs and consumers as uses, we can specialize the operation as a
// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
//
// Phis have a special status. Phis need to be flagged as producers or consumers as they can
// be inputs or outputs of commutative instructions. Fortunately, the producer and consumer
// properties are such that we can deduce the property using all non-phi inputs first (which form
// an initial phi graph) and then propagate all properties from one phi to another using a
// fixed-point algorithm. The algorithm is guaranteed to terminate as each iteration has fewer
// or as many flagged phis as the previous iteration (so the worst steady state case is all phis
// being flagged as false).
//
// In a nutshell, the algorithm applies three passes:
// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
// all its non-phi inputs. Then each phi propagates its value to other phis. If after propagation,
// the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
// consumer if all of its uses are consumers.
// 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
// the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
// 3 - Go through all commutative operations and ensure their inputs are all producers and their
// uses are all consumers.
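//
// For example (added illustration; schematic names, not the actual MIR
// opcodes):
//
//   a = loadFloat32(arr, i)     ; producer
//   b = loadFloat32(arr, j)     ; producer
//   c = add(a, b)               ; float32-commutative
//   storeFloat32(arr, k, c)     ; consumer
//
// Here the add can be specialized to a Float32 add, since all of its inputs
// are producers and its only use is a consumer.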
bool
TypeAnalyzer::markPhiConsumers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in postorder so worklist is initialized to RPO.
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            bool canConsumeFloat32 = true;
            for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
                MDefinition *usedef = use.def();
                canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
            }
            phi->setCanConsumeFloat32(canConsumeFloat32);
            if (canConsumeFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

        bool validConsumer = true;
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
                validConsumer = false;
                break;
            }
        }

        if (validConsumer)
            continue;

        // Propagate invalidated phis.
        phi->setCanConsumeFloat32(false);
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
            {
                if (!addPhiToWorklist(input->toPhi()))
                    return false;
            }
        }
    }
    return true;
}

bool
TypeAnalyzer::markPhiProducers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in reverse postorder so worklist is initialized to PO.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            bool canProduceFloat32 = true;
            for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
                MDefinition *input = phi->getOperand(i);
                canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
            }
            phi->setCanProduceFloat32(canProduceFloat32);
            if (canProduceFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canProduceFloat32());

        bool validProducer = true;
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->canProduceFloat32()) {
                validProducer = false;
                break;
            }
        }

        if (validProducer)
            continue;

        // Propagate invalidated phis.
        phi->setCanProduceFloat32(false);
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
            {
                if (!addPhiToWorklist(def->toPhi()))
                    return false;
            }
        }
    }
    return true;
}

bool
TypeAnalyzer::specializeValidFloatOps()
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
            return false;

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
            if (!ins->isFloat32Commutative())
                continue;

            if (ins->type() == MIRType_Float32)
                continue;

            // This call will try to specialize the instruction iff all uses are consumers and
            // all inputs are producers.
            ins->trySpecializeFloat32(alloc());
        }
    }
    return true;
}

bool
TypeAnalyzer::graphContainsFloat32()
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() == MIRType_Float32)
                return true;
        }
    }
    return false;
}

bool
TypeAnalyzer::tryEmitFloatOperations()
{
    // Backends that currently don't know how to generate Float32 specialized instructions
    // shouldn't run this pass and should just leave all instructions specialized for Double.
    if (!LIRGenerator::allowFloat32Optimizations())
        return true;

    // Asm.js uses the ahead-of-time type checks to specialize operations, no need to check
    // them again at this point.
    if (mir->compilingAsmJS())
        return true;

    // Check ahead of time that there is at least one definition typed as Float32, otherwise we
    // don't need this pass.
    if (!graphContainsFloat32())
        return true;

    if (!markPhiConsumers())
        return false;
    if (!markPhiProducers())
        return false;
    if (!specializeValidFloatOps())
        return false;
    return true;
}

bool
TypeAnalyzer::checkFloatCoherency()
{
#ifdef DEBUG
    // Asserts that all Float32 instructions are flowing into Float32 consumers or specialized
    // operations.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Check Float32 coherency"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() != MIRType_Float32)
                continue;

            for (MUseDefIterator use(*def); use; use++) {
                MDefinition *consumer = use.def();
                JS_ASSERT(consumer->isConsistentFloat32Use(use.use()));
            }
        }
    }
#endif
    return true;
}

bool
TypeAnalyzer::analyze()
{
    if (!tryEmitFloatOperations())
        return false;
    if (!specializePhis())
        return false;
    if (!insertConversions())
        return false;
    if (!checkFloatCoherency())
        return false;
    return true;
}

bool
jit::ApplyTypeInformation(MIRGenerator *mir, MIRGraph &graph)
{
    TypeAnalyzer analyzer(mir, graph);

    if (!analyzer.analyze())
        return false;

    return true;
}

bool
jit::MakeMRegExpHoistable(MIRGraph &graph)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MDefinitionIterator iter(*block); iter; iter++) {
            if (!iter->isRegExp())
                continue;

            MRegExp *regexp = iter->toRegExp();

            // Test if MRegExp is hoistable by looking at all uses.
            bool hoistable = true;
            for (MUseIterator i = regexp->usesBegin(); i != regexp->usesEnd(); i++) {
                // Ignore resume points. At this point all uses are listed;
                // no DCE or GVN has happened yet.
                if (i->consumer()->isResumePoint())
                    continue;

                JS_ASSERT(i->consumer()->isDefinition());

                // None of the MRegExp* MIRs mutate the regexp.
                MDefinition *use = i->consumer()->toDefinition();
                if (use->isRegExpReplace())
                    continue;
                if (use->isRegExpExec())
                    continue;
                if (use->isRegExpTest())
                    continue;

                hoistable = false;
                break;
            }

            if (!hoistable)
                continue;

            // Make MRegExp hoistable.
            regexp->setMovable();

            // Hoisting would be incorrect for global/sticky regexps, because
            // lastIndex could be wrong. Therefore set lastIndex to 0; that is
            // still faster than a non-movable regexp.
            RegExpObject *source = regexp->source();
            if (source->sticky() || source->global()) {
                JS_ASSERT(regexp->mustClone());
                MConstant *zero = MConstant::New(graph.alloc(), Int32Value(0));
                regexp->block()->insertAfter(regexp, zero);

                MStoreFixedSlot *lastIndex =
                    MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
                regexp->block()->insertAfter(zero, lastIndex);
            }
        }
    }

    return true;
}

bool
jit::RenumberBlocks(MIRGraph &graph)
{
    size_t id = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
        block->setId(id++);

    return true;
}

// A Simple, Fast Dominance Algorithm by Cooper et al.
// Modified to support empty intersections for OSR, and in RPO.
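// Worked example (added illustration): with RPO ids A=0, B=1, C=2 and
// immediate dominators idom(B) = idom(C) = A, IntersectDominators(B, C)
// repeatedly advances whichever finger has the larger id to its immediate
// dominator, so both fingers converge on A. If a finger instead reaches a
// self-dominating block (a root), the walk cannot continue and nullptr is
// returned to denote an empty intersection.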
static MBasicBlock *
IntersectDominators(MBasicBlock *block1, MBasicBlock *block2)
{
    MBasicBlock *finger1 = block1;
    MBasicBlock *finger2 = block2;

    JS_ASSERT(finger1);
    JS_ASSERT(finger2);

    // In the original paper, the block ID comparisons are on the postorder index.
    // This implementation iterates in RPO, so the comparisons are reversed.

    // For this function to be called, the block must have multiple predecessors.
    // If a finger is then found to be self-dominating, it must therefore be
    // reachable from multiple roots through non-intersecting control flow.
    // nullptr is returned in this case, to denote an empty intersection.

    while (finger1->id() != finger2->id()) {
        while (finger1->id() > finger2->id()) {
            MBasicBlock *idom = finger1->immediateDominator();
            if (idom == finger1)
                return nullptr; // Empty intersection.
            finger1 = idom;
        }

        while (finger2->id() > finger1->id()) {
            MBasicBlock *idom = finger2->immediateDominator();
            if (idom == finger2)
                return nullptr; // Empty intersection.
            finger2 = idom;
        }
    }
    return finger1;
}

static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->immediateDominator() == nullptr)
                    continue;

                newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == nullptr) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != nullptr);
    }
#endif
}

bool
jit::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }
        index++;
    }

    return true;
}

bool
jit::BuildPhiReverseMapping(MIRGraph &graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    // * Blocks with less than two predecessors cannot have phis.
    // * Breaks. A break always has exactly one successor, and the break
    //   catch block has exactly one predecessor for each break, as
    //   well as a final predecessor for the actual loop exit.
    // * Continues. A continue always has exactly one successor, and the
    //   continue catch block has exactly one predecessor for each
    //   continue, as well as a final predecessor for the actual
    //   loop continuation. The continue itself has exactly one
    //   successor.
    // * An if. Each branch has exactly one predecessor.
    // * A switch. Each branch has exactly one predecessor.
    // * Loop tail. A new block is always created for the exit, and if a
    //   break statement is present, the exit block will forward
    //   directly to the break block.
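    //
    // For example (added illustration): if block B has predecessors P0 and
    // P1 and a phi p = phi(v0, v1), then setSuccessorWithPhis(B, 0) is
    // called on P0 and setSuccessorWithPhis(B, 1) on P1, so that later
    // passes can find, from a predecessor, which phi operand (v0 or v1)
    // it supplies.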
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numPredecessors() < 2) {
            JS_ASSERT(block->phisEmpty());
            continue;
        }

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock *pred = block->getPredecessor(j);

#ifdef DEBUG
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock *successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            JS_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}

#ifdef DEBUG
static bool
CheckSuccessorImpliesPredecessor(MBasicBlock *A, MBasicBlock *B)
{
    // Assuming B = succ(A), verify A = pred(B).
    for (size_t i = 0; i < B->numPredecessors(); i++) {
        if (A == B->getPredecessor(i))
            return true;
    }
    return false;
}

static bool
CheckPredecessorImpliesSuccessor(MBasicBlock *A, MBasicBlock *B)
{
    // Assuming B = pred(A), verify A = succ(B).
    for (size_t i = 0; i < B->numSuccessors(); i++) {
        if (A == B->getSuccessor(i))
            return true;
    }
    return false;
}

static bool
CheckOperandImpliesUse(MNode *n, MDefinition *operand)
{
    for (MUseIterator i = operand->usesBegin(); i != operand->usesEnd(); i++) {
        if (i->consumer() == n)
            return true;
    }
    return false;
}

static bool
CheckUseImpliesOperand(MDefinition *def, MUse *use)
{
    return use->consumer()->getOperand(use->index()) == def;
}
#endif // DEBUG

void
jit::AssertBasicGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    JS_ASSERT(graph.entryBlock()->numPredecessors() == 0);
    JS_ASSERT(graph.entryBlock()->phisEmpty());
    JS_ASSERT(!graph.entryBlock()->unreachable());

    if (MBasicBlock *osrBlock = graph.osrBlock()) {
        JS_ASSERT(osrBlock->numPredecessors() == 0);
        JS_ASSERT(osrBlock->phisEmpty());
        JS_ASSERT(osrBlock != graph.entryBlock());
        JS_ASSERT(!osrBlock->unreachable());
    }

    if (MResumePoint *resumePoint = graph.entryResumePoint())
        JS_ASSERT(resumePoint->block() == graph.entryBlock());

    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        JS_ASSERT(&block->graph() == &graph);

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        // Assert that use chains are valid for this instruction.
        for (MDefinitionIterator iter(*block); iter; iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
                JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
        }
        for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
                if (iter->getUseFor(i)->hasProducer())
                    JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
            }
        }
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            JS_ASSERT(phi->numOperands() == block->numPredecessors());
        }
        for (MDefinitionIterator iter(*block); iter; iter++) {
            JS_ASSERT(iter->block() == *block);
            for (MUseIterator i(iter->usesBegin()); i != iter->usesEnd(); i++)
                JS_ASSERT(CheckUseImpliesOperand(*iter, *i));

            if (iter->isInstruction()) {
                if (MResumePoint *resume = iter->toInstruction()->resumePoint()) {
                    if (MInstruction *ins = resume->instruction())
                        JS_ASSERT(ins->block() == iter->block());
                }
            }
        }
    }

    JS_ASSERT(graph.numBlocks() == count);
#endif
}

#ifdef DEBUG
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // Check that every block is visited after all its predecessors (except backedges).
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JS_ASSERT(!block->isMarked());

        for (size_t i = 0; i < block->numPredecessors(); i++) {
            MBasicBlock *pred = block->getPredecessor(i);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }

        block->mark();
    }

    graph.unmarkBlocks();
}
#endif

void
jit::AssertGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    if (!js_JitOptions.checkGraphConsistency)
        return;
    AssertBasicGraphCoherency(graph);
    AssertReversePostOrder(graph);
#endif
}

void
jit::AssertExtendedGraphCoherency(MIRGraph &graph)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split).

#ifdef DEBUG
    if (!js_JitOptions.checkGraphConsistency)
        return;
    AssertGraphCoherency(graph);

    uint32_t idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->id() == idx++);

        // No critical edges:
        if (block->numSuccessors() > 1)
            for (size_t i = 0; i < block->numSuccessors(); i++)
                JS_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);

        if (block->isLoopHeader()) {
            JS_ASSERT(block->numPredecessors() == 2);
            MBasicBlock *backedge = block->getPredecessor(1);
            JS_ASSERT(backedge->id() >= block->id());
            JS_ASSERT(backedge->numSuccessors() == 1);
            JS_ASSERT(backedge->getSuccessor(0) == *block);
        }

        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                JS_ASSERT(pred->successorWithPhis() == *block);
                JS_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }

        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;

        JS_ASSERT(successorWithPhis <= 1);
        JS_ASSERT_IF(successorWithPhis, block->successorWithPhis() != nullptr);

        // I'd like to assert this, but it's not necessarily true. Sometimes we set this
        // flag to non-nullptr just because a successor has multiple preds, even if it
        // does not actually have any phis.
        //
        // JS_ASSERT_IF(!successorWithPhis, block->successorWithPhis() == nullptr);
    }
#endif
}

struct BoundsCheckInfo
{
    MBoundsCheck *check;
    uint32_t validUntil;
};

typedef HashMap<uint32_t,
                BoundsCheckInfo,
                DefaultHasher<uint32_t>,
                IonAllocPolicy> BoundsCheckMap;

// Compute a hash for bounds checks which ignores constant offsets in the index.
static HashNumber
BoundsCheckHashIgnoreOffset(MBoundsCheck *check)
{
    SimpleLinearSum indexSum = ExtractLinearSum(check->index());
    uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
    uintptr_t length = uintptr_t(check->length());
    return index ^ length;
}

static MBoundsCheck *
FindDominatingBoundsCheck(BoundsCheckMap &checks, MBoundsCheck *check, size_t index)
{
    // See the comment in ValueNumberer::findDominatingDef.
    HashNumber hash = BoundsCheckHashIgnoreOffset(check);
    BoundsCheckMap::Ptr p = checks.lookup(hash);
    if (!p || index > p->value().validUntil) {
        // We didn't find a dominating bounds check.
        BoundsCheckInfo info;
        info.check = check;
        info.validUntil = index + check->block()->numDominated();

        if (!checks.put(hash, info))
            return nullptr;

        return check;
    }

    return p->value().check;
}

// Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
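// For example (added illustration): for ins = (x + 3) this returns
// {term: x, constant: 3}; for a constant 7 it returns {term: nullptr,
// constant: 7}; for anything unanalyzable it returns {term: ins, constant: 0}.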
SimpleLinearSum
jit::ExtractLinearSum(MDefinition *ins)
{
    if (ins->isBeta())
        ins = ins->getOperand(0);

    if (ins->type() != MIRType_Int32)
        return SimpleLinearSum(ins, 0);

    if (ins->isConstant()) {
        const Value &v = ins->toConstant()->value();
        JS_ASSERT(v.isInt32());
        return SimpleLinearSum(nullptr, v.toInt32());
    } else if (ins->isAdd() || ins->isSub()) {
        MDefinition *lhs = ins->getOperand(0);
        MDefinition *rhs = ins->getOperand(1);
        if (lhs->type() == MIRType_Int32 && rhs->type() == MIRType_Int32) {
            SimpleLinearSum lsum = ExtractLinearSum(lhs);
            SimpleLinearSum rsum = ExtractLinearSum(rhs);

            if (lsum.term && rsum.term)
                return SimpleLinearSum(ins, 0);

            // Check if this is of the form <SUM> + n, n + <SUM> or <SUM> - n.
            if (ins->isAdd()) {
                int32_t constant;
                if (!SafeAdd(lsum.constant, rsum.constant, &constant))
                    return SimpleLinearSum(ins, 0);
                return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
            } else if (lsum.term) {
                int32_t constant;
                if (!SafeSub(lsum.constant, rsum.constant, &constant))
                    return SimpleLinearSum(ins, 0);
                return SimpleLinearSum(lsum.term, constant);
            }
        }
    }

    return SimpleLinearSum(ins, 0);
}

// Extract a linear inequality holding when a boolean test goes in the
// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
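// For example (added illustration): for a test 'if (i < n)' taken in the
// TRUE_BRANCH direction, this extracts *plhs = {term: i, constant: 1},
// *prhs = n and *plessEqual = true, i.e. the inequality i + 1 <= n.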
bool
jit::ExtractLinearInequality(MTest *test, BranchDirection direction,
                             SimpleLinearSum *plhs, MDefinition **prhs, bool *plessEqual)
{
    if (!test->getOperand(0)->isCompare())
        return false;

    MCompare *compare = test->getOperand(0)->toCompare();

    MDefinition *lhs = compare->getOperand(0);
    MDefinition *rhs = compare->getOperand(1);

    // TODO: optimize Compare_UInt32
    if (!compare->isInt32Comparison())
        return false;

    JS_ASSERT(lhs->type() == MIRType_Int32);
    JS_ASSERT(rhs->type() == MIRType_Int32);

    JSOp jsop = compare->jsop();
    if (direction == FALSE_BRANCH)
        jsop = NegateCompareOp(jsop);

    SimpleLinearSum lsum = ExtractLinearSum(lhs);
    SimpleLinearSum rsum = ExtractLinearSum(rhs);

    if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant))
        return false;

    // Normalize operations to use <= or >=.
    switch (jsop) {
      case JSOP_LE:
        *plessEqual = true;
        break;
      case JSOP_LT:
        /* x < y ==> x + 1 <= y */
        if (!SafeAdd(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = true;
        break;
      case JSOP_GE:
        *plessEqual = false;
        break;
      case JSOP_GT:
        /* x > y ==> x - 1 >= y */
        if (!SafeSub(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = false;
        break;
      default:
        return false;
    }

    *plhs = lsum;
    *prhs = rsum.term;

    return true;
}

static bool
TryEliminateBoundsCheck(BoundsCheckMap &checks, size_t blockIndex, MBoundsCheck *dominated, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    // Replace all uses of the bounds check with the actual index.
    // This is (a) necessary, because we can coalesce two different
    // bounds checks and would otherwise use the wrong index and
    // (b) helps register allocation. Note that this is safe since
    // no other pass after bounds check elimination moves instructions.
    dominated->replaceAllUsesWith(dominated->index());

    if (!dominated->isMovable())
        return true;

    MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, dominated, blockIndex);
    if (!dominating)
        return false;

    if (dominating == dominated) {
        // We didn't find a dominating bounds check.
        return true;
    }

    // We found two bounds checks with the same hash number, but we still have
    // to make sure the lengths and index terms are equal.
    if (dominating->length() != dominated->length())
        return true;

    SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
    SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

    // Both terms should be nullptr or the same definition.
    if (sumA.term != sumB.term)
        return true;

    // This bounds check is redundant.
    *eliminated = true;

    // Normalize the ranges according to the constant offsets in the two indexes.
    int32_t minimumA, maximumA, minimumB, maximumB;
    if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
        !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
        !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
        !SafeAdd(sumB.constant, dominated->maximum(), &maximumB))
    {
        return false;
    }

    // Update the dominating check to cover both ranges, denormalizing the
    // result per the constant offset in the index.
    int32_t newMinimum, newMaximum;
    if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) ||
        !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum))
    {
        return false;
    }

    dominating->setMinimum(newMinimum);
    dominating->setMaximum(newMaximum);
    return true;
}

1714 static void
1715 TryEliminateTypeBarrierFromTest(MTypeBarrier *barrier, bool filtersNull, bool filtersUndefined,
1716 MTest *test, BranchDirection direction, bool *eliminated)
1717 {
1718 JS_ASSERT(filtersNull || filtersUndefined);
1720 // Watch for code patterns similar to 'if (x.f) { ... = x.f }'. If x.f
1721 // is either an object or null/undefined, there will be a type barrier on
1722 // the latter read, because the null/undefined value is never actually
1723 // observed there. The type barrier can be eliminated, however, when a
1724 // dominating test on the result of the first access filters out every
1725 // type seen at the first access but not at the second.
1727 // A test 'if (x.f)' filters both null and undefined.
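// A comparison such as 'if (x.f !== undefined)' would, for instance, filter
// only undefined; the removeUndefined/removeNull flags below record which of
// the two values the test actually rules out.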
1729 // For this check, look through any infallible unbox added before the TypeBarrier.
1730 MDefinition *input = barrier->input();
1731 MUnbox *inputUnbox = nullptr;
1732 if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
1733 inputUnbox = input->toUnbox();
1734 input = inputUnbox->input();
1735 }
1737 MDefinition *subject = nullptr;
1738 bool removeUndefined;
1739 bool removeNull;
1740 test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);
1742 // The test filters neither undefined nor null.
1743 if (!subject)
1744 return;
1746 // Make sure the subject equals the input to the TypeBarrier.
1747 if (subject != input)
1748 return;
1750 // When the TypeBarrier filters undefined, the test must also filter
1751 // undefined before the TypeBarrier can be removed.
1752 if (!removeUndefined && filtersUndefined)
1753 return;
1755 // When the TypeBarrier filters null, the test must also filter null
1756 // before the TypeBarrier can be removed.
1757 if (!removeNull && filtersNull)
1758 return;
1760 // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
1761 // but made infallible.
1762 *eliminated = true;
1763 if (inputUnbox)
1764 inputUnbox->makeInfallible();
1765 barrier->replaceAllUsesWith(barrier->input());
1766 }
1768 static bool
1769 TryEliminateTypeBarrier(MTypeBarrier *barrier, bool *eliminated)
1770 {
1771 JS_ASSERT(!*eliminated);
1773 const types::TemporaryTypeSet *barrierTypes = barrier->resultTypeSet();
1774 const types::TemporaryTypeSet *inputTypes = barrier->input()->resultTypeSet();
1776 // Look through any infallible unbox added before the TypeBarrier.
1777 if (barrier->input()->isUnbox() && barrier->input()->toUnbox()->mode() != MUnbox::Fallible)
1778 inputTypes = barrier->input()->toUnbox()->input()->resultTypeSet();
1780 if (!barrierTypes || !inputTypes)
1781 return true;
1783 bool filtersNull = barrierTypes->filtersType(inputTypes, types::Type::NullType());
1784 bool filtersUndefined = barrierTypes->filtersType(inputTypes, types::Type::UndefinedType());
1786 if (!filtersNull && !filtersUndefined)
1787 return true;
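// Walk the dominator chain upward from the barrier's block, trying each
// dominating branch, until reaching the entry block (which dominates
// itself).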
1789 MBasicBlock *block = barrier->block();
1790 while (true) {
1791 BranchDirection direction;
1792 MTest *test = block->immediateDominatorBranch(&direction);
1794 if (test) {
1795 TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
1796 test, direction, eliminated);
1797 }
1799 MBasicBlock *previous = block->immediateDominator();
1800 if (previous == block)
1801 break;
1802 block = previous;
1803 }
1805 return true;
1806 }
1808 // Eliminate checks which are redundant given each other or other instructions.
1809 //
1810 // A type barrier is considered redundant if all missing types have been tested
1811 // for by earlier control instructions.
1812 //
1813 // A bounds check is considered redundant if it's dominated by another bounds
1814 // check with the same length and the indexes differ by only a constant amount.
1815 // In this case we eliminate the redundant bounds check and update the other one
1816 // to cover the ranges of both checks.
1817 //
1818 // Bounds checks are added to a hash map and since the hash function ignores
1819 // differences in constant offset, this offers a fast way to find redundant
1820 // checks.
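// For example, checks on 'i' and on 'i + 8' against the same length hash
// identically, so the dominated check can be located quickly and folded into
// the dominating one instead of being emitted twice.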
1821 bool
1822 jit::EliminateRedundantChecks(MIRGraph &graph)
1823 {
1824 BoundsCheckMap checks(graph.alloc());
1826 if (!checks.init())
1827 return false;
1829 // Stack for pre-order CFG traversal.
1830 Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());
1832 // The index of the current block in the CFG traversal.
1833 size_t index = 0;
1835 // Add all self-dominating blocks to the worklist.
1836 // This includes all roots. Order does not matter.
1837 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
1838 MBasicBlock *block = *i;
1839 if (block->immediateDominator() == block) {
1840 if (!worklist.append(block))
1841 return false;
1842 }
1843 }
1845 // Starting from each self-dominating block, traverse the CFG in pre-order.
1846 while (!worklist.empty()) {
1847 MBasicBlock *block = worklist.popCopy();
1849 // Push all immediately dominated blocks onto the worklist.
1850 if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
1851 block->immediatelyDominatedBlocksEnd())) {
1852 return false;
1853 }
1855 for (MDefinitionIterator iter(block); iter; ) {
1856 bool eliminated = false;
1858 if (iter->isBoundsCheck()) {
1859 if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
1860 return false;
1861 } else if (iter->isTypeBarrier()) {
1862 if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
1863 return false;
1864 } else if (iter->isConvertElementsToDoubles()) {
1865 // Now that code motion passes have finished, replace any
1866 // ConvertElementsToDoubles with the actual elements.
1867 MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
1868 ins->replaceAllUsesWith(ins->elements());
1869 }
1871 if (eliminated)
1872 iter = block->discardDefAt(iter);
1873 else
1874 iter++;
1875 }
1876 index++;
1877 }
1879 JS_ASSERT(index == graph.numBlocks());
1880 return true;
1881 }
1883 // If the given block contains a goto and nothing interesting before that,
1884 // return the goto. Return nullptr otherwise.
1885 static LGoto *
1886 FindLeadingGoto(LBlock *bb)
1887 {
1888 for (LInstructionIterator ins(bb->begin()); ins != bb->end(); ins++) {
1889 // Ignore labels.
1890 if (ins->isLabel())
1891 continue;
1892 // If we have a goto, we're good to go.
1893 if (ins->isGoto())
1894 return ins->toGoto();
1895 break;
1896 }
1897 return nullptr;
1898 }
1900 // Eliminate blocks containing nothing interesting besides gotos. These are
1901 // often created by the optimizer, which splits all critical edges. If these
1902 // splits end up being unused after optimization and register allocation,
1903 // fold them back away to avoid unnecessary branching.
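// A typical candidate is a block added by SplitCriticalEdges that still
// contains nothing but a label and a goto once register allocation is done.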
1904 bool
1905 jit::UnsplitEdges(LIRGraph *lir)
1906 {
1907 for (size_t i = 0; i < lir->numBlocks(); i++) {
1908 LBlock *bb = lir->getBlock(i);
1909 MBasicBlock *mirBlock = bb->mir();
1911 // Renumber the MIR blocks as we go, since we may remove some.
1912 mirBlock->setId(i);
1914 // Register allocation is done by this point, so we don't need the phis
1915 // anymore. Clear them to avoid needing to keep them current as we edit
1916 // the CFG.
1917 bb->clearPhis();
1918 mirBlock->discardAllPhis();
1920 // First make sure the MIR block looks sane. Some of these checks may be
1921 // over-conservative, but we're attempting to keep everything in MIR
1922 // current as we modify the LIR, so only proceed if the MIR is simple.
1923 if (mirBlock->numPredecessors() == 0 || mirBlock->numSuccessors() != 1 ||
1924 !mirBlock->begin()->isGoto())
1925 {
1926 continue;
1927 }
1929 // The MIR block is empty, but check the LIR block too (in case the
1930 // register allocator inserted spill code there, for instance).
1931 LGoto *theGoto = FindLeadingGoto(bb);
1932 if (!theGoto)
1933 continue;
1934 MBasicBlock *target = theGoto->target();
1935 if (target == mirBlock || target != mirBlock->getSuccessor(0))
1936 continue;
1938 // If we haven't yet cleared the phis for the successor, do so now so
1939 // that the CFG manipulation routines don't trip over them.
1940 if (!target->phisEmpty()) {
1941 target->discardAllPhis();
1942 target->lir()->clearPhis();
1943 }
1945 // Edit the CFG to remove lir/mirBlock and reconnect all its edges.
1946 for (size_t j = 0; j < mirBlock->numPredecessors(); j++) {
1947 MBasicBlock *mirPred = mirBlock->getPredecessor(j);
1949 for (size_t k = 0; k < mirPred->numSuccessors(); k++) {
1950 if (mirPred->getSuccessor(k) == mirBlock) {
1951 mirPred->replaceSuccessor(k, target);
1952 if (!target->addPredecessorWithoutPhis(mirPred))
1953 return false;
1954 }
1955 }
1957 LInstruction *predTerm = *mirPred->lir()->rbegin();
1958 for (size_t k = 0; k < predTerm->numSuccessors(); k++) {
1959 if (predTerm->getSuccessor(k) == mirBlock)
1960 predTerm->setSuccessor(k, target);
1961 }
1962 }
1963 target->removePredecessor(mirBlock);
1965 // Zap the block.
1966 lir->removeBlock(i);
1967 lir->mir().removeBlock(mirBlock);
1968 --i;
1969 }
1971 return true;
1972 }
1974 bool
1975 LinearSum::multiply(int32_t scale)
1976 {
1977 for (size_t i = 0; i < terms_.length(); i++) {
1978 if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
1979 return false;
1980 }
1981 return SafeMul(scale, constant_, &constant_);
1982 }
1984 bool
1985 LinearSum::add(const LinearSum &other)
1986 {
1987 for (size_t i = 0; i < other.terms_.length(); i++) {
1988 if (!add(other.terms_[i].term, other.terms_[i].scale))
1989 return false;
1990 }
1991 return add(other.constant_);
1992 }
1994 bool
1995 LinearSum::add(MDefinition *term, int32_t scale)
1996 {
1997 JS_ASSERT(term);
1999 if (scale == 0)
2000 return true;
2002 if (term->isConstant()) {
2003 int32_t constant = term->toConstant()->value().toInt32();
2004 if (!SafeMul(constant, scale, &constant))
2005 return false;
2006 return add(constant);
2007 }
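// If the definition already appears in the sum, fold the scales together,
// dropping the term entirely when they cancel; otherwise append it below as
// a fresh term.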
2009 for (size_t i = 0; i < terms_.length(); i++) {
2010 if (term == terms_[i].term) {
2011 if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale))
2012 return false;
2013 if (terms_[i].scale == 0) {
2014 terms_[i] = terms_.back();
2015 terms_.popBack();
2016 }
2017 return true;
2018 }
2019 }
2020 // Propagate OOM from the vector append rather than silently dropping the term.
2021 if (!terms_.append(LinearTerm(term, scale)))
2022 return false;
2023 return true;
2023 }
2025 bool
2026 LinearSum::add(int32_t constant)
2027 {
2028 return SafeAdd(constant, constant_, &constant_);
2029 }
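// Print the sum in compact form, e.g. "3*#5+#7-2" for 3*v5 + v7 - 2, where
// #N is the id of the MDefinition supplying the term.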
2031 void
2032 LinearSum::print(Sprinter &sp) const
2033 {
2034 for (size_t i = 0; i < terms_.length(); i++) {
2035 int32_t scale = terms_[i].scale;
2036 int32_t id = terms_[i].term->id();
2037 JS_ASSERT(scale);
2038 if (scale > 0) {
2039 if (i)
2040 sp.printf("+");
2041 if (scale == 1)
2042 sp.printf("#%d", id);
2043 else
2044 sp.printf("%d*#%d", scale, id);
2045 } else if (scale == -1) {
2046 sp.printf("-#%d", id);
2047 } else {
2048 sp.printf("%d*#%d", scale, id);
2049 }
2050 }
2051 if (constant_ > 0)
2052 sp.printf("+%d", constant_);
2053 else if (constant_ < 0)
2054 sp.printf("%d", constant_);
2055 }
2057 void
2058 LinearSum::dump(FILE *fp) const
2059 {
2060 Sprinter sp(GetIonContext()->cx);
2061 sp.init();
2062 print(sp);
2063 fprintf(fp, "%s\n", sp.string());
2064 }
2066 void
2067 LinearSum::dump() const
2068 {
2069 dump(stderr);
2070 }
2072 static bool
2073 AnalyzePoppedThis(JSContext *cx, types::TypeObject *type,
2074 MDefinition *thisValue, MInstruction *ins, bool definitelyExecuted,
2075 HandleObject baseobj,
2076 Vector<types::TypeNewScript::Initializer> *initializerList,
2077 Vector<PropertyName *> *accessedProperties,
2078 bool *phandled)
2079 {
2080 // Determine the effect that a use of the |this| value when calling |new|
2081 // on a script has on the properties definitely held by the new object.
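// For instance (illustrative only), 'this.x = v' early in a constructor can
// make .x a definite property, while an unhandled use of |this| leaves
// *phandled unset and stops the caller from adding further definite
// properties.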
2083 if (ins->isCallSetProperty()) {
2084 MCallSetProperty *setprop = ins->toCallSetProperty();
2086 if (setprop->object() != thisValue)
2087 return true;
2089 // Don't use GetAtomId here; we need to watch for SETPROP on
2090 // integer properties and bail out. We can't mark the aggregate
2091 // JSID_VOID type property as being in a definite slot.
2092 if (setprop->name() == cx->names().prototype ||
2093 setprop->name() == cx->names().proto ||
2094 setprop->name() == cx->names().constructor)
2095 {
2096 return true;
2097 }
2099 // Ignore assignments to properties that were already written to.
2100 if (baseobj->nativeLookup(cx, NameToId(setprop->name()))) {
2101 *phandled = true;
2102 return true;
2103 }
2105 // Don't add definite properties for properties that were already
2106 // read in the constructor.
2107 for (size_t i = 0; i < accessedProperties->length(); i++) {
2108 if ((*accessedProperties)[i] == setprop->name())
2109 return true;
2110 }
2112 // Don't add definite properties to an object which won't fit in its
2113 // fixed slots.
2114 if (GetGCKindSlots(gc::GetGCObjectKind(baseobj->slotSpan() + 1)) <= baseobj->slotSpan())
2115 return true;
2117 // Assignments to new properties must always execute.
2118 if (!definitelyExecuted)
2119 return true;
2121 RootedId id(cx, NameToId(setprop->name()));
2122 if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
2123 // The prototype chain already contains a getter/setter for this
2124 // property, or type information is too imprecise.
2125 return true;
2126 }
2128 DebugOnly<unsigned> slotSpan = baseobj->slotSpan();
2129 if (!DefineNativeProperty(cx, baseobj, id, UndefinedHandleValue, nullptr, nullptr,
2130 JSPROP_ENUMERATE))
2131 {
2132 return false;
2133 }
2134 JS_ASSERT(baseobj->slotSpan() != slotSpan);
2135 JS_ASSERT(!baseobj->inDictionaryMode());
2137 Vector<MResumePoint *> callerResumePoints(cx);
2138 MBasicBlock *block = ins->block();
2139 for (MResumePoint *rp = block->callerResumePoint();
2140 rp;
2141 block = rp->block(), rp = block->callerResumePoint())
2142 {
2143 JSScript *script = rp->block()->info().script();
2144 if (!types::AddClearDefiniteFunctionUsesInScript(cx, type, script, block->info().script()))
2145 return true;
2146 if (!callerResumePoints.append(rp))
2147 return false;
2148 }
2150 for (int i = callerResumePoints.length() - 1; i >= 0; i--) {
2151 MResumePoint *rp = callerResumePoints[i];
2152 JSScript *script = rp->block()->info().script();
2153 types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP_FRAME,
2154 script->pcToOffset(rp->pc()));
2155 if (!initializerList->append(entry))
2156 return false;
2157 }
2159 JSScript *script = ins->block()->info().script();
2160 types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP,
2161 script->pcToOffset(setprop->resumePoint()->pc()));
2162 if (!initializerList->append(entry))
2163 return false;
2165 *phandled = true;
2166 return true;
2167 }
2169 if (ins->isCallGetProperty()) {
2170 MCallGetProperty *get = ins->toCallGetProperty();
2172 /*
2173 * Properties can be read from the 'this' object if the following hold:
2174 *
2175 * - The read is not on a getter along the prototype chain, which
2176 * could cause 'this' to escape.
2177 *
2178 * - The accessed property is either already a definite property or
2179 * is not later added as one. Since the definite properties are
2180 * added to the object at the point of its creation, reading a
2181 * definite property before it is assigned could incorrectly hit.
2182 */
2183 RootedId id(cx, NameToId(get->name()));
2184 if (!baseobj->nativeLookup(cx, id) && !accessedProperties->append(get->name()))
2185 return false;
2187 if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
2188 // The |this| value can escape if any property read it performs goes
2189 // through a getter.
2190 return true;
2191 }
2193 *phandled = true;
2194 return true;
2195 }
2197 if (ins->isPostWriteBarrier()) {
2198 *phandled = true;
2199 return true;
2200 }
2202 return true;
2203 }
2205 static int
2206 CmpInstructions(const void *a, const void *b)
2207 {
2208 return (*static_cast<MInstruction * const *>(a))->id() -
2209 (*static_cast<MInstruction * const *>(b))->id();
2210 }
2212 bool
2213 jit::AnalyzeNewScriptProperties(JSContext *cx, JSFunction *fun,
2214 types::TypeObject *type, HandleObject baseobj,
2215 Vector<types::TypeNewScript::Initializer> *initializerList)
2216 {
2217 JS_ASSERT(cx->compartment()->activeAnalysis);
2219 // When invoking 'new' on the specified script, try to find some properties
2220 // which will definitely be added to the created object before it has a
2221 // chance to escape and be accessed elsewhere.
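// For example (hypothetical script), for 'function T() { this.a = 0;
// this.b = 1; }' the analysis can report .a and .b as definite, letting
// objects created by 'new T' be allocated with those slots reserved up
// front.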
2223 RootedScript script(cx, fun->getOrCreateScript(cx));
2224 if (!script)
2225 return false;
2227 if (!jit::IsIonEnabled(cx) || !jit::IsBaselineEnabled(cx) ||
2228 !script->compileAndGo() || !script->canBaselineCompile())
2229 {
2230 return true;
2231 }
2233 static const uint32_t MAX_SCRIPT_SIZE = 2000;
2234 if (script->length() > MAX_SCRIPT_SIZE)
2235 return true;
2237 Vector<PropertyName *> accessedProperties(cx);
2239 LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
2241 TempAllocator temp(&alloc);
2242 IonContext ictx(cx, &temp);
2244 if (!cx->compartment()->ensureJitCompartmentExists(cx))
2245 return false;
2247 if (!script->hasBaselineScript()) {
2248 MethodStatus status = BaselineCompile(cx, script);
2249 if (status == Method_Error)
2250 return false;
2251 if (status != Method_Compiled)
2252 return true;
2253 }
2255 types::TypeScript::SetThis(cx, script, types::Type::ObjectType(type));
2257 MIRGraph graph(&temp);
2258 CompileInfo info(script, fun,
2259 /* osrPc = */ nullptr, /* constructing = */ false,
2260 DefinitePropertiesAnalysis,
2261 script->needsArgsObj());
2263 AutoTempAllocatorRooter root(cx, &temp);
2265 const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);
2267 types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
2268 if (!constraints) {
2269 js_ReportOutOfMemory(cx);
2270 return false;
2271 }
2273 BaselineInspector inspector(script);
2274 const JitCompileOptions options(cx);
2276 IonBuilder builder(cx, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
2277 &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
2279 if (!builder.build()) {
2280 if (builder.abortReason() == AbortReason_Alloc)
2281 return false;
2282 return true;
2283 }
2285 types::FinishDefinitePropertiesAnalysis(cx, constraints);
2287 if (!SplitCriticalEdges(graph))
2288 return false;
2290 if (!RenumberBlocks(graph))
2291 return false;
2293 if (!BuildDominatorTree(graph))
2294 return false;
2296 if (!EliminatePhis(&builder, graph, AggressiveObservability))
2297 return false;
2299 MDefinition *thisValue = graph.begin()->getSlot(info.thisSlot());
2301 // Get a list of instructions using the |this| value in the order they
2302 // appear in the graph.
2303 Vector<MInstruction *> instructions(cx);
2305 for (MUseDefIterator uses(thisValue); uses; uses++) {
2306 MDefinition *use = uses.def();
2308 // Don't track |this| through assignments to phis.
2309 if (!use->isInstruction())
2310 return true;
2312 if (!instructions.append(use->toInstruction()))
2313 return false;
2314 }
2316 // Sort the instructions to visit in increasing order.
2317 qsort(instructions.begin(), instructions.length(),
2318 sizeof(MInstruction *), CmpInstructions);
2320 // Find all exit blocks in the graph.
2321 Vector<MBasicBlock *> exitBlocks(cx);
2322 for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
2323 if (!block->numSuccessors() && !exitBlocks.append(*block))
2324 return false;
2325 }
2327 for (size_t i = 0; i < instructions.length(); i++) {
2328 MInstruction *ins = instructions[i];
2330 // Track whether the use of |this| is in unconditional code, i.e.
2331 // the block dominates all graph exits.
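// Walk up the dominator tree from each exit; hitting a self-dominating root
// before reaching ins->block() means that exit is not dominated by the
// instruction's block.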
2332 bool definitelyExecuted = true;
2333 for (size_t i = 0; i < exitBlocks.length(); i++) {
2334 for (MBasicBlock *exit = exitBlocks[i];
2335 exit != ins->block();
2336 exit = exit->immediateDominator())
2337 {
2338 if (exit == exit->immediateDominator()) {
2339 definitelyExecuted = false;
2340 break;
2341 }
2342 }
2343 }
2345 // Also check whether the instruction is inside a loop body. Even if
2346 // an access always executes in the script, executing it multiple times
2347 // can confuse us when rolling back objects while clearing the new
2348 // script information.
2349 if (ins->block()->loopDepth() != 0)
2350 definitelyExecuted = false;
2352 bool handled = false;
2353 if (!AnalyzePoppedThis(cx, type, thisValue, ins, definitelyExecuted,
2354 baseobj, initializerList, &accessedProperties, &handled))
2355 {
2356 return false;
2357 }
2358 if (!handled)
2359 return true;
2360 }
2362 return true;
2363 }
2365 static bool
2366 ArgumentsUseCanBeLazy(JSContext *cx, JSScript *script, MInstruction *ins, size_t index)
2367 {
2368 // We can read the frame's arguments directly for f.apply(x, arguments).
2369 if (ins->isCall()) {
2370 if (*ins->toCall()->resumePoint()->pc() == JSOP_FUNAPPLY &&
2371 ins->toCall()->numActualArgs() == 2 &&
2372 index == MCall::IndexOfArgument(1))
2373 {
2374 return true;
2375 }
2376 }
2378 // arguments[i] can read fp->canonicalActualArg(i) directly.
2379 if (ins->isCallGetElement() && index == 0)
2380 return true;
2382 // arguments.length can read fp->numActualArgs() directly.
2383 if (ins->isCallGetProperty() && index == 0 && ins->toCallGetProperty()->name() == cx->names().length)
2384 return true;
2386 return false;
2387 }
2389 bool
2390 jit::AnalyzeArgumentsUsage(JSContext *cx, JSScript *scriptArg)
2391 {
2392 RootedScript script(cx, scriptArg);
2393 types::AutoEnterAnalysis enter(cx);
2395 JS_ASSERT(!script->analyzedArgsUsage());
2397 // Treat the script as needing an arguments object until we determine it
2398 // does not need one. This both allows us to easily see where the arguments
2399 // object can escape through assignments to the function's named arguments,
2400 // and also simplifies handling of early returns.
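// As an illustration, a script whose only uses of |arguments| are
// 'arguments[i]', 'arguments.length', and 'f.apply(x, arguments)' can leave
// its arguments lazy; any other use fails ArgumentsUseCanBeLazy below and
// the early return keeps needsArgsObj set.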
2401 script->setNeedsArgsObj(true);
2403 if (!jit::IsIonEnabled(cx) || !script->compileAndGo())
2404 return true;
2406 static const uint32_t MAX_SCRIPT_SIZE = 10000;
2407 if (script->length() > MAX_SCRIPT_SIZE)
2408 return true;
2410 if (!script->ensureHasTypes(cx))
2411 return false;
2413 LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
2415 TempAllocator temp(&alloc);
2416 IonContext ictx(cx, &temp);
2418 if (!cx->compartment()->ensureJitCompartmentExists(cx))
2419 return false;
2421 MIRGraph graph(&temp);
2422 CompileInfo info(script, script->functionNonDelazifying(),
2423 /* osrPc = */ nullptr, /* constructing = */ false,
2424 ArgumentsUsageAnalysis,
2425 /* needsArgsObj = */ true);
2427 AutoTempAllocatorRooter root(cx, &temp);
2429 const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);
2431 types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
2432 if (!constraints)
2433 return false;
2435 BaselineInspector inspector(script);
2436 const JitCompileOptions options(cx);
2438 IonBuilder builder(nullptr, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
2439 &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
2441 if (!builder.build()) {
2442 if (builder.abortReason() == AbortReason_Alloc)
2443 return false;
2444 return true;
2445 }
2447 if (!SplitCriticalEdges(graph))
2448 return false;
2450 if (!RenumberBlocks(graph))
2451 return false;
2453 if (!BuildDominatorTree(graph))
2454 return false;
2456 if (!EliminatePhis(&builder, graph, AggressiveObservability))
2457 return false;
2459 MDefinition *argumentsValue = graph.begin()->getSlot(info.argsObjSlot());
2461 for (MUseDefIterator uses(argumentsValue); uses; uses++) {
2462 MDefinition *use = uses.def();
2464 // Don't track |arguments| through assignments to phis.
2465 if (!use->isInstruction())
2466 return true;
2468 if (!ArgumentsUseCanBeLazy(cx, script, use->toInstruction(), uses.index()))
2469 return true;
2470 }
2472 script->setNeedsArgsObj(false);
2473 return true;
2474 }