/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonAnalysis.h"

#include "jsanalyze.h"

#include "jit/BaselineInspector.h"
#include "jit/BaselineJIT.h"
#include "jit/Ion.h"
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/LIR.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"
#include "jsopcodeinlines.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
bool
jit::SplitCriticalEdges(MIRGraph &graph)
{
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numSuccessors() < 2)
            continue;
        for (size_t i = 0; i < block->numSuccessors(); i++) {
            MBasicBlock *target = block->getSuccessor(i);
            if (target->numPredecessors() < 2)
                continue;

            // Create a new block inheriting from the predecessor.
            MBasicBlock *split = MBasicBlock::NewSplitEdge(graph, block->info(), *block);
            if (!split)
                return false;
            split->setLoopDepth(block->loopDepth());
            graph.insertBlockAfter(*block, split);
            split->end(MGoto::New(graph.alloc(), target));

            block->replaceSuccessor(i, split);
            target->replacePredecessor(*block, split);
        }
    }
    return true;
}

// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
jit::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    // If we are compiling try blocks, locals and arguments may be observable
    // from catch or finally blocks (which Ion does not compile). For now just
    // disable the pass in this case.
    if (graph.hasTryBlock())
        return true;

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() || ins->isComputeThis())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isImplicitlyUsed())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->consumer()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->consumer()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // The operand is an uneliminable slot. This currently
                // includes argument slots in non-strict scripts (due to being
                // observable via Function.arguments).
                if (!block->info().canOptimizeOutSlot(uses->index())) {
                    uses++;
                    continue;
                }

                // Store an optimized out magic value in place of all dead
                // resume point operands. Making any such substitution can in
                // general alter the interpreter's behavior, even though the
                // code is dead, as the interpreter will still execute opcodes
                // whose effects cannot be observed.
                // If the undefined value were to flow to, say, a dead
                // property access, the interpreter could throw an exception;
                // we avoid this problem by removing dead operands before
                // removing dead code.
                MConstant *constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}

// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool
jit::EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph)
{
    // Traverse in postorder so that we hit uses before definitions.
    // Traverse instruction list backwards for the same reason.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
            return false;

        // Remove unused instructions.
        for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
            if (!inst->isEffectful() && !inst->resumePoint() &&
                !inst->hasUses() && !inst->isGuard() &&
                !inst->isControlInstruction()) {
                inst = block->discardAt(inst);
            } else {
                inst++;
            }
        }
    }

    return true;
}

static inline bool
IsPhiObservable(MPhi *phi, Observability observe)
{
    // If the phi has uses which are not reflected in SSA, then behavior in the
    // interpreter may be affected by removing the phi.
    if (phi->isImplicitlyUsed())
        return true;

    // Check for uses of this phi node outside of other phi nodes.
    // Note that, initially, we skip reading resume points, which we
    // don't count as actual uses. If the only uses are resume points,
    // then the SSA name is never consumed by the program. However,
    // after optimizations have been performed, it's possible that the
    // actual uses in the program have been (incorrectly) optimized
    // away, so we must be more conservative and consider resume
    // points as well.
    switch (observe) {
      case AggressiveObservability:
        for (MUseDefIterator iter(phi); iter; iter++) {
            if (!iter.def()->isPhi())
                return true;
        }
        break;

      case ConservativeObservability:
        for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
            if (!iter->consumer()->isDefinition() ||
                !iter->consumer()->toDefinition()->isPhi())
                return true;
        }
        break;
    }

    uint32_t slot = phi->slot();
    CompileInfo &info = phi->block()->info();
    JSFunction *fun = info.funMaybeLazy();

    // If the Phi is of the |this| value, it must always be observable.
    if (fun && slot == info.thisSlot())
        return true;

    // If the function may need an arguments object, then make sure to
    // preserve the scope chain, because it may be needed to construct the
    // arguments object during bailout. If we've already created an arguments
    // object (or got one via OSR), preserve that as well.
    if (fun && info.hasArguments() &&
        (slot == info.scopeChainSlot() || slot == info.argsObjSlot()))
    {
        return true;
    }

    // The Phi is an uneliminable slot. Currently this includes argument slots
    // in non-strict scripts (due to being observable via Function.arguments).
    if (fun && !info.canOptimizeOutSlot(slot))
        return true;

    return false;
}

// Handles cases like:
//    x is phi(a, x) --> a
//    x is phi(a, a) --> a
static inline MDefinition *
IsPhiRedundant(MPhi *phi)
{
    MDefinition *first = phi->operandIfRedundant();
    if (first == nullptr)
        return nullptr;

    // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
    if (phi->isImplicitlyUsed())
        first->setImplicitlyUsedUnchecked();

    return first;
}

bool
jit::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
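    // The pass proceeds in three steps: (1) flag every phi unused and enqueue
    // the observable ones, (2) propagate liveness to a fixed point, marking
    // the operands of every live phi as live, and (3) sweep the phis still
    // flagged unused.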
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused; only observable phis will be marked as used
            // when processed by the work list.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}

namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
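//
// For example, phi(int32, int32) specializes to int32; phi(int32, double)
// specializes to double, with an MToDouble inserted on the int32 input; and
// phi(int32, string) falls back to Value, with boxes inserted on the typed
// inputs.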
//
class TypeAnalyzer
{
    MIRGenerator *mir;
    MIRGraph &graph;
    Vector<MPhi *, 0, SystemAllocPolicy> phiWorklist_;

    TempAllocator &alloc() const {
        return graph.alloc();
    }

    bool addPhiToWorklist(MPhi *phi) {
        if (phi->isInWorklist())
            return true;
        if (!phiWorklist_.append(phi))
            return false;
        phi->setInWorklist();
        return true;
    }
    MPhi *popPhi() {
        MPhi *phi = phiWorklist_.popCopy();
        phi->setNotInWorklist();
        return phi;
    }

    bool respecialize(MPhi *phi, MIRType type);
    bool propagateSpecialization(MPhi *phi);
    bool specializePhis();
    void replaceRedundantPhi(MPhi *phi);
    void adjustPhiInputs(MPhi *phi);
    bool adjustInputs(MDefinition *def);
    bool insertConversions();

    bool checkFloatCoherency();
    bool graphContainsFloat32();
    bool markPhiConsumers();
    bool markPhiProducers();
    bool specializeValidFloatOps();
    bool tryEmitFloatOperations();

  public:
    TypeAnalyzer(MIRGenerator *mir, MIRGraph &graph)
      : mir(mir), graph(graph)
    { }

    bool analyze();
};

} /* anonymous namespace */

// Try to specialize this phi based on its non-cyclic inputs.
static MIRType
GuessPhiType(MPhi *phi, bool *hasInputsWithEmptyTypes)
{
#ifdef DEBUG
    // Check that different magic constants aren't flowing together. Ignore
    // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
    // away.
    MIRType magicType = MIRType_None;
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_MagicOptimizedArguments ||
            in->type() == MIRType_MagicHole ||
            in->type() == MIRType_MagicIsConstructing)
        {
            if (magicType == MIRType_None)
                magicType = in->type();
            MOZ_ASSERT(magicType == in->type());
        }
    }
#endif

    *hasInputsWithEmptyTypes = false;

    MIRType type = MIRType_None;
    bool convertibleToFloat32 = false;
    bool hasPhiInputs = false;
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->isPhi()) {
            hasPhiInputs = true;
            if (!in->toPhi()->triedToSpecialize())
                continue;
            if (in->type() == MIRType_None) {
                // The operand is a phi we tried to specialize, but we were
                // unable to guess its type. propagateSpecialization will
                // propagate the type to this phi when it becomes known.
                continue;
            }
        }

        // Ignore operands which we've never observed.
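        // (An empty observed type set means type inference has never recorded
        // a value flowing through this operand, e.g. because the branch
        // producing it has not executed yet.)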
        if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
            *hasInputsWithEmptyTypes = true;
            continue;
        }

        if (type == MIRType_None) {
            type = in->type();
            if (in->canProduceFloat32())
                convertibleToFloat32 = true;
            continue;
        }
        if (type != in->type()) {
            if (convertibleToFloat32 && in->type() == MIRType_Float32) {
                // If we only saw definitions that can be converted into Float32 before and
                // encounter a Float32 value, promote previous values to Float32.
                type = MIRType_Float32;
            } else if (IsNumberType(type) && IsNumberType(in->type())) {
                // Specialize phis with int32 and double operands as double.
                type = MIRType_Double;
                convertibleToFloat32 &= in->canProduceFloat32();
            } else {
                return MIRType_Value;
            }
        }
    }

    if (type == MIRType_None && !hasPhiInputs) {
        // All inputs are non-phis with empty typesets. Use MIRType_Value
        // in this case, as it's impossible to get better type information.
        JS_ASSERT(*hasInputsWithEmptyTypes);
        type = MIRType_Value;
    }

    return type;
}

bool
TypeAnalyzer::respecialize(MPhi *phi, MIRType type)
{
    if (phi->type() == type)
        return true;
    phi->specialize(type);
    return addPhiToWorklist(phi);
}

bool
TypeAnalyzer::propagateSpecialization(MPhi *phi)
{
    JS_ASSERT(phi->type() != MIRType_None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi *use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType_None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 that can be converted to float and float operands as floats.
            if ((use->type() == MIRType_Int32 && use->canProduceFloat32() && phi->type() == MIRType_Float32) ||
                (phi->type() == MIRType_Int32 && phi->canProduceFloat32() && use->type() == MIRType_Float32))
            {
                if (!respecialize(use, MIRType_Float32))
                    return false;
                continue;
            }

            // Specialize phis with int32 and double operands as double.
            if (IsNumberType(use->type()) && IsNumberType(phi->type())) {
                if (!respecialize(use, MIRType_Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType_Value))
                return false;
        }
    }

    return true;
}

bool
TypeAnalyzer::specializePhis()
{
    Vector<MPhi *, 0, SystemAllocPolicy> phisWithEmptyInputTypes;

    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Specialize Phis (main loop)"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            bool hasInputsWithEmptyTypes;
            MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
            phi->specialize(type);
            if (type == MIRType_None) {
                // We tried to guess the type but failed because all operands are
                // phis we still have to visit. Set the triedToSpecialize flag but
                // don't propagate the type to other phis; propagateSpecialization
                // will do that once we know the type of one of the operands.

                // Edge case: when this phi has a non-phi input with an empty
                // typeset, it's possible for two phis to have a cyclic
                // dependency and they will both have MIRType_None. Specialize
                // such phis to MIRType_Value later on.
                if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
                    return false;
                continue;
            }
            if (!propagateSpecialization(*phi))
                return false;
        }
    }

    do {
        while (!phiWorklist_.empty()) {
            if (mir->shouldCancel("Specialize Phis (worklist)"))
                return false;

            MPhi *phi = popPhi();
            if (!propagateSpecialization(phi))
                return false;
        }

        // When two phis have a cyclic dependency and inputs that have an empty
        // typeset (which are ignored by GuessPhiType), we may still have to
        // specialize these to MIRType_Value.
        while (!phisWithEmptyInputTypes.empty()) {
            if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
                return false;

            MPhi *phi = phisWithEmptyInputTypes.popCopy();
            if (phi->type() == MIRType_None) {
                phi->specialize(MIRType_Value);
                if (!propagateSpecialization(phi))
                    return false;
            }
        }
    } while (!phiWorklist_.empty());

    return true;
}

void
TypeAnalyzer::adjustPhiInputs(MPhi *phi)
{
    MIRType phiType = phi->type();
    JS_ASSERT(phiType != MIRType_None);

    // If we specialized a type that's not Value, there are 3 cases:
    // 1. Every input is of that type.
    // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
    // 3. Inputs were doubles and int32s, and the phi was specialized to double.
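    //
    // In cases 2 and 3 some inputs still have a different MIRType than the
    // phi, so the loop below inserts a conversion (or a box plus fallible
    // unbox) at the end of the block producing each such input.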
    if (phiType != MIRType_Value) {
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition *in = phi->getOperand(i);
            if (in->type() == phiType)
                continue;

            if (in->isBox() && in->toBox()->input()->type() == phiType) {
                phi->replaceOperand(i, in->toBox()->input());
            } else {
                MInstruction *replacement;

                if (phiType == MIRType_Double && IsFloatType(in->type())) {
                    // Convert int32 operands to double.
                    replacement = MToDouble::New(alloc(), in);
                } else if (phiType == MIRType_Float32) {
                    if (in->type() == MIRType_Int32 || in->type() == MIRType_Double) {
                        replacement = MToFloat32::New(alloc(), in);
                    } else {
                        // See comment below.
                        if (in->type() != MIRType_Value) {
                            MBox *box = MBox::New(alloc(), in);
                            in->block()->insertBefore(in->block()->lastIns(), box);
                            in = box;
                        }

                        MUnbox *unbox = MUnbox::New(alloc(), in, MIRType_Double, MUnbox::Fallible);
                        in->block()->insertBefore(in->block()->lastIns(), unbox);
                        replacement = MToFloat32::New(alloc(), unbox);
                    }
                } else {
                    // If we know this branch will fail to convert to phiType,
                    // insert a box that'll immediately fail in the fallible unbox
                    // below.
                    if (in->type() != MIRType_Value) {
                        MBox *box = MBox::New(alloc(), in);
                        in->block()->insertBefore(in->block()->lastIns(), box);
                        in = box;
                    }

                    // Be optimistic and insert unboxes when the operand is a
                    // value.
                    replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
                }

                in->block()->insertBefore(in->block()->lastIns(), replacement);
                phi->replaceOperand(i, replacement);
            }
        }

        return;
    }

    // Box every typed input.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_Value)
            continue;

        if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input())) {
            // The input is being explicitly unboxed, so sneak past and grab
            // the original box.
            phi->replaceOperand(i, in->toUnbox()->input());
        } else {
            MDefinition *box = BoxInputsPolicy::alwaysBoxAt(alloc(), in->block()->lastIns(), in);
            phi->replaceOperand(i, box);
        }
    }
}

bool
TypeAnalyzer::adjustInputs(MDefinition *def)
{
    TypePolicy *policy = def->typePolicy();
    if (policy && !policy->adjustInputs(alloc(), def->toInstruction()))
        return false;
    return true;
}

void
TypeAnalyzer::replaceRedundantPhi(MPhi *phi)
{
    MBasicBlock *block = phi->block();
    js::Value v;
    switch (phi->type()) {
      case MIRType_Undefined:
        v = UndefinedValue();
        break;
      case MIRType_Null:
        v = NullValue();
        break;
      case MIRType_MagicOptimizedArguments:
        v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
        break;
      case MIRType_MagicOptimizedOut:
        v = MagicValue(JS_OPTIMIZED_OUT);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected type");
    }
    MConstant *c = MConstant::New(alloc(), v);
    // The instruction pass will insert the box.
    block->insertBefore(*(block->begin()), c);
    phi->replaceAllUsesWith(c);
}

bool
TypeAnalyzer::insertConversions()
{
    // Instructions are processed in reverse postorder: all defs are seen
    // before their uses. This ensures that output adjustment (which may
    // rewrite inputs of uses) does not conflict with input adjustment.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("Insert Conversions"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();) {
            if (phi->type() == MIRType_Undefined ||
                phi->type() == MIRType_Null ||
                phi->type() == MIRType_MagicOptimizedArguments ||
                phi->type() == MIRType_MagicOptimizedOut)
            {
                replaceRedundantPhi(*phi);
                phi = block->discardPhiAt(phi);
            } else {
                adjustPhiInputs(*phi);
                phi++;
            }
        }
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            if (!adjustInputs(*iter))
                return false;
        }
    }
    return true;
}

// This function tries to emit Float32 specialized operations whenever it's possible.
// MIR nodes are flagged as:
// - Producers, when they can create Float32 that might need to be coerced into a Double.
//   Loads in Float32 arrays and conversions to Float32 are producers.
// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
//   Stores in Float32 arrays and conversions to Float32 are consumers.
// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
//   does not result in a compound loss of precision. This is the case for +, -, /, * with 2
//   operands, for instance. However, an addition with 3 operands is not commutative anymore,
//   so an intermediate coercion is needed.
// Except for phis, all these flags are known after Ion building, so they cannot change during
// the process.
//
// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
// has only producers as inputs and consumers as uses, we can specialize the operation as a
// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
//
// Phis have a special status. Phis need to be flagged as producers or consumers, as they can
// be inputs or outputs of commutative instructions. Fortunately, the producer and consumer
// properties are such that we can deduce each one from the non-phi inputs or uses first (which
// form an initial phi graph) and then propagate it from one phi to another using a fixed point
// algorithm. The algorithm is guaranteed to terminate, as each iteration flags no more phis
// than the previous one (so the worst steady state case is all phis being flagged as false).
//
// In a nutshell, the algorithm applies three passes:
// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND
//     on all its non-phi uses. Then each phi propagates its value to other phis. If, after
//     propagation, the flag value changed, we have to reapply the algorithm on all phi operands,
//     as a phi is a consumer only if all of its uses are consumers.
// 2 - Determine which phis are producers. It's the same algorithm, except that we have to
//     reapply it on all phi uses, as a phi is a producer only if all of its operands are
//     producers.
// 3 - Go through all commutative operations and ensure their inputs are all producers and their
//     uses are all consumers.
bool
TypeAnalyzer::markPhiConsumers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in postorder so worklist is initialized to RPO.
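    // Each phi's initial flag is the AND of its non-phi uses; phis that start
    // out true are enqueued so the fixed point loop below can invalidate them
    // if one of their phi uses turns out not to consume Float32.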
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            bool canConsumeFloat32 = true;
            for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
                MDefinition *usedef = use.def();
                canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
            }
            phi->setCanConsumeFloat32(canConsumeFloat32);
            if (canConsumeFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

        bool validConsumer = true;
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
                validConsumer = false;
                break;
            }
        }

        if (validConsumer)
            continue;

        // Propagate invalidated phis.
        phi->setCanConsumeFloat32(false);
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
            {
                if (!addPhiToWorklist(input->toPhi()))
                    return false;
            }
        }
    }
    return true;
}

bool
TypeAnalyzer::markPhiProducers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in reverse postorder so worklist is initialized to PO.
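    // Symmetric to markPhiConsumers: each phi's initial flag is the AND of
    // its non-phi operands, and the fixed point loop below invalidates phis
    // whose phi operands turn out not to produce Float32.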
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            bool canProduceFloat32 = true;
            for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
                MDefinition *input = phi->getOperand(i);
                canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
            }
            phi->setCanProduceFloat32(canProduceFloat32);
            if (canProduceFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canProduceFloat32());

        bool validProducer = true;
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->canProduceFloat32()) {
                validProducer = false;
                break;
            }
        }

        if (validProducer)
            continue;

        // Propagate invalidated phis.
        phi->setCanProduceFloat32(false);
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
            {
                if (!addPhiToWorklist(def->toPhi()))
                    return false;
            }
        }
    }
    return true;
}

bool
TypeAnalyzer::specializeValidFloatOps()
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
            return false;

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
            if (!ins->isFloat32Commutative())
                continue;

            if (ins->type() == MIRType_Float32)
                continue;

            // This call will try to specialize the instruction iff all uses are consumers and
            // all inputs are producers.
            ins->trySpecializeFloat32(alloc());
        }
    }
    return true;
}

bool
TypeAnalyzer::graphContainsFloat32()
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() == MIRType_Float32)
                return true;
        }
    }
    return false;
}

bool
TypeAnalyzer::tryEmitFloatOperations()
{
    // Backends that don't know how to generate Float32 specialized instructions
    // shouldn't run this pass and should leave all instructions specialized for
    // Double.
    if (!LIRGenerator::allowFloat32Optimizations())
        return true;

    // Asm.js uses ahead-of-time type checks to specialize operations; no need
    // to check them again at this point.
    if (mir->compilingAsmJS())
        return true;

    // Check ahead of time that there is at least one definition typed as
    // Float32, otherwise we don't need this pass.
    if (!graphContainsFloat32())
        return true;

    if (!markPhiConsumers())
        return false;
    if (!markPhiProducers())
        return false;
    if (!specializeValidFloatOps())
        return false;
    return true;
}

bool
TypeAnalyzer::checkFloatCoherency()
{
#ifdef DEBUG
    // Assert that all Float32 instructions flow into Float32 consumers or
    // specialized operations.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Check Float32 coherency"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() != MIRType_Float32)
                continue;

            for (MUseDefIterator use(*def); use; use++) {
                MDefinition *consumer = use.def();
                JS_ASSERT(consumer->isConsistentFloat32Use(use.use()));
            }
        }
    }
#endif
    return true;
}

bool
TypeAnalyzer::analyze()
{
    if (!tryEmitFloatOperations())
        return false;
    if (!specializePhis())
        return false;
    if (!insertConversions())
        return false;
    if (!checkFloatCoherency())
        return false;
    return true;
}

bool
jit::ApplyTypeInformation(MIRGenerator *mir, MIRGraph &graph)
{
    TypeAnalyzer analyzer(mir, graph);

    if (!analyzer.analyze())
        return false;

    return true;
}

bool
jit::MakeMRegExpHoistable(MIRGraph &graph)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MDefinitionIterator iter(*block); iter; iter++) {
            if (!iter->isRegExp())
                continue;

            MRegExp *regexp = iter->toRegExp();

            // Test if MRegExp is hoistable by looking at all uses.
            bool hoistable = true;
            for (MUseIterator i = regexp->usesBegin(); i != regexp->usesEnd(); i++) {
                // Ignore resume points. At this point all uses are listed;
                // no DCE or GVN has happened yet.
                if (i->consumer()->isResumePoint())
                    continue;

                JS_ASSERT(i->consumer()->isDefinition());

                // None of the MRegExp* MIR nodes mutate the regexp.
                MDefinition *use = i->consumer()->toDefinition();
                if (use->isRegExpReplace())
                    continue;
                if (use->isRegExpExec())
                    continue;
                if (use->isRegExpTest())
                    continue;

                hoistable = false;
                break;
            }

            if (!hoistable)
                continue;

            // Make the MRegExp hoistable.
            regexp->setMovable();

            // Hoisting would be incorrect for global/sticky regexps, because
            // lastIndex could be wrong. Therefore set lastIndex to 0; this is
            // still faster than a non-movable regexp.
            RegExpObject *source = regexp->source();
            if (source->sticky() || source->global()) {
                JS_ASSERT(regexp->mustClone());
                MConstant *zero = MConstant::New(graph.alloc(), Int32Value(0));
                regexp->block()->insertAfter(regexp, zero);

                MStoreFixedSlot *lastIndex =
                    MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
                regexp->block()->insertAfter(zero, lastIndex);
            }
        }
    }

    return true;
}

bool
jit::RenumberBlocks(MIRGraph &graph)
{
    size_t id = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
        block->setId(id++);

    return true;
}

// A Simple, Fast Dominance Algorithm by Cooper et al.
// Modified to support empty intersections for OSR, and in RPO.
static MBasicBlock *
IntersectDominators(MBasicBlock *block1, MBasicBlock *block2)
{
    MBasicBlock *finger1 = block1;
    MBasicBlock *finger2 = block2;

    JS_ASSERT(finger1);
    JS_ASSERT(finger2);

    // In the original paper, the block ID comparisons are on the postorder index.
    // This implementation iterates in RPO, so the comparisons are reversed.

    // For this function to be called, the block must have multiple predecessors.
    // If a finger is then found to be self-dominating, it must therefore be
    // reachable from multiple roots through non-intersecting control flow.
    // nullptr is returned in this case, to denote an empty intersection.

    while (finger1->id() != finger2->id()) {
        while (finger1->id() > finger2->id()) {
            MBasicBlock *idom = finger1->immediateDominator();
            if (idom == finger1)
                return nullptr; // Empty intersection.
            finger1 = idom;
        }

        while (finger2->id() > finger1->id()) {
            MBasicBlock *idom = finger2->immediateDominator();
            if (idom == finger2)
                return nullptr; // Empty intersection.
            finger2 = idom;
        }
    }
    return finger1;
}

static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                if (pred->immediateDominator() == nullptr)
                    continue;

                newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == nullptr) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != nullptr);
    }
#endif
}

bool
jit::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }
        index++;
    }

    return true;
}

bool
jit::BuildPhiReverseMapping(MIRGraph &graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    // * Blocks with fewer than two predecessors cannot have phis.
    // * Breaks. A break always has exactly one successor, and the break
    //   catch block has exactly one predecessor for each break, as
    //   well as a final predecessor for the actual loop exit.
    // * Continues. A continue always has exactly one successor, and the
    //   continue catch block has exactly one predecessor for each
    //   continue, as well as a final predecessor for the actual
    //   loop continuation. The continue itself has exactly one
    //   successor.
    // * An if. Each branch has exactly one predecessor.
    // * A switch. Each branch has exactly one predecessor.
    // * Loop tail. A new block is always created for the exit, and if a
    //   break statement is present, the exit block will forward
    //   directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->numPredecessors() < 2) {
            JS_ASSERT(block->phisEmpty());
            continue;
        }

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock *pred = block->getPredecessor(j);

#ifdef DEBUG
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock *successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            JS_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}

#ifdef DEBUG
static bool
CheckSuccessorImpliesPredecessor(MBasicBlock *A, MBasicBlock *B)
{
    // Assuming B = succ(A), verify A = pred(B).
    for (size_t i = 0; i < B->numPredecessors(); i++) {
        if (A == B->getPredecessor(i))
            return true;
    }
    return false;
}

static bool
CheckPredecessorImpliesSuccessor(MBasicBlock *A, MBasicBlock *B)
{
    // Assuming B = pred(A), verify A = succ(B).
    for (size_t i = 0; i < B->numSuccessors(); i++) {
        if (A == B->getSuccessor(i))
            return true;
    }
    return false;
}

static bool
CheckOperandImpliesUse(MNode *n, MDefinition *operand)
{
    for (MUseIterator i = operand->usesBegin(); i != operand->usesEnd(); i++) {
        if (i->consumer() == n)
            return true;
    }
    return false;
}

static bool
CheckUseImpliesOperand(MDefinition *def, MUse *use)
{
    return use->consumer()->getOperand(use->index()) == def;
}
#endif // DEBUG

void
jit::AssertBasicGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    JS_ASSERT(graph.entryBlock()->numPredecessors() == 0);
    JS_ASSERT(graph.entryBlock()->phisEmpty());
    JS_ASSERT(!graph.entryBlock()->unreachable());

    if (MBasicBlock *osrBlock = graph.osrBlock()) {
        JS_ASSERT(osrBlock->numPredecessors() == 0);
        JS_ASSERT(osrBlock->phisEmpty());
        JS_ASSERT(osrBlock != graph.entryBlock());
        JS_ASSERT(!osrBlock->unreachable());
    }

    if (MResumePoint *resumePoint = graph.entryResumePoint())
        JS_ASSERT(resumePoint->block() == graph.entryBlock());

    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        JS_ASSERT(&block->graph() == &graph);

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        // Assert that use chains are valid for this instruction.
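        // The check runs in both directions: CheckOperandImpliesUse verifies
        // that every operand lists this node among its uses, and
        // CheckUseImpliesOperand (below) verifies that every recorded use
        // points back at the operand slot naming this definition.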
        for (MDefinitionIterator iter(*block); iter; iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
                JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
        }
        for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
                if (iter->getUseFor(i)->hasProducer())
                    JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
            }
        }
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            JS_ASSERT(phi->numOperands() == block->numPredecessors());
        }
        for (MDefinitionIterator iter(*block); iter; iter++) {
            JS_ASSERT(iter->block() == *block);
            for (MUseIterator i(iter->usesBegin()); i != iter->usesEnd(); i++)
                JS_ASSERT(CheckUseImpliesOperand(*iter, *i));

            if (iter->isInstruction()) {
                if (MResumePoint *resume = iter->toInstruction()->resumePoint()) {
                    if (MInstruction *ins = resume->instruction())
                        JS_ASSERT(ins->block() == iter->block());
                }
            }
        }
    }

    JS_ASSERT(graph.numBlocks() == count);
#endif
}

#ifdef DEBUG
static void
AssertReversePostOrder(MIRGraph &graph)
{
    // Check that every block is visited after all its predecessors (except backedges).
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JS_ASSERT(!block->isMarked());

        for (size_t i = 0; i < block->numPredecessors(); i++) {
            MBasicBlock *pred = block->getPredecessor(i);
            JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
        }

        block->mark();
    }

    graph.unmarkBlocks();
}
#endif

void
jit::AssertGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    if (!js_JitOptions.checkGraphConsistency)
        return;
    AssertBasicGraphCoherency(graph);
    AssertReversePostOrder(graph);
#endif
}

void
jit::AssertExtendedGraphCoherency(MIRGraph &graph)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split).

#ifdef DEBUG
    if (!js_JitOptions.checkGraphConsistency)
        return;
    AssertGraphCoherency(graph);

    uint32_t idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->id() == idx++);

        // No critical edges:
        if (block->numSuccessors() > 1) {
            for (size_t i = 0; i < block->numSuccessors(); i++)
                JS_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
        }

        if (block->isLoopHeader()) {
            JS_ASSERT(block->numPredecessors() == 2);
            MBasicBlock *backedge = block->getPredecessor(1);
            JS_ASSERT(backedge->id() >= block->id());
            JS_ASSERT(backedge->numSuccessors() == 1);
            JS_ASSERT(backedge->getSuccessor(0) == *block);
        }
        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                JS_ASSERT(pred->successorWithPhis() == *block);
                JS_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }

        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;

        JS_ASSERT(successorWithPhis <= 1);
        JS_ASSERT_IF(successorWithPhis, block->successorWithPhis() != nullptr);

        // I'd like to assert this, but it's not necessarily true. Sometimes
        // we set this flag to non-nullptr just because a successor has
        // multiple preds, even if it does not actually have any phis.
        //
        // JS_ASSERT_IF(!successorWithPhis, block->successorWithPhis() == nullptr);
    }
#endif
}


struct BoundsCheckInfo
{
    MBoundsCheck *check;
    uint32_t validUntil;
};

typedef HashMap<uint32_t,
                BoundsCheckInfo,
                DefaultHasher<uint32_t>,
                IonAllocPolicy> BoundsCheckMap;

// Compute a hash for bounds checks which ignores constant offsets in the index.
static HashNumber
BoundsCheckHashIgnoreOffset(MBoundsCheck *check)
{
    SimpleLinearSum indexSum = ExtractLinearSum(check->index());
    uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
    uintptr_t length = uintptr_t(check->length());
    return index ^ length;
}

static MBoundsCheck *
FindDominatingBoundsCheck(BoundsCheckMap &checks, MBoundsCheck *check, size_t index)
{
    // See the comment in ValueNumberer::findDominatingDef.
    HashNumber hash = BoundsCheckHashIgnoreOffset(check);
    BoundsCheckMap::Ptr p = checks.lookup(hash);
    if (!p || index > p->value().validUntil) {
        // We didn't find a dominating bounds check.
        BoundsCheckInfo info;
        info.check = check;
        info.validUntil = index + check->block()->numDominated();

        if (!checks.put(hash, info))
            return nullptr;

        return check;
    }

    return p->value().check;
}

// Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
SimpleLinearSum
jit::ExtractLinearSum(MDefinition *ins)
{
    if (ins->isBeta())
        ins = ins->getOperand(0);

    if (ins->type() != MIRType_Int32)
        return SimpleLinearSum(ins, 0);

    if (ins->isConstant()) {
        const Value &v = ins->toConstant()->value();
        JS_ASSERT(v.isInt32());
        return SimpleLinearSum(nullptr, v.toInt32());
    } else if (ins->isAdd() || ins->isSub()) {
        MDefinition *lhs = ins->getOperand(0);
        MDefinition *rhs = ins->getOperand(1);
        if (lhs->type() == MIRType_Int32 && rhs->type() == MIRType_Int32) {
            SimpleLinearSum lsum = ExtractLinearSum(lhs);
            SimpleLinearSum rsum = ExtractLinearSum(rhs);

            if (lsum.term && rsum.term)
                return SimpleLinearSum(ins, 0);

            // Check if this is of the form <SUM> + n, n + <SUM> or <SUM> - n.
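            // Illustrative example (hypothetical values): for
            // ins = ((x + 3) - 1), the recursive calls yield lsum = {x, 3}
            // and rsum = {nullptr, 1}, and the subtraction case below folds
            // them into {x, 2}, i.e. the linear sum 'x + 2'.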
            if (ins->isAdd()) {
                int32_t constant;
                if (!SafeAdd(lsum.constant, rsum.constant, &constant))
                    return SimpleLinearSum(ins, 0);
                return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
            } else if (lsum.term) {
                int32_t constant;
                if (!SafeSub(lsum.constant, rsum.constant, &constant))
                    return SimpleLinearSum(ins, 0);
                return SimpleLinearSum(lsum.term, constant);
            }
        }
    }

    return SimpleLinearSum(ins, 0);
}

// Extract a linear inequality holding when a boolean test goes in the
// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
bool
jit::ExtractLinearInequality(MTest *test, BranchDirection direction,
                             SimpleLinearSum *plhs, MDefinition **prhs, bool *plessEqual)
{
    if (!test->getOperand(0)->isCompare())
        return false;

    MCompare *compare = test->getOperand(0)->toCompare();

    MDefinition *lhs = compare->getOperand(0);
    MDefinition *rhs = compare->getOperand(1);

    // TODO: optimize Compare_UInt32
    if (!compare->isInt32Comparison())
        return false;

    JS_ASSERT(lhs->type() == MIRType_Int32);
    JS_ASSERT(rhs->type() == MIRType_Int32);

    JSOp jsop = compare->jsop();
    if (direction == FALSE_BRANCH)
        jsop = NegateCompareOp(jsop);

    SimpleLinearSum lsum = ExtractLinearSum(lhs);
    SimpleLinearSum rsum = ExtractLinearSum(rhs);

    if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant))
        return false;

    // Normalize operations to use <= or >=.
    switch (jsop) {
      case JSOP_LE:
        *plessEqual = true;
        break;
      case JSOP_LT:
        /* x < y ==> x + 1 <= y */
        if (!SafeAdd(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = true;
        break;
      case JSOP_GE:
        *plessEqual = false;
        break;
      case JSOP_GT:
        /* x > y ==> x - 1 >= y */
        if (!SafeSub(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = false;
        break;
      default:
        return false;
    }

    *plhs = lsum;
    *prhs = rsum.term;

    return true;
}

static bool
TryEliminateBoundsCheck(BoundsCheckMap &checks, size_t blockIndex, MBoundsCheck *dominated, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    // Replace all uses of the bounds check with the actual index.
    // This is (a) necessary, because we can coalesce two different
    // bounds checks and would otherwise use the wrong index and
    // (b) helps register allocation. Note that this is safe since
    // no other pass after bounds check elimination moves instructions.
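    // Illustrative example (hypothetical MIR): a dominating
    // 'boundsCheck(i + 1, len)' and a dominated 'boundsCheck(i + 7, len)'
    // share a hash entry because constant offsets are ignored; the code
    // below discards the dominated check and widens the dominating one so
    // it covers both offsets.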
    dominated->replaceAllUsesWith(dominated->index());

    if (!dominated->isMovable())
        return true;

    MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, dominated, blockIndex);
    if (!dominating)
        return false;

    if (dominating == dominated) {
        // We didn't find a dominating bounds check.
        return true;
    }

    // We found two bounds checks with the same hash number, but we still have
    // to make sure the lengths and index terms are equal.
    if (dominating->length() != dominated->length())
        return true;

    SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
    SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

    // Both terms should be nullptr or the same definition.
    if (sumA.term != sumB.term)
        return true;

    // This bounds check is redundant.
    *eliminated = true;

    // Normalize the ranges according to the constant offsets in the two indexes.
    int32_t minimumA, maximumA, minimumB, maximumB;
    if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
        !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
        !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
        !SafeAdd(sumB.constant, dominated->maximum(), &maximumB))
    {
        return false;
    }

    // Update the dominating check to cover both ranges, denormalizing the
    // result per the constant offset in the index.
    int32_t newMinimum, newMaximum;
    if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) ||
        !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum))
    {
        return false;
    }

    dominating->setMinimum(newMinimum);
    dominating->setMaximum(newMaximum);
    return true;
}

static void
TryEliminateTypeBarrierFromTest(MTypeBarrier *barrier, bool filtersNull, bool filtersUndefined,
                                MTest *test, BranchDirection direction, bool *eliminated)
{
    JS_ASSERT(filtersNull || filtersUndefined);

    // Watch for code patterns similar to 'if (x.f) { ... = x.f }'. If x.f
    // is either an object or null/undefined, there will be a type barrier on
    // the latter read as the null/undefined value is never realized there.
    // The type barrier can be eliminated, however, by looking at tests
    // performed on the result of the first operation that filter out all
    // types that have been seen in the first access but not the second.

    // A test 'if (x.f)' filters both null and undefined.

    // Disregard the possible unbox added before the TypeBarrier for checking.
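    // E.g. (hypothetical value numbers) the barriered read may look like
    //   v1 = CallGetProperty(x); v2 = Unbox(v1); v3 = TypeBarrier(v2)
    // in which case the test's subject must be compared against v1, the
    // unbox's input, rather than against v2.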
    MDefinition *input = barrier->input();
    MUnbox *inputUnbox = nullptr;
    if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
        inputUnbox = input->toUnbox();
        input = inputUnbox->input();
    }

    MDefinition *subject = nullptr;
    bool removeUndefined;
    bool removeNull;
    test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);

    // The test doesn't filter undefined or null.
    if (!subject)
        return;

    // Make sure the subject equals the input to the TypeBarrier.
    if (subject != input)
        return;

    // When the TypeBarrier filters undefined, the test must do so as well
    // before the TypeBarrier can be removed.
    if (!removeUndefined && filtersUndefined)
        return;

    // Likewise, when the TypeBarrier filters null, the test must also do so.
    if (!removeNull && filtersNull)
        return;

    // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
    // but made infallible.
    *eliminated = true;
    if (inputUnbox)
        inputUnbox->makeInfallible();
    barrier->replaceAllUsesWith(barrier->input());
}

static bool
TryEliminateTypeBarrier(MTypeBarrier *barrier, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    const types::TemporaryTypeSet *barrierTypes = barrier->resultTypeSet();
    const types::TemporaryTypeSet *inputTypes = barrier->input()->resultTypeSet();

    // Disregard the possible unbox added before the TypeBarrier.
    if (barrier->input()->isUnbox() && barrier->input()->toUnbox()->mode() != MUnbox::Fallible)
        inputTypes = barrier->input()->toUnbox()->input()->resultTypeSet();

    if (!barrierTypes || !inputTypes)
        return true;

    bool filtersNull = barrierTypes->filtersType(inputTypes, types::Type::NullType());
    bool filtersUndefined = barrierTypes->filtersType(inputTypes, types::Type::UndefinedType());

    if (!filtersNull && !filtersUndefined)
        return true;

    MBasicBlock *block = barrier->block();
    while (true) {
        BranchDirection direction;
        MTest *test = block->immediateDominatorBranch(&direction);

        if (test) {
            TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
                                            test, direction, eliminated);
        }

        MBasicBlock *previous = block->immediateDominator();
        if (previous == block)
            break;
        block = previous;
    }

    return true;
}

// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
jit::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks(graph.alloc());

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Add all immediately dominated blocks to the worklist.
        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}

// If the given block contains a goto and nothing interesting before that,
// return the goto. Return nullptr otherwise.
static LGoto *
FindLeadingGoto(LBlock *bb)
{
    for (LInstructionIterator ins(bb->begin()); ins != bb->end(); ins++) {
        // Ignore labels.
        if (ins->isLabel())
            continue;
        // If we have a goto, we're good to go.
        if (ins->isGoto())
            return ins->toGoto();
        break;
    }
    return nullptr;
}

// Eliminate blocks containing nothing interesting besides gotos. These are
// often created by the optimizer, which splits all critical edges. If these
// splits end up being unused after optimization and register allocation,
// fold them back away to avoid unnecessary branching.
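//
// Example: if edge splitting introduced an empty block S on the edge
// A -> B (giving A -> S -> B) and S still contains only a goto once
// register allocation is done, the CFG is rewired back to A -> B and
// S is removed.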
bool
jit::UnsplitEdges(LIRGraph *lir)
{
    for (size_t i = 0; i < lir->numBlocks(); i++) {
        LBlock *bb = lir->getBlock(i);
        MBasicBlock *mirBlock = bb->mir();

        // Renumber the MIR blocks as we go, since we may remove some.
        mirBlock->setId(i);

        // Register allocation is done by this point, so we don't need the phis
        // anymore. Clear them to avoid needing to keep them current as we edit
        // the CFG.
        bb->clearPhis();
        mirBlock->discardAllPhis();

        // First make sure the MIR block looks sane. Some of these checks may be
        // over-conservative, but we're attempting to keep everything in MIR
        // current as we modify the LIR, so only proceed if the MIR is simple.
        if (mirBlock->numPredecessors() == 0 || mirBlock->numSuccessors() != 1 ||
            !mirBlock->begin()->isGoto())
        {
            continue;
        }

        // The MIR block is empty, but check the LIR block too (in case the
        // register allocator inserted spill code there, or whatever).
        LGoto *theGoto = FindLeadingGoto(bb);
        if (!theGoto)
            continue;
        MBasicBlock *target = theGoto->target();
        if (target == mirBlock || target != mirBlock->getSuccessor(0))
            continue;

        // If we haven't yet cleared the phis for the successor, do so now so
        // that the CFG manipulation routines don't trip over them.
        if (!target->phisEmpty()) {
            target->discardAllPhis();
            target->lir()->clearPhis();
        }

        // Edit the CFG to remove lir/mirBlock and reconnect all its edges.
        for (size_t j = 0; j < mirBlock->numPredecessors(); j++) {
            MBasicBlock *mirPred = mirBlock->getPredecessor(j);

            for (size_t k = 0; k < mirPred->numSuccessors(); k++) {
                if (mirPred->getSuccessor(k) == mirBlock) {
                    mirPred->replaceSuccessor(k, target);
                    if (!target->addPredecessorWithoutPhis(mirPred))
                        return false;
                }
            }

            LInstruction *predTerm = *mirPred->lir()->rbegin();
            for (size_t k = 0; k < predTerm->numSuccessors(); k++) {
                if (predTerm->getSuccessor(k) == mirBlock)
                    predTerm->setSuccessor(k, target);
            }
        }
        target->removePredecessor(mirBlock);

        // Zap the block.
        lir->removeBlock(i);
        lir->mir().removeBlock(mirBlock);
        // Revisit this index, since a later block has shifted into its slot.
        --i;
    }

    return true;
}

bool
LinearSum::multiply(int32_t scale)
{
    for (size_t i = 0; i < terms_.length(); i++) {
        if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
            return false;
    }
    return SafeMul(scale, constant_, &constant_);
}

bool
LinearSum::add(const LinearSum &other)
{
    for (size_t i = 0; i < other.terms_.length(); i++) {
        if (!add(other.terms_[i].term, other.terms_[i].scale))
            return false;
    }
    return add(other.constant_);
}

bool
LinearSum::add(MDefinition *term, int32_t scale)
{
    JS_ASSERT(term);

    if (scale == 0)
        return true;

    if (term->isConstant()) {
        int32_t constant = term->toConstant()->value().toInt32();
        if (!SafeMul(constant, scale, &constant))
            return false;
        return add(constant);
    }

    for (size_t i = 0; i < terms_.length(); i++) {
        if (term == terms_[i].term) {
            if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale))
                return false;
            if (terms_[i].scale == 0) {
                terms_[i] = terms_.back();
                terms_.popBack();
            }
            return true;
        }
    }

    terms_.append(LinearTerm(term, scale));
    return true;
}

bool
LinearSum::add(int32_t constant)
{
    return SafeAdd(constant, constant_, &constant_);
}

void
LinearSum::print(Sprinter &sp) const
{
    for (size_t i = 0; i < terms_.length(); i++) {
        int32_t scale = terms_[i].scale;
        int32_t id = terms_[i].term->id();
        JS_ASSERT(scale);
        if (scale > 0) {
            if (i)
                sp.printf("+");
            if (scale == 1)
                sp.printf("#%d", id);
            else
                sp.printf("%d*#%d", scale, id);
        } else if (scale == -1) {
            sp.printf("-#%d", id);
        } else {
            sp.printf("%d*#%d", scale, id);
        }
    }
    if (constant_ > 0)
        sp.printf("+%d", constant_);
    else if (constant_ < 0)
        sp.printf("%d", constant_);
}

void
LinearSum::dump(FILE *fp) const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(fp, "%s\n", sp.string());
}

void
LinearSum::dump() const
{
    dump(stderr);
}

static bool
AnalyzePoppedThis(JSContext *cx, types::TypeObject *type,
                  MDefinition *thisValue, MInstruction *ins, bool definitelyExecuted,
                  HandleObject baseobj,
                  Vector<types::TypeNewScript::Initializer> *initializerList,
                  Vector<PropertyName *> *accessedProperties,
                  bool *phandled)
{
    // Determine the effect that a use of the |this| value when calling |new|
    // on a script has on the properties definitely held by the new object.
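    //
    // Illustrative example (hypothetical script): in
    // 'function C() { this.x = 0; this.y = 1; }' both stores are
    // unconditional CallSetProperty uses of |this|, so 'x' and 'y' can
    // become definite properties of objects created by 'new C()'.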

    if (ins->isCallSetProperty()) {
        MCallSetProperty *setprop = ins->toCallSetProperty();

        if (setprop->object() != thisValue)
            return true;

        // Don't use GetAtomId here, we need to watch for SETPROP on
        // integer properties and bail out. We can't mark the aggregate
        // JSID_VOID type property as being in a definite slot.
        if (setprop->name() == cx->names().prototype ||
            setprop->name() == cx->names().proto ||
            setprop->name() == cx->names().constructor)
        {
            return true;
        }

        // Ignore assignments to properties that were already written to.
        if (baseobj->nativeLookup(cx, NameToId(setprop->name()))) {
            *phandled = true;
            return true;
        }

        // Don't add definite properties for properties that were already
        // read in the constructor.
        for (size_t i = 0; i < accessedProperties->length(); i++) {
            if ((*accessedProperties)[i] == setprop->name())
                return true;
        }

        // Don't add definite properties to an object which won't fit in its
        // fixed slots.
        if (GetGCKindSlots(gc::GetGCObjectKind(baseobj->slotSpan() + 1)) <= baseobj->slotSpan())
            return true;

        // Assignments to new properties must always execute.
        if (!definitelyExecuted)
            return true;

        RootedId id(cx, NameToId(setprop->name()));
        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
            // The prototype chain already contains a getter/setter for this
            // property, or type information is too imprecise.
            return true;
        }

        DebugOnly<uint32_t> slotSpan = baseobj->slotSpan();
        if (!DefineNativeProperty(cx, baseobj, id, UndefinedHandleValue, nullptr, nullptr,
                                  JSPROP_ENUMERATE))
        {
            return false;
        }
        JS_ASSERT(baseobj->slotSpan() != slotSpan);
        JS_ASSERT(!baseobj->inDictionaryMode());

        Vector<MResumePoint *> callerResumePoints(cx);
        MBasicBlock *block = ins->block();
        for (MResumePoint *rp = block->callerResumePoint();
             rp;
             block = rp->block(), rp = block->callerResumePoint())
        {
            JSScript *script = rp->block()->info().script();
            if (!types::AddClearDefiniteFunctionUsesInScript(cx, type, script, block->info().script()))
                return true;
            if (!callerResumePoints.append(rp))
                return false;
        }

        for (int i = callerResumePoints.length() - 1; i >= 0; i--) {
            MResumePoint *rp = callerResumePoints[i];
            JSScript *script = rp->block()->info().script();
            types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP_FRAME,
                                                    script->pcToOffset(rp->pc()));
            if (!initializerList->append(entry))
                return false;
        }

        JSScript *script = ins->block()->info().script();
        types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP,
                                                script->pcToOffset(setprop->resumePoint()->pc()));
        if (!initializerList->append(entry))
            return false;

        *phandled = true;
        return true;
    }

    if (ins->isCallGetProperty()) {
        MCallGetProperty *get = ins->toCallGetProperty();

        /*
         * Properties can be read from the 'this' object if the following hold:
         *
         * - The read is not on a getter along the prototype chain, which
         *   could cause 'this' to escape.
         *
         * - The accessed property is either already a definite property or
         *   is not later added as one. Since the definite properties are
         *   added to the object at the point of its creation, reading a
         *   definite property before it is assigned could incorrectly hit.
         */
        RootedId id(cx, NameToId(get->name()));
        if (!baseobj->nativeLookup(cx, id) && !accessedProperties->append(get->name()))
            return false;

        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
            // The |this| value can escape if any of the property reads it
            // performs go through a getter.
            return true;
        }

        *phandled = true;
        return true;
    }

    if (ins->isPostWriteBarrier()) {
        *phandled = true;
        return true;
    }

    return true;
}

static int
CmpInstructions(const void *a, const void *b)
{
    return (*static_cast<MInstruction * const *>(a))->id() -
           (*static_cast<MInstruction * const *>(b))->id();
}

bool
jit::AnalyzeNewScriptProperties(JSContext *cx, JSFunction *fun,
                                types::TypeObject *type, HandleObject baseobj,
                                Vector<types::TypeNewScript::Initializer> *initializerList)
{
    JS_ASSERT(cx->compartment()->activeAnalysis);

    // When invoking 'new' on the specified script, try to find some properties
    // which will definitely be added to the created object before it has a
    // chance to escape and be accessed elsewhere.

    RootedScript script(cx, fun->getOrCreateScript(cx));
    if (!script)
        return false;

    if (!jit::IsIonEnabled(cx) || !jit::IsBaselineEnabled(cx) ||
        !script->compileAndGo() || !script->canBaselineCompile())
    {
        return true;
    }

    static const uint32_t MAX_SCRIPT_SIZE = 2000;
    if (script->length() > MAX_SCRIPT_SIZE)
        return true;

    Vector<PropertyName *> accessedProperties(cx);

    LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);

    TempAllocator temp(&alloc);
    IonContext ictx(cx, &temp);

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    if (!script->hasBaselineScript()) {
        MethodStatus status = BaselineCompile(cx, script);
        if (status == Method_Error)
            return false;
        if (status != Method_Compiled)
            return true;
    }

    types::TypeScript::SetThis(cx, script, types::Type::ObjectType(type));

    MIRGraph graph(&temp);
    CompileInfo info(script, fun,
                     /* osrPc = */ nullptr, /* constructing = */ false,
                     DefinitePropertiesAnalysis,
                     script->needsArgsObj());

    AutoTempAllocatorRooter root(cx, &temp);

    const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);

    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
    if (!constraints) {
        js_ReportOutOfMemory(cx);
        return false;
    }

    BaselineInspector inspector(script);
    const JitCompileOptions options(cx);

    IonBuilder builder(cx, CompileCompartment::get(cx->compartment()), options, &temp, &graph,
                       constraints, &inspector, &info, optimizationInfo,
                       /* baselineFrame = */ nullptr);

    if (!builder.build()) {
        if (builder.abortReason() == AbortReason_Alloc)
            return false;
        return true;
    }

    types::FinishDefinitePropertiesAnalysis(cx, constraints);

    if (!SplitCriticalEdges(graph))
        return false;

    if (!RenumberBlocks(graph))
        return false;

    if (!BuildDominatorTree(graph))
        return false;
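
    // Phi elimination below keeps uses of |this| as direct instruction uses
    // where possible; any use still flowing through a phi afterwards makes
    // the loop over uses bail out of the analysis.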

    if (!EliminatePhis(&builder, graph, AggressiveObservability))
        return false;

    MDefinition *thisValue = graph.begin()->getSlot(info.thisSlot());

    // Get a list of instructions using the |this| value in the order they
    // appear in the graph.
    Vector<MInstruction *> instructions(cx);

    for (MUseDefIterator uses(thisValue); uses; uses++) {
        MDefinition *use = uses.def();

        // Don't track |this| through assignments to phis.
        if (!use->isInstruction())
            return true;

        if (!instructions.append(use->toInstruction()))
            return false;
    }

    // Sort the instructions to visit in increasing order.
    qsort(instructions.begin(), instructions.length(),
          sizeof(MInstruction *), CmpInstructions);

    // Find all exit blocks in the graph.
    Vector<MBasicBlock *> exitBlocks(cx);
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (!block->numSuccessors() && !exitBlocks.append(*block))
            return false;
    }

    for (size_t i = 0; i < instructions.length(); i++) {
        MInstruction *ins = instructions[i];

        // Track whether the use of |this| is in unconditional code, i.e.
        // the block dominates all graph exits.
        bool definitelyExecuted = true;
        for (size_t j = 0; j < exitBlocks.length(); j++) {
            for (MBasicBlock *exit = exitBlocks[j];
                 exit != ins->block();
                 exit = exit->immediateDominator())
            {
                if (exit == exit->immediateDominator()) {
                    definitelyExecuted = false;
                    break;
                }
            }
        }

        // Also check to see if the instruction is inside a loop body. Even if
        // an access will always execute in the script, if it executes multiple
        // times then we can get confused when rolling back objects while
        // clearing the new script information.
        if (ins->block()->loopDepth() != 0)
            definitelyExecuted = false;

        bool handled = false;
        if (!AnalyzePoppedThis(cx, type, thisValue, ins, definitelyExecuted,
                               baseobj, initializerList, &accessedProperties, &handled))
        {
            return false;
        }
        if (!handled)
            return true;
    }

    return true;
}

static bool
ArgumentsUseCanBeLazy(JSContext *cx, JSScript *script, MInstruction *ins, size_t index)
{
    // We can read the frame's arguments directly for f.apply(x, arguments).
    if (ins->isCall()) {
        if (*ins->toCall()->resumePoint()->pc() == JSOP_FUNAPPLY &&
            ins->toCall()->numActualArgs() == 2 &&
            index == MCall::IndexOfArgument(1))
        {
            return true;
        }
    }

    // arguments[i] can read fp->canonicalActualArg(i) directly.
    if (ins->isCallGetElement() && index == 0)
        return true;

    // arguments.length can read fp->numActualArgs() directly.
    if (ins->isCallGetProperty() && index == 0 && ins->toCallGetProperty()->name() == cx->names().length)
        return true;

    return false;
}

bool
jit::AnalyzeArgumentsUsage(JSContext *cx, JSScript *scriptArg)
{
    RootedScript script(cx, scriptArg);
    types::AutoEnterAnalysis enter(cx);

    JS_ASSERT(!script->analyzedArgsUsage());

    // Treat the script as needing an arguments object until we determine it
    // does not need one. This both allows us to easily see where the arguments
    // object can escape through assignments to the function's named arguments,
    // and also simplifies handling of early returns.
    script->setNeedsArgsObj(true);

    if (!jit::IsIonEnabled(cx) || !script->compileAndGo())
        return true;

    static const uint32_t MAX_SCRIPT_SIZE = 10000;
    if (script->length() > MAX_SCRIPT_SIZE)
        return true;

    if (!script->ensureHasTypes(cx))
        return false;

    LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);

    TempAllocator temp(&alloc);
    IonContext ictx(cx, &temp);

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    MIRGraph graph(&temp);
    CompileInfo info(script, script->functionNonDelazifying(),
                     /* osrPc = */ nullptr, /* constructing = */ false,
                     ArgumentsUsageAnalysis,
                     /* needsArgsObj = */ true);

    AutoTempAllocatorRooter root(cx, &temp);

    const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);

    types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
    if (!constraints)
        return false;

    BaselineInspector inspector(script);
    const JitCompileOptions options(cx);

    IonBuilder builder(nullptr, CompileCompartment::get(cx->compartment()), options, &temp, &graph,
                       constraints, &inspector, &info, optimizationInfo,
                       /* baselineFrame = */ nullptr);

    if (!builder.build()) {
        if (builder.abortReason() == AbortReason_Alloc)
            return false;
        return true;
    }

    if (!SplitCriticalEdges(graph))
        return false;

    if (!RenumberBlocks(graph))
        return false;

    if (!BuildDominatorTree(graph))
        return false;

    if (!EliminatePhis(&builder, graph, AggressiveObservability))
        return false;

    MDefinition *argumentsValue = graph.begin()->getSlot(info.argsObjSlot());

    for (MUseDefIterator uses(argumentsValue); uses; uses++) {
        MDefinition *use = uses.def();

        // Don't track |arguments| through assignments to phis.
        if (!use->isInstruction())
            return true;

        if (!ArgumentsUseCanBeLazy(cx, script, use->toInstruction(), uses.index()))
            return true;
    }

    script->setNeedsArgsObj(false);
    return true;
}
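
// Illustrative behavior of the analysis above (hypothetical scripts): in
// 'function f() { return arguments.length; }' the only use of |arguments|
// is a lazy-safe length read, so the loop completes and f ends up with
// setNeedsArgsObj(false); in 'function g() { return arguments; }' the use
// cannot be lazy, the loop returns early, and g keeps its arguments object.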