michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "jit/IonBuilder.h" michael@0: michael@0: #include "mozilla/DebugOnly.h" michael@0: michael@0: #include "builtin/Eval.h" michael@0: #include "builtin/TypedObject.h" michael@0: #include "frontend/SourceNotes.h" michael@0: #include "jit/BaselineFrame.h" michael@0: #include "jit/BaselineInspector.h" michael@0: #include "jit/Ion.h" michael@0: #include "jit/IonOptimizationLevels.h" michael@0: #include "jit/IonSpewer.h" michael@0: #include "jit/Lowering.h" michael@0: #include "jit/MIRGraph.h" michael@0: #include "vm/ArgumentsObject.h" michael@0: #include "vm/Opcodes.h" michael@0: #include "vm/RegExpStatics.h" michael@0: michael@0: #include "jsinferinlines.h" michael@0: #include "jsobjinlines.h" michael@0: #include "jsopcodeinlines.h" michael@0: #include "jsscriptinlines.h" michael@0: michael@0: #include "jit/CompileInfo-inl.h" michael@0: #include "jit/ExecutionMode-inl.h" michael@0: michael@0: using namespace js; michael@0: using namespace js::jit; michael@0: michael@0: using mozilla::DebugOnly; michael@0: using mozilla::Maybe; michael@0: using mozilla::SafeCast; michael@0: michael@0: class jit::BaselineFrameInspector michael@0: { michael@0: public: michael@0: types::Type thisType; michael@0: JSObject *singletonScopeChain; michael@0: michael@0: Vector argTypes; michael@0: Vector varTypes; michael@0: michael@0: BaselineFrameInspector(TempAllocator *temp) michael@0: : thisType(types::Type::UndefinedType()), michael@0: singletonScopeChain(nullptr), michael@0: argTypes(*temp), michael@0: varTypes(*temp) michael@0: {} michael@0: }; michael@0: michael@0: BaselineFrameInspector * 
michael@0: jit::NewBaselineFrameInspector(TempAllocator *temp, BaselineFrame *frame, CompileInfo *info) michael@0: { michael@0: JS_ASSERT(frame); michael@0: michael@0: BaselineFrameInspector *inspector = temp->lifoAlloc()->new_(temp); michael@0: if (!inspector) michael@0: return nullptr; michael@0: michael@0: // Note: copying the actual values into a temporary structure for use michael@0: // during compilation could capture nursery pointers, so the values' types michael@0: // are recorded instead. michael@0: michael@0: inspector->thisType = types::GetMaybeOptimizedOutValueType(frame->thisValue()); michael@0: michael@0: if (frame->scopeChain()->hasSingletonType()) michael@0: inspector->singletonScopeChain = frame->scopeChain(); michael@0: michael@0: JSScript *script = frame->script(); michael@0: michael@0: if (script->functionNonDelazifying()) { michael@0: if (!inspector->argTypes.reserve(frame->numFormalArgs())) michael@0: return nullptr; michael@0: for (size_t i = 0; i < frame->numFormalArgs(); i++) { michael@0: if (script->formalIsAliased(i)) { michael@0: inspector->argTypes.infallibleAppend(types::Type::UndefinedType()); michael@0: } else if (!script->argsObjAliasesFormals()) { michael@0: types::Type type = types::GetMaybeOptimizedOutValueType(frame->unaliasedFormal(i)); michael@0: inspector->argTypes.infallibleAppend(type); michael@0: } else if (frame->hasArgsObj()) { michael@0: types::Type type = types::GetMaybeOptimizedOutValueType(frame->argsObj().arg(i)); michael@0: inspector->argTypes.infallibleAppend(type); michael@0: } else { michael@0: inspector->argTypes.infallibleAppend(types::Type::UndefinedType()); michael@0: } michael@0: } michael@0: } michael@0: michael@0: if (!inspector->varTypes.reserve(frame->script()->nfixed())) michael@0: return nullptr; michael@0: for (size_t i = 0; i < frame->script()->nfixed(); i++) { michael@0: if (info->isSlotAliasedAtOsr(i + info->firstLocalSlot())) { michael@0: 
inspector->varTypes.infallibleAppend(types::Type::UndefinedType()); michael@0: } else { michael@0: types::Type type = types::GetMaybeOptimizedOutValueType(frame->unaliasedLocal(i)); michael@0: inspector->varTypes.infallibleAppend(type); michael@0: } michael@0: } michael@0: michael@0: return inspector; michael@0: } michael@0: michael@0: IonBuilder::IonBuilder(JSContext *analysisContext, CompileCompartment *comp, michael@0: const JitCompileOptions &options, TempAllocator *temp, michael@0: MIRGraph *graph, types::CompilerConstraintList *constraints, michael@0: BaselineInspector *inspector, CompileInfo *info, michael@0: const OptimizationInfo *optimizationInfo, michael@0: BaselineFrameInspector *baselineFrame, size_t inliningDepth, michael@0: uint32_t loopDepth) michael@0: : MIRGenerator(comp, options, temp, graph, info, optimizationInfo), michael@0: backgroundCodegen_(nullptr), michael@0: analysisContext(analysisContext), michael@0: baselineFrame_(baselineFrame), michael@0: abortReason_(AbortReason_Disable), michael@0: descrSetHash_(nullptr), michael@0: constraints_(constraints), michael@0: analysis_(*temp, info->script()), michael@0: thisTypes(nullptr), michael@0: argTypes(nullptr), michael@0: typeArray(nullptr), michael@0: typeArrayHint(0), michael@0: bytecodeTypeMap(nullptr), michael@0: loopDepth_(loopDepth), michael@0: callerResumePoint_(nullptr), michael@0: callerBuilder_(nullptr), michael@0: cfgStack_(*temp), michael@0: loops_(*temp), michael@0: switches_(*temp), michael@0: labels_(*temp), michael@0: iterators_(*temp), michael@0: loopHeaders_(*temp), michael@0: inspector(inspector), michael@0: inliningDepth_(inliningDepth), michael@0: numLoopRestarts_(0), michael@0: failedBoundsCheck_(info->script()->failedBoundsCheck()), michael@0: failedShapeGuard_(info->script()->failedShapeGuard()), michael@0: nonStringIteration_(false), michael@0: lazyArguments_(nullptr), michael@0: inlineCallInfo_(nullptr) michael@0: { michael@0: script_ = info->script(); michael@0: pc = 
info->startPC(); michael@0: michael@0: JS_ASSERT(script()->hasBaselineScript() == (info->executionMode() != ArgumentsUsageAnalysis)); michael@0: JS_ASSERT(!!analysisContext == (info->executionMode() == DefinitePropertiesAnalysis)); michael@0: } michael@0: michael@0: void michael@0: IonBuilder::clearForBackEnd() michael@0: { michael@0: JS_ASSERT(!analysisContext); michael@0: baselineFrame_ = nullptr; michael@0: michael@0: // The caches below allocate data from the malloc heap. Release this before michael@0: // later phases of compilation to avoid leaks, as the top level IonBuilder michael@0: // is not explicitly destroyed. Note that builders for inner scripts are michael@0: // constructed on the stack and will release this memory on destruction. michael@0: gsn.purge(); michael@0: scopeCoordinateNameCache.purge(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::abort(const char *message, ...) michael@0: { michael@0: // Don't call PCToLineNumber in release builds. michael@0: #ifdef DEBUG michael@0: va_list ap; michael@0: va_start(ap, message); michael@0: abortFmt(message, ap); michael@0: va_end(ap); michael@0: IonSpew(IonSpew_Abort, "aborted @ %s:%d", script()->filename(), PCToLineNumber(script(), pc)); michael@0: #endif michael@0: return false; michael@0: } michael@0: michael@0: void michael@0: IonBuilder::spew(const char *message) michael@0: { michael@0: // Don't call PCToLineNumber in release builds. 
michael@0: #ifdef DEBUG michael@0: IonSpew(IonSpew_MIR, "%s @ %s:%d", message, script()->filename(), PCToLineNumber(script(), pc)); michael@0: #endif michael@0: } michael@0: michael@0: static inline int32_t michael@0: GetJumpOffset(jsbytecode *pc) michael@0: { michael@0: JS_ASSERT(js_CodeSpec[JSOp(*pc)].type() == JOF_JUMP); michael@0: return GET_JUMP_OFFSET(pc); michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::If(jsbytecode *join, MTest *test) michael@0: { michael@0: CFGState state; michael@0: state.state = IF_TRUE; michael@0: state.stopAt = join; michael@0: state.branch.ifFalse = test->ifFalse(); michael@0: state.branch.test = test; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MTest *test) michael@0: { michael@0: MBasicBlock *ifFalse = test->ifFalse(); michael@0: michael@0: CFGState state; michael@0: // If the end of the false path is the same as the start of the michael@0: // false path, then the "else" block is empty and we can devolve michael@0: // this to the IF_TRUE case. We handle this here because there is michael@0: // still an extra GOTO on the true path and we want stopAt to point michael@0: // there, whereas the IF_TRUE case does not have the GOTO. michael@0: state.state = (falseEnd == ifFalse->pc()) michael@0: ? 
IF_TRUE_EMPTY_ELSE michael@0: : IF_ELSE_TRUE; michael@0: state.stopAt = trueEnd; michael@0: state.branch.falseEnd = falseEnd; michael@0: state.branch.ifFalse = ifFalse; michael@0: state.branch.test = test; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::AndOr(jsbytecode *join, MBasicBlock *joinStart) michael@0: { michael@0: CFGState state; michael@0: state.state = AND_OR; michael@0: state.stopAt = join; michael@0: state.branch.ifFalse = joinStart; michael@0: state.branch.test = nullptr; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::TableSwitch(jsbytecode *exitpc, MTableSwitch *ins) michael@0: { michael@0: CFGState state; michael@0: state.state = TABLE_SWITCH; michael@0: state.stopAt = exitpc; michael@0: state.tableswitch.exitpc = exitpc; michael@0: state.tableswitch.breaks = nullptr; michael@0: state.tableswitch.ins = ins; michael@0: state.tableswitch.currentBlock = 0; michael@0: return state; michael@0: } michael@0: michael@0: JSFunction * michael@0: IonBuilder::getSingleCallTarget(types::TemporaryTypeSet *calleeTypes) michael@0: { michael@0: if (!calleeTypes) michael@0: return nullptr; michael@0: michael@0: JSObject *obj = calleeTypes->getSingleton(); michael@0: if (!obj || !obj->is()) michael@0: return nullptr; michael@0: michael@0: return &obj->as(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPolyCallTargets(types::TemporaryTypeSet *calleeTypes, bool constructing, michael@0: ObjectVector &targets, uint32_t maxTargets, bool *gotLambda) michael@0: { michael@0: JS_ASSERT(targets.empty()); michael@0: JS_ASSERT(gotLambda); michael@0: *gotLambda = false; michael@0: michael@0: if (!calleeTypes) michael@0: return true; michael@0: michael@0: if (calleeTypes->baseFlags() != 0) michael@0: return true; michael@0: michael@0: unsigned objCount = calleeTypes->getObjectCount(); michael@0: michael@0: if (objCount == 0 || 
objCount > maxTargets) michael@0: return true; michael@0: michael@0: if (!targets.reserve(objCount)) michael@0: return false; michael@0: for(unsigned i = 0; i < objCount; i++) { michael@0: JSObject *obj = calleeTypes->getSingleObject(i); michael@0: JSFunction *fun; michael@0: if (obj) { michael@0: if (!obj->is()) { michael@0: targets.clear(); michael@0: return true; michael@0: } michael@0: fun = &obj->as(); michael@0: } else { michael@0: types::TypeObject *typeObj = calleeTypes->getTypeObject(i); michael@0: JS_ASSERT(typeObj); michael@0: if (!typeObj->interpretedFunction) { michael@0: targets.clear(); michael@0: return true; michael@0: } michael@0: michael@0: fun = typeObj->interpretedFunction; michael@0: *gotLambda = true; michael@0: } michael@0: michael@0: // Don't optimize if we're constructing and the callee is not a michael@0: // constructor, so that CallKnown does not have to handle this case michael@0: // (it should always throw). michael@0: if (constructing && !fun->isInterpretedConstructor() && !fun->isNativeConstructor()) { michael@0: targets.clear(); michael@0: return true; michael@0: } michael@0: michael@0: DebugOnly appendOk = targets.append(fun); michael@0: JS_ASSERT(appendOk); michael@0: } michael@0: michael@0: // For now, only inline "singleton" lambda calls michael@0: if (*gotLambda && targets.length() > 1) michael@0: targets.clear(); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: IonBuilder::InliningDecision michael@0: IonBuilder::DontInline(JSScript *targetScript, const char *reason) michael@0: { michael@0: if (targetScript) { michael@0: IonSpew(IonSpew_Inlining, "Cannot inline %s:%u: %s", michael@0: targetScript->filename(), targetScript->lineno(), reason); michael@0: } else { michael@0: IonSpew(IonSpew_Inlining, "Cannot inline: %s", reason); michael@0: } michael@0: michael@0: return InliningDecision_DontInline; michael@0: } michael@0: michael@0: IonBuilder::InliningDecision michael@0: 
IonBuilder::canInlineTarget(JSFunction *target, CallInfo &callInfo) michael@0: { michael@0: if (!optimizationInfo().inlineInterpreted()) michael@0: return InliningDecision_DontInline; michael@0: michael@0: if (!target->isInterpreted()) michael@0: return DontInline(nullptr, "Non-interpreted target"); michael@0: michael@0: // Allow constructing lazy scripts when performing the definite properties michael@0: // analysis, as baseline has not been used to warm the caller up yet. michael@0: if (target->isInterpreted() && info().executionMode() == DefinitePropertiesAnalysis) { michael@0: RootedScript script(analysisContext, target->getOrCreateScript(analysisContext)); michael@0: if (!script) michael@0: return InliningDecision_Error; michael@0: michael@0: if (!script->hasBaselineScript() && script->canBaselineCompile()) { michael@0: MethodStatus status = BaselineCompile(analysisContext, script); michael@0: if (status == Method_Error) michael@0: return InliningDecision_Error; michael@0: if (status != Method_Compiled) michael@0: return InliningDecision_DontInline; michael@0: } michael@0: } michael@0: michael@0: if (!target->hasScript()) michael@0: return DontInline(nullptr, "Lazy script"); michael@0: michael@0: JSScript *inlineScript = target->nonLazyScript(); michael@0: if (callInfo.constructing() && !target->isInterpretedConstructor()) michael@0: return DontInline(inlineScript, "Callee is not a constructor"); michael@0: michael@0: ExecutionMode executionMode = info().executionMode(); michael@0: if (!CanIonCompile(inlineScript, executionMode)) michael@0: return DontInline(inlineScript, "Disabled Ion compilation"); michael@0: michael@0: // Don't inline functions which don't have baseline scripts. 
michael@0: if (!inlineScript->hasBaselineScript()) michael@0: return DontInline(inlineScript, "No baseline jitcode"); michael@0: michael@0: if (TooManyArguments(target->nargs())) michael@0: return DontInline(inlineScript, "Too many args"); michael@0: michael@0: if (TooManyArguments(callInfo.argc())) michael@0: return DontInline(inlineScript, "Too many args"); michael@0: michael@0: // Allow inlining of recursive calls, but only one level deep. michael@0: IonBuilder *builder = callerBuilder_; michael@0: while (builder) { michael@0: if (builder->script() == inlineScript) michael@0: return DontInline(inlineScript, "Recursive call"); michael@0: builder = builder->callerBuilder_; michael@0: } michael@0: michael@0: if (target->isHeavyweight()) michael@0: return DontInline(inlineScript, "Heavyweight function"); michael@0: michael@0: if (inlineScript->uninlineable()) michael@0: return DontInline(inlineScript, "Uninlineable script"); michael@0: michael@0: if (inlineScript->needsArgsObj()) michael@0: return DontInline(inlineScript, "Script that needs an arguments object"); michael@0: michael@0: if (!inlineScript->compileAndGo()) michael@0: return DontInline(inlineScript, "Non-compileAndGo script"); michael@0: michael@0: types::TypeObjectKey *targetType = types::TypeObjectKey::get(target); michael@0: if (targetType->unknownProperties()) michael@0: return DontInline(inlineScript, "Target type has unknown properties"); michael@0: michael@0: return InliningDecision_Inline; michael@0: } michael@0: michael@0: void michael@0: IonBuilder::popCfgStack() michael@0: { michael@0: if (cfgStack_.back().isLoop()) michael@0: loops_.popBack(); michael@0: if (cfgStack_.back().state == CFGState::LABEL) michael@0: labels_.popBack(); michael@0: cfgStack_.popBack(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::analyzeNewLoopTypes(MBasicBlock *entry, jsbytecode *start, jsbytecode *end) michael@0: { michael@0: // The phi inputs at the loop head only reflect types for variables that 
michael@0: // were present at the start of the loop. If the variable changes to a new michael@0: // type within the loop body, and that type is carried around to the loop michael@0: // head, then we need to know about the new type up front. michael@0: // michael@0: // Since SSA information hasn't been constructed for the loop body yet, we michael@0: // need a separate analysis to pick out the types that might flow around michael@0: // the loop header. This is a best-effort analysis that may either over- michael@0: // or under-approximate the set of such types. michael@0: // michael@0: // Over-approximating the types may lead to inefficient generated code, and michael@0: // under-approximating the types will cause the loop body to be analyzed michael@0: // multiple times as the correct types are deduced (see finishLoop). michael@0: michael@0: // If we restarted processing of an outer loop then get loop header types michael@0: // directly from the last time we have previously processed this loop. This michael@0: // both avoids repeated work from the bytecode traverse below, and will michael@0: // also pick up types discovered while previously building the loop body. michael@0: for (size_t i = 0; i < loopHeaders_.length(); i++) { michael@0: if (loopHeaders_[i].pc == start) { michael@0: MBasicBlock *oldEntry = loopHeaders_[i].header; michael@0: for (MPhiIterator oldPhi = oldEntry->phisBegin(); michael@0: oldPhi != oldEntry->phisEnd(); michael@0: oldPhi++) michael@0: { michael@0: MPhi *newPhi = entry->getSlot(oldPhi->slot())->toPhi(); michael@0: if (!newPhi->addBackedgeType(oldPhi->type(), oldPhi->resultTypeSet())) michael@0: return false; michael@0: } michael@0: // Update the most recent header for this loop encountered, in case michael@0: // new types flow to the phis and the loop is processed at least michael@0: // three times. 
michael@0: loopHeaders_[i].header = entry; michael@0: return true; michael@0: } michael@0: } michael@0: loopHeaders_.append(LoopHeader(start, entry)); michael@0: michael@0: jsbytecode *last = nullptr, *earlier = nullptr; michael@0: for (jsbytecode *pc = start; pc != end; earlier = last, last = pc, pc += GetBytecodeLength(pc)) { michael@0: uint32_t slot; michael@0: if (*pc == JSOP_SETLOCAL) michael@0: slot = info().localSlot(GET_LOCALNO(pc)); michael@0: else if (*pc == JSOP_SETARG) michael@0: slot = info().argSlotUnchecked(GET_ARGNO(pc)); michael@0: else michael@0: continue; michael@0: if (slot >= info().firstStackSlot()) michael@0: continue; michael@0: if (!analysis().maybeInfo(pc)) michael@0: continue; michael@0: michael@0: MPhi *phi = entry->getSlot(slot)->toPhi(); michael@0: michael@0: if (*last == JSOP_POS) michael@0: last = earlier; michael@0: michael@0: if (js_CodeSpec[*last].format & JOF_TYPESET) { michael@0: types::TemporaryTypeSet *typeSet = bytecodeTypes(last); michael@0: if (!typeSet->empty()) { michael@0: MIRType type = typeSet->getKnownMIRType(); michael@0: if (!phi->addBackedgeType(type, typeSet)) michael@0: return false; michael@0: } michael@0: } else if (*last == JSOP_GETLOCAL || *last == JSOP_GETARG) { michael@0: uint32_t slot = (*last == JSOP_GETLOCAL) michael@0: ? 
info().localSlot(GET_LOCALNO(last)) michael@0: : info().argSlotUnchecked(GET_ARGNO(last)); michael@0: if (slot < info().firstStackSlot()) { michael@0: MPhi *otherPhi = entry->getSlot(slot)->toPhi(); michael@0: if (otherPhi->hasBackedgeType()) { michael@0: if (!phi->addBackedgeType(otherPhi->type(), otherPhi->resultTypeSet())) michael@0: return false; michael@0: } michael@0: } michael@0: } else { michael@0: MIRType type = MIRType_None; michael@0: switch (*last) { michael@0: case JSOP_VOID: michael@0: case JSOP_UNDEFINED: michael@0: type = MIRType_Undefined; michael@0: break; michael@0: case JSOP_NULL: michael@0: type = MIRType_Null; michael@0: break; michael@0: case JSOP_ZERO: michael@0: case JSOP_ONE: michael@0: case JSOP_INT8: michael@0: case JSOP_INT32: michael@0: case JSOP_UINT16: michael@0: case JSOP_UINT24: michael@0: case JSOP_BITAND: michael@0: case JSOP_BITOR: michael@0: case JSOP_BITXOR: michael@0: case JSOP_BITNOT: michael@0: case JSOP_RSH: michael@0: case JSOP_LSH: michael@0: case JSOP_URSH: michael@0: type = MIRType_Int32; michael@0: break; michael@0: case JSOP_FALSE: michael@0: case JSOP_TRUE: michael@0: case JSOP_EQ: michael@0: case JSOP_NE: michael@0: case JSOP_LT: michael@0: case JSOP_LE: michael@0: case JSOP_GT: michael@0: case JSOP_GE: michael@0: case JSOP_NOT: michael@0: case JSOP_STRICTEQ: michael@0: case JSOP_STRICTNE: michael@0: case JSOP_IN: michael@0: case JSOP_INSTANCEOF: michael@0: type = MIRType_Boolean; michael@0: break; michael@0: case JSOP_DOUBLE: michael@0: type = MIRType_Double; michael@0: break; michael@0: case JSOP_STRING: michael@0: case JSOP_TYPEOF: michael@0: case JSOP_TYPEOFEXPR: michael@0: case JSOP_ITERNEXT: michael@0: type = MIRType_String; michael@0: break; michael@0: case JSOP_ADD: michael@0: case JSOP_SUB: michael@0: case JSOP_MUL: michael@0: case JSOP_DIV: michael@0: case JSOP_MOD: michael@0: case JSOP_NEG: michael@0: type = inspector->expectedResultType(last); michael@0: default: michael@0: break; michael@0: } 
michael@0: if (type != MIRType_None) { michael@0: if (!phi->addBackedgeType(type, nullptr)) michael@0: return false; michael@0: } michael@0: } michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::pushLoop(CFGState::State initial, jsbytecode *stopAt, MBasicBlock *entry, bool osr, michael@0: jsbytecode *loopHead, jsbytecode *initialPc, michael@0: jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc, michael@0: jsbytecode *continuepc) michael@0: { michael@0: if (!continuepc) michael@0: continuepc = entry->pc(); michael@0: michael@0: ControlFlowInfo loop(cfgStack_.length(), continuepc); michael@0: if (!loops_.append(loop)) michael@0: return false; michael@0: michael@0: CFGState state; michael@0: state.state = initial; michael@0: state.stopAt = stopAt; michael@0: state.loop.bodyStart = bodyStart; michael@0: state.loop.bodyEnd = bodyEnd; michael@0: state.loop.exitpc = exitpc; michael@0: state.loop.continuepc = continuepc; michael@0: state.loop.entry = entry; michael@0: state.loop.osr = osr; michael@0: state.loop.successor = nullptr; michael@0: state.loop.breaks = nullptr; michael@0: state.loop.continues = nullptr; michael@0: state.loop.initialState = initial; michael@0: state.loop.initialPc = initialPc; michael@0: state.loop.initialStopAt = stopAt; michael@0: state.loop.loopHead = loopHead; michael@0: return cfgStack_.append(state); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::init() michael@0: { michael@0: if (!types::TypeScript::FreezeTypeSets(constraints(), script(), michael@0: &thisTypes, &argTypes, &typeArray)) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: if (!analysis().init(alloc(), gsn)) michael@0: return false; michael@0: michael@0: // The baseline script normally has the bytecode type map, but compute michael@0: // it ourselves if we do not have a baseline script. 
michael@0: if (script()->hasBaselineScript()) { michael@0: bytecodeTypeMap = script()->baselineScript()->bytecodeTypeMap(); michael@0: } else { michael@0: bytecodeTypeMap = alloc_->lifoAlloc()->newArrayUninitialized(script()->nTypeSets()); michael@0: if (!bytecodeTypeMap) michael@0: return false; michael@0: types::FillBytecodeTypeMap(script(), bytecodeTypeMap); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::build() michael@0: { michael@0: if (!init()) michael@0: return false; michael@0: michael@0: if (!setCurrentAndSpecializePhis(newBlock(pc))) michael@0: return false; michael@0: if (!current) michael@0: return false; michael@0: michael@0: #ifdef DEBUG michael@0: if (info().executionMode() == SequentialExecution && script()->hasIonScript()) { michael@0: IonSpew(IonSpew_Scripts, "Recompiling script %s:%d (%p) (usecount=%d, level=%s)", michael@0: script()->filename(), script()->lineno(), (void *)script(), michael@0: (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level())); michael@0: } else { michael@0: IonSpew(IonSpew_Scripts, "Analyzing script %s:%d (%p) (usecount=%d, level=%s)", michael@0: script()->filename(), script()->lineno(), (void *)script(), michael@0: (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level())); michael@0: } michael@0: #endif michael@0: michael@0: initParameters(); michael@0: michael@0: // Initialize local variables. michael@0: for (uint32_t i = 0; i < info().nlocals(); i++) { michael@0: MConstant *undef = MConstant::New(alloc(), UndefinedValue()); michael@0: current->add(undef); michael@0: current->initSlot(info().localSlot(i), undef); michael@0: } michael@0: michael@0: // Initialize something for the scope chain. We can bail out before the michael@0: // start instruction, but the snapshot is encoded *at* the start michael@0: // instruction, which means generating any code that could load into michael@0: // registers is illegal. 
michael@0: MInstruction *scope = MConstant::New(alloc(), UndefinedValue()); michael@0: current->add(scope); michael@0: current->initSlot(info().scopeChainSlot(), scope); michael@0: michael@0: // Initialize the return value. michael@0: MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue()); michael@0: current->add(returnValue); michael@0: current->initSlot(info().returnValueSlot(), returnValue); michael@0: michael@0: // Initialize the arguments object slot to undefined if necessary. michael@0: if (info().hasArguments()) { michael@0: MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue()); michael@0: current->add(argsObj); michael@0: current->initSlot(info().argsObjSlot(), argsObj); michael@0: } michael@0: michael@0: // Emit the start instruction, so we can begin real instructions. michael@0: current->makeStart(MStart::New(alloc(), MStart::StartType_Default)); michael@0: if (instrumentedProfiling()) michael@0: current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Enter)); michael@0: michael@0: // Guard against over-recursion. Do this before we start unboxing, since michael@0: // this will create an OSI point that will read the incoming argument michael@0: // values, which is nice to do before their last real use, to minimize michael@0: // register/stack pressure. michael@0: MCheckOverRecursed *check = MCheckOverRecursed::New(alloc()); michael@0: current->add(check); michael@0: check->setResumePoint(current->entryResumePoint()); michael@0: michael@0: // Parameters have been checked to correspond to the typeset, now we unbox michael@0: // what we can in an infallible manner. michael@0: rewriteParameters(); michael@0: michael@0: // It's safe to start emitting actual IR, so now build the scope chain. 
michael@0: if (!initScopeChain()) michael@0: return false; michael@0: michael@0: if (info().needsArgsObj() && !initArgumentsObject()) michael@0: return false; michael@0: michael@0: // Prevent |this| from being DCE'd: necessary for constructors. michael@0: if (info().funMaybeLazy()) michael@0: current->getSlot(info().thisSlot())->setGuard(); michael@0: michael@0: // The type analysis phase attempts to insert unbox operations near michael@0: // definitions of values. It also attempts to replace uses in resume points michael@0: // with the narrower, unboxed variants. However, we must prevent this michael@0: // replacement from happening on values in the entry snapshot. Otherwise we michael@0: // could get this: michael@0: // michael@0: // v0 = MParameter(0) michael@0: // v1 = MParameter(1) michael@0: // -- ResumePoint(v2, v3) michael@0: // v2 = Unbox(v0, INT32) michael@0: // v3 = Unbox(v1, INT32) michael@0: // michael@0: // So we attach the initial resume point to each parameter, which the type michael@0: // analysis explicitly checks (this is the same mechanism used for michael@0: // effectful operations). michael@0: for (uint32_t i = 0; i < info().endArgSlot(); i++) { michael@0: MInstruction *ins = current->getEntrySlot(i)->toInstruction(); michael@0: if (ins->type() == MIRType_Value) michael@0: ins->setResumePoint(current->entryResumePoint()); michael@0: } michael@0: michael@0: // lazyArguments should never be accessed in |argsObjAliasesFormals| scripts. 
michael@0: if (info().hasArguments() && !info().argsObjAliasesFormals()) { michael@0: lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS)); michael@0: current->add(lazyArguments_); michael@0: } michael@0: michael@0: insertRecompileCheck(); michael@0: michael@0: if (!traverseBytecode()) michael@0: return false; michael@0: michael@0: if (!maybeAddOsrTypeBarriers()) michael@0: return false; michael@0: michael@0: if (!processIterators()) michael@0: return false; michael@0: michael@0: JS_ASSERT(loopDepth_ == 0); michael@0: abortReason_ = AbortReason_NoAbort; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::processIterators() michael@0: { michael@0: // Find phis that must directly hold an iterator live. michael@0: Vector worklist; michael@0: for (size_t i = 0; i < iterators_.length(); i++) { michael@0: MInstruction *ins = iterators_[i]; michael@0: for (MUseDefIterator iter(ins); iter; iter++) { michael@0: if (iter.def()->isPhi()) { michael@0: if (!worklist.append(iter.def()->toPhi())) michael@0: return false; michael@0: } michael@0: } michael@0: } michael@0: michael@0: // Propagate the iterator and live status of phis to all other connected michael@0: // phis. 
// (Tail of a phi worklist loop whose enclosing function begins above this
// chunk — it drains |worklist|, marking each phi as an iterator value and
// implicitly used, and enqueues any phi that uses it so the flag propagates.)
    while (!worklist.empty()) {
        MPhi *phi = worklist.popCopy();
        phi->setIterator();
        phi->setImplicitlyUsedUnchecked();

        // Propagate to phi users of this phi that are not yet flagged.
        for (MUseDefIterator iter(phi); iter; iter++) {
            if (iter.def()->isPhi()) {
                MPhi *other = iter.def()->toPhi();
                if (!other->isIterator() && !worklist.append(other))
                    return false;
            }
        }
    }

    return true;
}

// Builds MIR for a script that is being inlined into |callerBuilder|'s graph
// at |callerResumePoint|. Creates this builder's entry block, wires it to the
// caller's current block with an MGoto, seeds the scope-chain / return-value /
// |this| / argument slots from |callInfo|, then traverses the callee's
// bytecode. Returns false on error (including OOM or abort).
bool
IonBuilder::buildInline(IonBuilder *callerBuilder, MResumePoint *callerResumePoint,
                        CallInfo &callInfo)
{
    if (!init())
        return false;

    inlineCallInfo_ = &callInfo;

    IonSpew(IonSpew_Scripts, "Inlining script %s:%d (%p)",
            script()->filename(), script()->lineno(), (void *)script());

    callerBuilder_ = callerBuilder;
    callerResumePoint_ = callerResumePoint;

    // Inherit the caller's sticky failure flags so the inlined code is
    // compiled as conservatively as the caller.
    if (callerBuilder->failedBoundsCheck_)
        failedBoundsCheck_ = true;

    if (callerBuilder->failedShapeGuard_)
        failedShapeGuard_ = true;

    // Generate single entrance block.
    if (!setCurrentAndSpecializePhis(newBlock(pc)))
        return false;
    if (!current)
        return false;

    current->setCallerResumePoint(callerResumePoint);

    // Connect the entrance block to the last block in the caller's graph.
    MBasicBlock *predecessor = callerBuilder->current;
    JS_ASSERT(predecessor == callerResumePoint->block());

    // All further instructions generated from this scope should be
    // considered as part of the function that we're inlining. We also need to
    // keep track of the inlining depth because all scripts inlined on the same
    // level contiguously have only one InlineExit node.
    if (instrumentedProfiling()) {
        predecessor->add(MProfilerStackOp::New(alloc(), script(),
                                               MProfilerStackOp::InlineEnter,
                                               inliningDepth_));
    }

    predecessor->end(MGoto::New(alloc(), current));
    if (!current->addPredecessorWithoutPhis(predecessor))
        return false;

    // Initialize scope chain slot to Undefined. It's set later by |initScopeChain|.
    MInstruction *scope = MConstant::New(alloc(), UndefinedValue());
    current->add(scope);
    current->initSlot(info().scopeChainSlot(), scope);

    // Initialize |return value| slot.
    MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue());
    current->add(returnValue);
    current->initSlot(info().returnValueSlot(), returnValue);

    // Initialize |arguments| slot.
    if (info().hasArguments()) {
        MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue());
        current->add(argsObj);
        current->initSlot(info().argsObjSlot(), argsObj);
    }

    // Initialize |this| slot.
    current->initSlot(info().thisSlot(), callInfo.thisArg());

    IonSpew(IonSpew_Inlining, "Initializing %u arg slots", info().nargs());

    // NB: Ion does not inline functions which |needsArgsObj|. So using argSlot()
    // instead of argSlotUnchecked() below is OK.
    JS_ASSERT(!info().needsArgsObj());

    // Initialize actually set arguments.
    uint32_t existing_args = Min(callInfo.argc(), info().nargs());
    for (size_t i = 0; i < existing_args; ++i) {
        MDefinition *arg = callInfo.getArg(i);
        current->initSlot(info().argSlot(i), arg);
    }

    // Pass Undefined for missing arguments.
    for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
        MConstant *arg = MConstant::New(alloc(), UndefinedValue());
        current->add(arg);
        current->initSlot(info().argSlot(i), arg);
    }

    // Initialize the scope chain now that args are initialized.
    if (!initScopeChain(callInfo.fun()))
        return false;

    IonSpew(IonSpew_Inlining, "Initializing %u local slots", info().nlocals());

    // Initialize local variables.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        MConstant *undef = MConstant::New(alloc(), UndefinedValue());
        current->add(undef);
        current->initSlot(info().localSlot(i), undef);
    }

    IonSpew(IonSpew_Inlining, "Inline entry block MResumePoint %p, %u operands",
            (void *) current->entryResumePoint(), current->entryResumePoint()->numOperands());

    // +2 for the scope chain and |this|, maybe another +1 for arguments object slot.
    JS_ASSERT(current->entryResumePoint()->numOperands() == info().totalSlots());

    // Scripts with a var-scoped |arguments| binding get a lazy-arguments
    // sentinel constant to stand in for the (never materialized) object.
    if (script_->argumentsHasVarBinding()) {
        lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
        current->add(lazyArguments_);
    }

    insertRecompileCheck();

    if (!traverseBytecode())
        return false;

    return true;
}

// Rewrites the frame slot |slotIdx| to hold a definite-typed version of the
// parameter |param| (via ensureDefiniteType), when type inference has proven a
// single type. No-op when no narrowing is possible.
// NOTE(review): |argIndex| is unused in this body — presumably kept for the
// caller's bookkeeping; confirm against upstream before removing.
void
IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition *param, int32_t argIndex)
{
    JS_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());

    types::TemporaryTypeSet *types = param->resultTypeSet();
    MDefinition *actual = ensureDefiniteType(param, types->getKnownMIRType());
    if (actual == param)
        return;

    // Careful! We leave the original MParameter in the entry resume point. The
    // arguments still need to be checked unless proven otherwise at the call
    // site, and these checks can bailout. We can end up:
    //   v0 = Parameter(0)
    //   v1 = Unbox(v0, INT32)
    //   --   ResumePoint(v0)
    //
    // As usual, it would be invalid for v1 to be captured in the initial
    // resume point, rather than v0.
    current->rewriteSlot(slotIdx, actual);
}

// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards will be emitted by the code
// generator, explicitly, as part of the function prologue.
void
IonBuilder::rewriteParameters()
{
    JS_ASSERT(info().scopeChainSlot() == 0);

    // Only function scripts have parameters to rewrite.
    if (!info().funMaybeLazy())
        return;

    for (uint32_t i = info().startArgSlot(); i < info().endArgSlot(); i++) {
        MDefinition *param = current->getSlot(i);
        rewriteParameter(i, param, param->toParameter()->index());
    }
}

// Creates MParameter instructions for |this| and each formal argument and
// installs them in the entry block's slots, seeding empty type sets from the
// baseline frame when OSR provides one.
void
IonBuilder::initParameters()
{
    if (!info().funMaybeLazy())
        return;

    // If we are doing OSR on a frame which initially executed in the
    // interpreter and didn't accumulate type information, try to use that OSR
    // frame to determine possible initial types for 'this' and parameters.

    if (thisTypes->empty() && baselineFrame_)
        thisTypes->addType(baselineFrame_->thisType, alloc_->lifoAlloc());

    MParameter *param = MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
    current->add(param);
    current->initSlot(info().thisSlot(), param);

    for (uint32_t i = 0; i < info().nargs(); i++) {
        types::TemporaryTypeSet *types = &argTypes[i];
        // Only trust baseline's observed argument types if the script never
        // writes to its arguments; otherwise the snapshot may be stale.
        if (types->empty() && baselineFrame_ &&
            !script_->baselineScript()->modifiesArguments())
        {
            types->addType(baselineFrame_->argTypes[i], alloc_->lifoAlloc());
        }

        param = MParameter::New(alloc(), i, types);
        current->add(param);
        current->initSlot(info().argSlotUnchecked(i), param);
    }
}

// Emits the MIR that materializes the scope chain for this script and installs
// it on |current|. |callee| may be null, in which case an MCallee is emitted.
// Returns false on error or abort.
bool
IonBuilder::initScopeChain(MDefinition *callee)
{
    MInstruction *scope = nullptr;

    // If the script doesn't use the scopechain, then it's already initialized
    // from earlier. However, always make a scope chain when |needsArgsObj| is true
    // for the script, since arguments object construction requires the scope chain
    // to be passed in.
    if (!info().needsArgsObj() && !analysis().usesScopeChain())
        return true;

    // The scope chain is only tracked in scripts that have NAME opcodes which
    // will try to access the scope. For other scripts, the scope instructions
    // will be held live by resume points and code will still be generated for
    // them, so just use a constant undefined value.
    if (!script()->compileAndGo())
        return abort("non-CNG global scripts are not supported");

    if (JSFunction *fun = info().funMaybeLazy()) {
        if (!callee) {
            MCallee *calleeIns = MCallee::New(alloc());
            current->add(calleeIns);
            callee = calleeIns;
        }
        scope = MFunctionEnvironment::New(alloc(), callee);
        current->add(scope);

        // This reproduces what is done in CallObject::createForFunction. Skip
        // this for analyses, as the script might not have a baseline script
        // with template objects yet.
        if (fun->isHeavyweight() && !info().executionModeIsAnalysis()) {
            if (fun->isNamedLambda()) {
                scope = createDeclEnvObject(callee, scope);
                if (!scope)
                    return false;
            }

            scope = createCallObject(callee, scope);
            if (!scope)
                return false;
        }
    } else {
        // Non-function script: the scope chain is simply the global object.
        scope = constant(ObjectValue(script()->global()));
    }

    current->setScopeChain(scope);
    return true;
}

// Emits an MCreateArgumentsObject from the current scope chain and registers
// it as the block's arguments object. Only valid when |needsArgsObj|.
bool
IonBuilder::initArgumentsObject()
{
    IonSpew(IonSpew_MIR, "%s:%d - Emitting code to initialize arguments object! block=%p",
            script()->filename(), script()->lineno(), current);
    JS_ASSERT(info().needsArgsObj());
    MCreateArgumentsObject *argsObj = MCreateArgumentsObject::New(alloc(), current->scopeChain());
    current->add(argsObj);
    current->setArgumentsObject(argsObj);
    return true;
}

// Ensures the OSR value held in |*def_| (an instruction in the OSR block) is
// checked against the loop header's inferred |type|/|typeSet|: inserts a
// type barrier and/or a fallible unbox, or replaces the value with a constant
// for Null/Undefined/magic-arguments. Updates |*def_| and the slot in place.
bool
IonBuilder::addOsrValueTypeBarrier(uint32_t slot, MInstruction **def_,
                                   MIRType type, types::TemporaryTypeSet *typeSet)
{
    MInstruction *&def = *def_;
    MBasicBlock *osrBlock = def->block();

    // Clear bogus type information added in newOsrPreheader().
    def->setResultType(MIRType_Value);
    def->setResultTypeSet(nullptr);

    if (typeSet && !typeSet->unknown()) {
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    } else if (type == MIRType_Null ||
               type == MIRType_Undefined ||
               type == MIRType_MagicOptimizedArguments)
    {
        // No unbox instruction will be added below, so check the type by
        // adding a type barrier for a singleton type set.
        types::Type ntype = types::Type::PrimitiveType(ValueTypeFromMIRType(type));
        // NOTE(review): the template argument on new_ appears lost in
        // extraction; upstream reads new_<types::TemporaryTypeSet>(ntype) —
        // confirm against the repository before relying on this line.
        typeSet = alloc_->lifoAlloc()->new_(ntype);
        if (!typeSet)
            return false;
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    }

    switch (type) {
      case MIRType_Boolean:
      case MIRType_Int32:
      case MIRType_Double:
      case MIRType_String:
      case MIRType_Object:
        if (type != def->type()) {
            MUnbox *unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
            osrBlock->insertBefore(osrBlock->lastIns(), unbox);
            osrBlock->rewriteSlot(slot, unbox);
            def = unbox;
        }
        break;

      case MIRType_Null:
      {
        MConstant *c = MConstant::New(alloc(), NullValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_Undefined:
      {
        MConstant *c = MConstant::New(alloc(), UndefinedValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_MagicOptimizedArguments:
        JS_ASSERT(lazyArguments_);
        osrBlock->rewriteSlot(slot, lazyArguments_);
        def = lazyArguments_;
        break;

      default:
        break;
    }

    JS_ASSERT(def == osrBlock->getSlot(slot));
    return true;
}

// After the OSR loop has been fully built, walks every unaliased slot and adds
// the unbox/type-barrier instructions that reconcile the OSR block's untyped
// values with the loop header phis' final types. No-op for non-OSR compiles.
bool
IonBuilder::maybeAddOsrTypeBarriers()
{
    if (!info().osrPc())
        return true;

    // The loop has successfully been processed, and the loop header phis
    // have their final type. Add unboxes and type barriers in the OSR
    // block to check that the values have the appropriate type, and update
    // the types in the preheader.

    MBasicBlock *osrBlock = graph().osrBlock();
    if (!osrBlock) {
        // Because IonBuilder does not compile catch blocks, it's possible to
        // end up without an OSR block if the OSR pc is only reachable via a
        // break-statement inside the catch block. For instance:
        //
        //   for (;;) {
        //       try {
        //           throw 3;
        //       } catch(e) {
        //           break;
        //       }
        //   }
        //   while (..) { }  // <= OSR here, only reachable via catch block.
        //
        // For now we just abort in this case.
        JS_ASSERT(graph().hasTryBlock());
        return abort("OSR block only reachable through catch block");
    }

    MBasicBlock *preheader = osrBlock->getSuccessor(0);
    MBasicBlock *header = preheader->getSuccessor(0);
    static const size_t OSR_PHI_POSITION = 1;
    JS_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);

    // Skip header phis for slots below the first argument slot.
    MPhiIterator headerPhi = header->phisBegin();
    while (headerPhi != header->phisEnd() && headerPhi->slot() < info().startArgSlot())
        headerPhi++;

    for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++, headerPhi++) {
        // Aliased slots are never accessed, since they need to go through
        // the callobject. The typebarriers are added there and can be
        // discarded here.
        if (info().isSlotAliasedAtOsr(i))
            continue;

        MInstruction *def = osrBlock->getSlot(i)->toInstruction();

        JS_ASSERT(headerPhi->slot() == i);
        MPhi *preheaderPhi = preheader->getSlot(i)->toPhi();

        MIRType type = headerPhi->type();
        types::TemporaryTypeSet *typeSet = headerPhi->resultTypeSet();

        if (!addOsrValueTypeBarrier(i, &def, type, typeSet))
            return false;

        preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
        preheaderPhi->setResultType(type);
        preheaderPhi->setResultTypeSet(typeSet);
    }

    return true;
}

// We try to build a control-flow graph in the order that it would be built as
// if traversing the AST. This leads to a nice ordering and lets us build SSA
// in one pass, since the bytecode is structured.
//
// We traverse the bytecode iteratively, maintaining a current basic block.
// Each basic block has a mapping of local slots to instructions, as well as a
// stack depth. As we encounter instructions we mutate this mapping in the
// current block.
//
// Things get interesting when we encounter a control structure. This can be
// either an IFEQ, downward GOTO, or a decompiler hint stashed away in source
// notes. Once we encounter such an opcode, we recover the structure of the
// control flow (its branches and bounds), and push it on a stack.
//
// As we continue traversing the bytecode, we look for points that would
// terminate the topmost control flow path pushed on the stack. These are:
// (1) The bounds of the current structure (end of a loop or join/edge of a
//     branch).
// (2) A "return", "break", or "continue" statement.
//
// For (1), we expect that there is a current block in the progress of being
// built, and we complete the necessary edges in the CFG. For (2), we expect
// that there is no active block.
//
// For normal diamond join points, we construct Phi nodes as we add
// predecessors. For loops, care must be taken to propagate Phi nodes back
// through uses in the loop body.
bool
IonBuilder::traverseBytecode()
{
    for (;;) {
        JS_ASSERT(pc < info().limitPC());

        for (;;) {
            if (!alloc().ensureBallast())
                return false;

            // Check if we've hit an expected join point or edge in the bytecode.
            // Leaving one control structure could place us at the edge of another,
            // thus |while| instead of |if| so we don't skip any opcodes.
            if (!cfgStack_.empty() && cfgStack_.back().stopAt == pc) {
                ControlStatus status = processCfgStack();
                if (status == ControlStatus_Error)
                    return false;
                if (status == ControlStatus_Abort)
                    return abort("Aborted while processing control flow");
                if (!current)
                    return true;
                continue;
            }

            // Some opcodes need to be handled early because they affect control
            // flow, terminating the current basic block and/or instructing the
            // traversal algorithm to continue from a new pc.
            //
            // (1) If the opcode does not affect control flow, then the opcode
            //     is inspected and transformed to IR. This is the process_opcode
            //     label.
            // (2) A loop could be detected via a forward GOTO. In this case,
            //     we don't want to process the GOTO, but the following
            //     instruction.
            // (3) A RETURN, STOP, BREAK, or CONTINUE may require processing the
            //     CFG stack to terminate open branches.
            //
            // Similar to above, snooping control flow could land us at another
            // control flow point, so we iterate until it's time to inspect a real
            // opcode.
            ControlStatus status;
            if ((status = snoopControlFlow(JSOp(*pc))) == ControlStatus_None)
                break;
            if (status == ControlStatus_Error)
                return false;
            if (status == ControlStatus_Abort)
                return abort("Aborted while processing control flow");
            if (!current)
                return true;
        }

#ifdef DEBUG
        // In debug builds, after compiling this op, check that all values
        // popped by this opcode either:
        //
        //   (1) Have the ImplicitlyUsed flag set on them.
        //   (2) Have more uses than before compiling this op (the value is
        //       used as operand of a new MIR instruction).
        //
        // This is used to catch problems where IonBuilder pops a value without
        // adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
        // NOTE(review): the Vector template arguments appear lost in
        // extraction (upstream declares element type / inline capacity /
        // alloc policy here) — confirm against the repository.
        Vector popped(alloc());
        Vector poppedUses(alloc());
        unsigned nuses = GetUseCount(script_, script_->pcToOffset(pc));

        for (unsigned i = 0; i < nuses; i++) {
            MDefinition *def = current->peek(-int32_t(i + 1));
            if (!popped.append(def) || !poppedUses.append(def->defUseCount()))
                return false;
        }
#endif

        // Nothing in inspectOpcode() is allowed to advance the pc.
        JSOp op = JSOp(*pc);
        if (!inspectOpcode(op))
            return false;

#ifdef DEBUG
        for (size_t i = 0; i < popped.length(); i++) {
            switch (op) {
              case JSOP_POP:
              case JSOP_POPN:
              case JSOP_DUPAT:
              case JSOP_DUP:
              case JSOP_DUP2:
              case JSOP_PICK:
              case JSOP_SWAP:
              case JSOP_SETARG:
              case JSOP_SETLOCAL:
              case JSOP_SETRVAL:
              case JSOP_VOID:
                // Don't require SSA uses for values popped by these ops.
                break;

              case JSOP_POS:
              case JSOP_TOID:
                // These ops may leave their input on the stack without setting
                // the ImplicitlyUsed flag. If this value will be popped immediately,
                // we may replace it with |undefined|, but the difference is
                // not observable.
                JS_ASSERT(i == 0);
                if (current->peek(-1) == popped[0])
                    break;
                // FALL THROUGH

              default:
                JS_ASSERT(popped[i]->isImplicitlyUsed() ||

                          // MNewDerivedTypedObject instances are
                          // often dead unless they escape from the
                          // fn. See IonBuilder::loadTypedObjectData()
                          // for more details.
                          popped[i]->isNewDerivedTypedObject() ||

                          popped[i]->defUseCount() > poppedUses[i]);
                break;
            }
        }
#endif

        // Advance to the next opcode.
        pc += js_CodeSpec[op].length;
        current->updateTrackedPc(pc);
    }

    return true;
}

// Inspects |op| for control-flow effects before it is translated to MIR:
// returns, throws, gotos (break/continue/loop), table switches. Returns
// ControlStatus_None when the opcode has no control-flow effect and should be
// handled by inspectOpcode() instead.
IonBuilder::ControlStatus
IonBuilder::snoopControlFlow(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
        return maybeLoop(op, info().getNote(gsn, pc));

      case JSOP_POP:
        return maybeLoop(op, info().getNote(gsn, pc));

      case JSOP_RETURN:
      case JSOP_RETRVAL:
        return processReturn(op);

      case JSOP_THROW:
        return processThrow();

      case JSOP_GOTO:
      {
        // The source note (if any) tells us what kind of jump this is.
        jssrcnote *sn = info().getNote(gsn, pc);
        switch (sn ? SN_TYPE(sn) : SRC_NULL) {
          case SRC_BREAK:
          case SRC_BREAK2LABEL:
            return processBreak(op, sn);

          case SRC_CONTINUE:
            return processContinue(op);

          case SRC_SWITCHBREAK:
            return processSwitchBreak(op);

          case SRC_WHILE:
          case SRC_FOR_IN:
          case SRC_FOR_OF:
            // while (cond) { }
            return whileOrForInLoop(sn);

          default:
            // Hard assert for now - make an error later.
            MOZ_ASSUME_UNREACHABLE("unknown goto case");
        }
        break;
      }

      case JSOP_TABLESWITCH:
        return tableSwitch(op, info().getNote(gsn, pc));

      case JSOP_IFNE:
        // We should never reach an IFNE, it's a stopAt point, which will
        // trigger closing the loop.
        MOZ_ASSUME_UNREACHABLE("we should never reach an ifne!");

      default:
        break;
    }
    return ControlStatus_None;
}

// Translates a single non-control-flow opcode into MIR on |current|.
// Must not advance |pc| (traverseBytecode() does that). Returns false on
// error or when the opcode is unsupported (abort).
bool
IonBuilder::inspectOpcode(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
      case JSOP_LINENO:
      case JSOP_LOOPENTRY:
        return true;

      case JSOP_LABEL:
        return jsop_label();

      case JSOP_UNDEFINED:
        return pushConstant(UndefinedValue());

      case JSOP_IFEQ:
        return jsop_ifeq(JSOP_IFEQ);

      case JSOP_TRY:
        return jsop_try();

      case JSOP_CONDSWITCH:
        return jsop_condswitch();

      case JSOP_BITNOT:
        return jsop_bitnot();

      case JSOP_BITAND:
      case JSOP_BITOR:
      case JSOP_BITXOR:
      case JSOP_LSH:
      case JSOP_RSH:
      case JSOP_URSH:
        return jsop_bitop(op);

      case JSOP_ADD:
      case JSOP_SUB:
      case JSOP_MUL:
      case JSOP_DIV:
      case JSOP_MOD:
        return jsop_binary(op);

      case JSOP_POS:
        return jsop_pos();

      case JSOP_NEG:
        return jsop_neg();

      case JSOP_AND:
      case JSOP_OR:
        return jsop_andor(op);

      case JSOP_DEFVAR:
      case JSOP_DEFCONST:
        return jsop_defvar(GET_UINT32_INDEX(pc));

      case JSOP_DEFFUN:
        return jsop_deffun(GET_UINT32_INDEX(pc));

      case JSOP_EQ:
      case JSOP_NE:
      case JSOP_STRICTEQ:
      case JSOP_STRICTNE:
      case JSOP_LT:
      case JSOP_LE:
      case JSOP_GT:
      case JSOP_GE:
        return jsop_compare(op);

      case JSOP_DOUBLE:
        return pushConstant(info().getConst(pc));

      case JSOP_STRING:
        return pushConstant(StringValue(info().getAtom(pc)));

      case JSOP_ZERO:
        return pushConstant(Int32Value(0));

      case JSOP_ONE:
        return pushConstant(Int32Value(1));

      case JSOP_NULL:
        return pushConstant(NullValue());

      case JSOP_VOID:
        current->pop();
        return pushConstant(UndefinedValue());

      case JSOP_HOLE:
        return pushConstant(MagicValue(JS_ELEMENTS_HOLE));

      case JSOP_FALSE:
        return pushConstant(BooleanValue(false));

      case JSOP_TRUE:
        return pushConstant(BooleanValue(true));

      case JSOP_ARGUMENTS:
        return jsop_arguments();

      case JSOP_RUNONCE:
        return jsop_runonce();

      case JSOP_REST:
        return jsop_rest();

      case JSOP_GETARG:
        // When formals are stored in the arguments object, read through it;
        // otherwise the formal lives in its frame slot.
        if (info().argsObjAliasesFormals()) {
            MGetArgumentsObjectArg *getArg = MGetArgumentsObjectArg::New(alloc(),
                                                                         current->argumentsObject(),
                                                                         GET_ARGNO(pc));
            current->add(getArg);
            current->push(getArg);
        } else {
            current->pushArg(GET_ARGNO(pc));
        }
        return true;

      case JSOP_SETARG:
        return jsop_setarg(GET_ARGNO(pc));

      case JSOP_GETLOCAL:
        current->pushLocal(GET_LOCALNO(pc));
        return true;

      case JSOP_SETLOCAL:
        current->setLocal(GET_LOCALNO(pc));
        return true;

      case JSOP_POP:
        current->pop();

        // POP opcodes frequently appear where values are killed, e.g. after
        // SET* opcodes. Place a resume point afterwards to avoid capturing
        // the dead value in later snapshots, except in places where that
        // resume point is obviously unnecessary.
        if (pc[JSOP_POP_LENGTH] == JSOP_POP)
            return true;
        return maybeInsertResume();

      case JSOP_POPN:
        for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++)
            current->pop();
        return true;

      case JSOP_DUPAT:
        current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
        return true;

      case JSOP_NEWINIT:
        if (GET_UINT8(pc) == JSProto_Array)
            return jsop_newarray(0);
        return jsop_newobject();

      case JSOP_NEWARRAY:
        return jsop_newarray(GET_UINT24(pc));

      case JSOP_NEWOBJECT:
        return jsop_newobject();

      case JSOP_INITELEM:
        return jsop_initelem();

      case JSOP_INITELEM_ARRAY:
        return jsop_initelem_array();

      case JSOP_INITPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop(name);
      }

      case JSOP_MUTATEPROTO:
      {
        return jsop_mutateproto();
      }

      case JSOP_INITPROP_GETTER:
      case JSOP_INITPROP_SETTER: {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop_getter_setter(name);
      }

      case JSOP_INITELEM_GETTER:
      case JSOP_INITELEM_SETTER:
        return jsop_initelem_getter_setter();

      case JSOP_ENDINIT:
        return true;

      case JSOP_FUNCALL:
        return jsop_funcall(GET_ARGC(pc));

      case JSOP_FUNAPPLY:
        return jsop_funapply(GET_ARGC(pc));

      case JSOP_CALL:
      case JSOP_NEW:
        return jsop_call(GET_ARGC(pc), (JSOp)*pc == JSOP_NEW);

      case JSOP_EVAL:
        return jsop_eval(GET_ARGC(pc));

      case JSOP_INT8:
        return pushConstant(Int32Value(GET_INT8(pc)));

      case JSOP_UINT16:
        return pushConstant(Int32Value(GET_UINT16(pc)));

      case JSOP_GETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getgname(name);
      }

      case JSOP_BINDGNAME:
        return pushConstant(ObjectValue(script()->global()));

      case JSOP_SETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        JSObject *obj = &script()->global();
        return setStaticName(obj, name);
      }

      case JSOP_NAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getname(name);
      }

      case JSOP_GETINTRINSIC:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_intrinsic(name);
      }

      case JSOP_BINDNAME:
        return jsop_bindname(info().getName(pc));

      case JSOP_DUP:
        current->pushSlot(current->stackDepth() - 1);
        return true;

      case JSOP_DUP2:
        return jsop_dup2();

      case JSOP_SWAP:
        current->swapAt(-1);
        return true;

      case JSOP_PICK:
        current->pick(-GET_INT8(pc));
        return true;

      case JSOP_GETALIASEDVAR:
        return jsop_getaliasedvar(ScopeCoordinate(pc));

      case JSOP_SETALIASEDVAR:
        return jsop_setaliasedvar(ScopeCoordinate(pc));

      case JSOP_UINT24:
        return pushConstant(Int32Value(GET_UINT24(pc)));

      case JSOP_INT32:
        return pushConstant(Int32Value(GET_INT32(pc)));

      case JSOP_LOOPHEAD:
        // JSOP_LOOPHEAD is handled when processing the loop header.
        MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD outside loop");

      case JSOP_GETELEM:
      case JSOP_CALLELEM:
        return jsop_getelem();

      case JSOP_SETELEM:
        return jsop_setelem();

      case JSOP_LENGTH:
        return jsop_length();

      case JSOP_NOT:
        return jsop_not();

      case JSOP_THIS:
        return jsop_this();

      case JSOP_CALLEE: {
        MDefinition *callee = getCallee();
        current->push(callee);
        return true;
      }

      case JSOP_GETPROP:
      case JSOP_CALLPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getprop(name);
      }

      case JSOP_SETPROP:
      case JSOP_SETNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_setprop(name);
      }

      case JSOP_DELPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_delprop(name);
      }

      case JSOP_DELELEM:
        return jsop_delelem();

      case JSOP_REGEXP:
        return jsop_regexp(info().getRegExp(pc));

      case JSOP_OBJECT:
        return jsop_object(info().getObject(pc));

      case JSOP_TYPEOF:
      case JSOP_TYPEOFEXPR:
        return jsop_typeof();

      case JSOP_TOID:
        return jsop_toid();

      case JSOP_LAMBDA:
        return jsop_lambda(info().getFunction(pc));

      case JSOP_LAMBDA_ARROW:
        return jsop_lambda_arrow(info().getFunction(pc));

      case JSOP_ITER:
        return jsop_iter(GET_INT8(pc));

      case JSOP_ITERNEXT:
        return jsop_iternext();

      case JSOP_MOREITER:
        return jsop_itermore();

      case JSOP_ENDITER:
        return jsop_iterend();

      case JSOP_IN:
        return jsop_in();

      case JSOP_SETRVAL:
        JS_ASSERT(!script()->noScriptRval());
        current->setSlot(info().returnValueSlot(), current->pop());
        return true;

      case JSOP_INSTANCEOF:
        return jsop_instanceof();

      case JSOP_DEBUGLEAVEBLOCK:
        return true;

      default:
#ifdef DEBUG
        return abort("Unsupported opcode: %s (line %d)", js_CodeName[op], info().lineno(pc));
#else
        return abort("Unsupported opcode: %d (line %d)", op, info().lineno(pc));
#endif
    }
}

// Given that the current control flow structure has ended forcefully,
// via a return, break, or continue (rather than joining), propagate the
// termination up. For example, a return nested 5 loops deep may terminate
// every outer loop at once, if there are no intervening conditionals:
//
//   for (...) {
//     for (...) {
//       return x;
//     }
//   }
//
// If |current| is nullptr when this function returns, then there is no more
// control flow to be processed.
michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processControlEnd() michael@0: { michael@0: JS_ASSERT(!current); michael@0: michael@0: if (cfgStack_.empty()) { michael@0: // If there is no more control flow to process, then this is the michael@0: // last return in the function. michael@0: return ControlStatus_Ended; michael@0: } michael@0: michael@0: return processCfgStack(); michael@0: } michael@0: michael@0: // Processes the top of the CFG stack. This is used from two places: michael@0: // (1) processControlEnd(), whereby a break, continue, or return may interrupt michael@0: // an in-progress CFG structure before reaching its actual termination michael@0: // point in the bytecode. michael@0: // (2) traverseBytecode(), whereby we reach the last instruction in a CFG michael@0: // structure. michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processCfgStack() michael@0: { michael@0: ControlStatus status = processCfgEntry(cfgStack_.back()); michael@0: michael@0: // If this terminated a CFG structure, act like processControlEnd() and michael@0: // keep propagating upward. michael@0: while (status == ControlStatus_Ended) { michael@0: popCfgStack(); michael@0: if (cfgStack_.empty()) michael@0: return status; michael@0: status = processCfgEntry(cfgStack_.back()); michael@0: } michael@0: michael@0: // If some join took place, the current structure is finished. 
michael@0: if (status == ControlStatus_Joined) michael@0: popCfgStack(); michael@0: michael@0: return status; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processCfgEntry(CFGState &state) michael@0: { michael@0: switch (state.state) { michael@0: case CFGState::IF_TRUE: michael@0: case CFGState::IF_TRUE_EMPTY_ELSE: michael@0: return processIfEnd(state); michael@0: michael@0: case CFGState::IF_ELSE_TRUE: michael@0: return processIfElseTrueEnd(state); michael@0: michael@0: case CFGState::IF_ELSE_FALSE: michael@0: return processIfElseFalseEnd(state); michael@0: michael@0: case CFGState::DO_WHILE_LOOP_BODY: michael@0: return processDoWhileBodyEnd(state); michael@0: michael@0: case CFGState::DO_WHILE_LOOP_COND: michael@0: return processDoWhileCondEnd(state); michael@0: michael@0: case CFGState::WHILE_LOOP_COND: michael@0: return processWhileCondEnd(state); michael@0: michael@0: case CFGState::WHILE_LOOP_BODY: michael@0: return processWhileBodyEnd(state); michael@0: michael@0: case CFGState::FOR_LOOP_COND: michael@0: return processForCondEnd(state); michael@0: michael@0: case CFGState::FOR_LOOP_BODY: michael@0: return processForBodyEnd(state); michael@0: michael@0: case CFGState::FOR_LOOP_UPDATE: michael@0: return processForUpdateEnd(state); michael@0: michael@0: case CFGState::TABLE_SWITCH: michael@0: return processNextTableSwitchCase(state); michael@0: michael@0: case CFGState::COND_SWITCH_CASE: michael@0: return processCondSwitchCase(state); michael@0: michael@0: case CFGState::COND_SWITCH_BODY: michael@0: return processCondSwitchBody(state); michael@0: michael@0: case CFGState::AND_OR: michael@0: return processAndOrEnd(state); michael@0: michael@0: case CFGState::LABEL: michael@0: return processLabelEnd(state); michael@0: michael@0: case CFGState::TRY: michael@0: return processTryEnd(state); michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unknown cfgstate"); michael@0: } michael@0: } michael@0: michael@0: 
IonBuilder::ControlStatus michael@0: IonBuilder::processIfEnd(CFGState &state) michael@0: { michael@0: if (current) { michael@0: // Here, the false block is the join point. Create an edge from the michael@0: // current block to the false block. Note that a RETURN opcode michael@0: // could have already ended the block. michael@0: current->end(MGoto::New(alloc(), state.branch.ifFalse)); michael@0: michael@0: if (!state.branch.ifFalse->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) michael@0: return ControlStatus_Error; michael@0: graph().moveBlockToEnd(current); michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processIfElseTrueEnd(CFGState &state) michael@0: { michael@0: // We've reached the end of the true branch of an if-else. Don't michael@0: // create an edge yet, just transition to parsing the false branch. michael@0: state.state = CFGState::IF_ELSE_FALSE; michael@0: state.branch.ifTrue = current; michael@0: state.stopAt = state.branch.falseEnd; michael@0: pc = state.branch.ifFalse->pc(); michael@0: if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) michael@0: return ControlStatus_Error; michael@0: graph().moveBlockToEnd(current); michael@0: michael@0: if (state.branch.test) michael@0: filterTypesAtTest(state.branch.test); michael@0: michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processIfElseFalseEnd(CFGState &state) michael@0: { michael@0: // Update the state to have the latest block from the false path. michael@0: state.branch.ifFalse = current; michael@0: michael@0: // To create the join node, we need an incoming edge that has not been michael@0: // terminated yet. michael@0: MBasicBlock *pred = state.branch.ifTrue michael@0: ? 
state.branch.ifTrue michael@0: : state.branch.ifFalse; michael@0: MBasicBlock *other = (pred == state.branch.ifTrue) ? state.branch.ifFalse : state.branch.ifTrue; michael@0: michael@0: if (!pred) michael@0: return ControlStatus_Ended; michael@0: michael@0: // Create a new block to represent the join. michael@0: MBasicBlock *join = newBlock(pred, state.branch.falseEnd); michael@0: if (!join) michael@0: return ControlStatus_Error; michael@0: michael@0: // Create edges from the true and false blocks as needed. michael@0: pred->end(MGoto::New(alloc(), join)); michael@0: michael@0: if (other) { michael@0: other->end(MGoto::New(alloc(), join)); michael@0: if (!join->addPredecessor(alloc(), other)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // Ignore unreachable remainder of false block if existent. michael@0: if (!setCurrentAndSpecializePhis(join)) michael@0: return ControlStatus_Error; michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processBrokenLoop(CFGState &state) michael@0: { michael@0: JS_ASSERT(!current); michael@0: michael@0: JS_ASSERT(loopDepth_); michael@0: loopDepth_--; michael@0: michael@0: // A broken loop is not a real loop (it has no header or backedge), so michael@0: // reset the loop depth. michael@0: for (MBasicBlockIterator i(graph().begin(state.loop.entry)); i != graph().end(); i++) { michael@0: if (i->loopDepth() > loopDepth_) michael@0: i->setLoopDepth(i->loopDepth() - 1); michael@0: } michael@0: michael@0: // If the loop started with a condition (while/for) then even if the michael@0: // structure never actually loops, the condition itself can still fail and michael@0: // thus we must resume at the successor, if one exists. 
michael@0: if (!setCurrentAndSpecializePhis(state.loop.successor)) michael@0: return ControlStatus_Error; michael@0: if (current) { michael@0: JS_ASSERT(current->loopDepth() == loopDepth_); michael@0: graph().moveBlockToEnd(current); michael@0: } michael@0: michael@0: // Join the breaks together and continue parsing. michael@0: if (state.loop.breaks) { michael@0: MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc); michael@0: if (!block) michael@0: return ControlStatus_Error; michael@0: michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), block)); michael@0: if (!block->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: if (!setCurrentAndSpecializePhis(block)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // If the loop is not gated on a condition, and has only returns, we'll michael@0: // reach this case. For example: michael@0: // do { ... return; } while (); michael@0: if (!current) michael@0: return ControlStatus_Ended; michael@0: michael@0: // Otherwise, the loop is gated on a condition and/or has breaks so keep michael@0: // parsing at the successor. michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::finishLoop(CFGState &state, MBasicBlock *successor) michael@0: { michael@0: JS_ASSERT(current); michael@0: michael@0: JS_ASSERT(loopDepth_); michael@0: loopDepth_--; michael@0: JS_ASSERT_IF(successor, successor->loopDepth() == loopDepth_); michael@0: michael@0: // Compute phis in the loop header and propagate them throughout the loop, michael@0: // including the successor. 
michael@0: AbortReason r = state.loop.entry->setBackedge(current); michael@0: if (r == AbortReason_Alloc) michael@0: return ControlStatus_Error; michael@0: if (r == AbortReason_Disable) { michael@0: // If there are types for variables on the backedge that were not michael@0: // present at the original loop header, then uses of the variables' michael@0: // phis may have generated incorrect nodes. The new types have been michael@0: // incorporated into the header phis, so remove all blocks for the michael@0: // loop body and restart with the new types. michael@0: return restartLoop(state); michael@0: } michael@0: michael@0: if (successor) { michael@0: graph().moveBlockToEnd(successor); michael@0: successor->inheritPhis(state.loop.entry); michael@0: } michael@0: michael@0: if (state.loop.breaks) { michael@0: // Propagate phis placed in the header to individual break exit points. michael@0: DeferredEdge *edge = state.loop.breaks; michael@0: while (edge) { michael@0: edge->block->inheritPhis(state.loop.entry); michael@0: edge = edge->next; michael@0: } michael@0: michael@0: // Create a catch block to join all break exits. michael@0: MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc); michael@0: if (!block) michael@0: return ControlStatus_Error; michael@0: michael@0: if (successor) { michael@0: // Finally, create an unconditional edge from the successor to the michael@0: // catch block. michael@0: successor->end(MGoto::New(alloc(), block)); michael@0: if (!block->addPredecessor(alloc(), successor)) michael@0: return ControlStatus_Error; michael@0: } michael@0: successor = block; michael@0: } michael@0: michael@0: if (!setCurrentAndSpecializePhis(successor)) michael@0: return ControlStatus_Error; michael@0: michael@0: // An infinite loop (for (;;) { }) will not have a successor. 
michael@0: if (!current) michael@0: return ControlStatus_Ended; michael@0: michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::restartLoop(CFGState state) michael@0: { michael@0: spew("New types at loop header, restarting loop body"); michael@0: michael@0: if (js_JitOptions.limitScriptSize) { michael@0: if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS) michael@0: return ControlStatus_Abort; michael@0: } michael@0: michael@0: MBasicBlock *header = state.loop.entry; michael@0: michael@0: // Remove all blocks in the loop body other than the header, which has phis michael@0: // of the appropriate type and incoming edges to preserve. michael@0: graph().removeBlocksAfter(header); michael@0: michael@0: // Remove all instructions from the header itself, and all resume points michael@0: // except the entry resume point. michael@0: header->discardAllInstructions(); michael@0: header->discardAllResumePoints(/* discardEntry = */ false); michael@0: header->setStackDepth(header->getPredecessor(0)->stackDepth()); michael@0: michael@0: popCfgStack(); michael@0: michael@0: loopDepth_++; michael@0: michael@0: if (!pushLoop(state.loop.initialState, state.loop.initialStopAt, header, state.loop.osr, michael@0: state.loop.loopHead, state.loop.initialPc, michael@0: state.loop.bodyStart, state.loop.bodyEnd, michael@0: state.loop.exitpc, state.loop.continuepc)) michael@0: { michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: CFGState &nstate = cfgStack_.back(); michael@0: michael@0: nstate.loop.condpc = state.loop.condpc; michael@0: nstate.loop.updatepc = state.loop.updatepc; michael@0: nstate.loop.updateEnd = state.loop.updateEnd; michael@0: michael@0: // Don't specializePhis(), as the header has been visited before and the michael@0: // phis have already had their type set. 
michael@0: setCurrent(header); michael@0: michael@0: if (!jsop_loophead(nstate.loop.loopHead)) michael@0: return ControlStatus_Error; michael@0: michael@0: pc = nstate.loop.initialPc; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processDoWhileBodyEnd(CFGState &state) michael@0: { michael@0: if (!processDeferredContinues(state)) michael@0: return ControlStatus_Error; michael@0: michael@0: // No current means control flow cannot reach the condition, so this will michael@0: // never loop. michael@0: if (!current) michael@0: return processBrokenLoop(state); michael@0: michael@0: MBasicBlock *header = newBlock(current, state.loop.updatepc); michael@0: if (!header) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), header)); michael@0: michael@0: state.state = CFGState::DO_WHILE_LOOP_COND; michael@0: state.stopAt = state.loop.updateEnd; michael@0: pc = state.loop.updatepc; michael@0: if (!setCurrentAndSpecializePhis(header)) michael@0: return ControlStatus_Error; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processDoWhileCondEnd(CFGState &state) michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_IFNE); michael@0: michael@0: // We're guaranteed a |current|, it's impossible to break or return from michael@0: // inside the conditional expression. michael@0: JS_ASSERT(current); michael@0: michael@0: // Pop the last value, and create the successor block. michael@0: MDefinition *vins = current->pop(); michael@0: MBasicBlock *successor = newBlock(current, GetNextPc(pc), loopDepth_ - 1); michael@0: if (!successor) michael@0: return ControlStatus_Error; michael@0: michael@0: // Test for do {} while(false) and don't create a loop in that case. 
michael@0: if (vins->isConstant()) { michael@0: MConstant *cte = vins->toConstant(); michael@0: if (cte->value().isBoolean() && !cte->value().toBoolean()) { michael@0: current->end(MGoto::New(alloc(), successor)); michael@0: current = nullptr; michael@0: michael@0: state.loop.successor = successor; michael@0: return processBrokenLoop(state); michael@0: } michael@0: } michael@0: michael@0: // Create the test instruction and end the current block. michael@0: MTest *test = MTest::New(alloc(), vins, state.loop.entry, successor); michael@0: current->end(test); michael@0: return finishLoop(state, successor); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processWhileCondEnd(CFGState &state) michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFEQ); michael@0: michael@0: // Balance the stack past the IFNE. michael@0: MDefinition *ins = current->pop(); michael@0: michael@0: // Create the body and successor blocks. michael@0: MBasicBlock *body = newBlock(current, state.loop.bodyStart); michael@0: state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1); michael@0: if (!body || !state.loop.successor) michael@0: return ControlStatus_Error; michael@0: michael@0: MTest *test; michael@0: if (JSOp(*pc) == JSOP_IFNE) michael@0: test = MTest::New(alloc(), ins, body, state.loop.successor); michael@0: else michael@0: test = MTest::New(alloc(), ins, state.loop.successor, body); michael@0: current->end(test); michael@0: michael@0: state.state = CFGState::WHILE_LOOP_BODY; michael@0: state.stopAt = state.loop.bodyEnd; michael@0: pc = state.loop.bodyStart; michael@0: if (!setCurrentAndSpecializePhis(body)) michael@0: return ControlStatus_Error; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processWhileBodyEnd(CFGState &state) michael@0: { michael@0: if (!processDeferredContinues(state)) michael@0: return ControlStatus_Error; 
michael@0: michael@0: if (!current) michael@0: return processBrokenLoop(state); michael@0: michael@0: current->end(MGoto::New(alloc(), state.loop.entry)); michael@0: return finishLoop(state, state.loop.successor); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processForCondEnd(CFGState &state) michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_IFNE); michael@0: michael@0: // Balance the stack past the IFNE. michael@0: MDefinition *ins = current->pop(); michael@0: michael@0: // Create the body and successor blocks. michael@0: MBasicBlock *body = newBlock(current, state.loop.bodyStart); michael@0: state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1); michael@0: if (!body || !state.loop.successor) michael@0: return ControlStatus_Error; michael@0: michael@0: MTest *test = MTest::New(alloc(), ins, body, state.loop.successor); michael@0: current->end(test); michael@0: michael@0: state.state = CFGState::FOR_LOOP_BODY; michael@0: state.stopAt = state.loop.bodyEnd; michael@0: pc = state.loop.bodyStart; michael@0: if (!setCurrentAndSpecializePhis(body)) michael@0: return ControlStatus_Error; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processForBodyEnd(CFGState &state) michael@0: { michael@0: if (!processDeferredContinues(state)) michael@0: return ControlStatus_Error; michael@0: michael@0: // If there is no updatepc, just go right to processing what would be the michael@0: // end of the update clause. Otherwise, |current| might be nullptr; if this is michael@0: // the case, the udpate is unreachable anyway. 
michael@0: if (!state.loop.updatepc || !current) michael@0: return processForUpdateEnd(state); michael@0: michael@0: pc = state.loop.updatepc; michael@0: michael@0: state.state = CFGState::FOR_LOOP_UPDATE; michael@0: state.stopAt = state.loop.updateEnd; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processForUpdateEnd(CFGState &state) michael@0: { michael@0: // If there is no current, we couldn't reach the loop edge and there was no michael@0: // update clause. michael@0: if (!current) michael@0: return processBrokenLoop(state); michael@0: michael@0: current->end(MGoto::New(alloc(), state.loop.entry)); michael@0: return finishLoop(state, state.loop.successor); michael@0: } michael@0: michael@0: IonBuilder::DeferredEdge * michael@0: IonBuilder::filterDeadDeferredEdges(DeferredEdge *edge) michael@0: { michael@0: DeferredEdge *head = edge, *prev = nullptr; michael@0: michael@0: while (edge) { michael@0: if (edge->block->isDead()) { michael@0: if (prev) michael@0: prev->next = edge->next; michael@0: else michael@0: head = edge->next; michael@0: } else { michael@0: prev = edge; michael@0: } michael@0: edge = edge->next; michael@0: } michael@0: michael@0: // There must be at least one deferred edge from a block that was not michael@0: // deleted; blocks are deleted when restarting processing of a loop, and michael@0: // the final version of the loop body will have edges from live blocks. michael@0: JS_ASSERT(head); michael@0: michael@0: return head; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::processDeferredContinues(CFGState &state) michael@0: { michael@0: // If there are any continues for this loop, and there is an update block, michael@0: // then we need to create a new basic block to house the update. 
michael@0: if (state.loop.continues) { michael@0: DeferredEdge *edge = filterDeadDeferredEdges(state.loop.continues); michael@0: michael@0: MBasicBlock *update = newBlock(edge->block, loops_.back().continuepc); michael@0: if (!update) michael@0: return false; michael@0: michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), update)); michael@0: if (!update->addPredecessor(alloc(), current)) michael@0: return false; michael@0: } michael@0: michael@0: // No need to use addPredecessor for first edge, michael@0: // because it is already predecessor. michael@0: edge->block->end(MGoto::New(alloc(), update)); michael@0: edge = edge->next; michael@0: michael@0: // Remaining edges michael@0: while (edge) { michael@0: edge->block->end(MGoto::New(alloc(), update)); michael@0: if (!update->addPredecessor(alloc(), edge->block)) michael@0: return false; michael@0: edge = edge->next; michael@0: } michael@0: state.loop.continues = nullptr; michael@0: michael@0: if (!setCurrentAndSpecializePhis(update)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::createBreakCatchBlock(DeferredEdge *edge, jsbytecode *pc) michael@0: { michael@0: edge = filterDeadDeferredEdges(edge); michael@0: michael@0: // Create block, using the first break statement as predecessor michael@0: MBasicBlock *successor = newBlock(edge->block, pc); michael@0: if (!successor) michael@0: return nullptr; michael@0: michael@0: // No need to use addPredecessor for first edge, michael@0: // because it is already predecessor. michael@0: edge->block->end(MGoto::New(alloc(), successor)); michael@0: edge = edge->next; michael@0: michael@0: // Finish up remaining breaks. 
michael@0: while (edge) { michael@0: edge->block->end(MGoto::New(alloc(), successor)); michael@0: if (!successor->addPredecessor(alloc(), edge->block)) michael@0: return nullptr; michael@0: edge = edge->next; michael@0: } michael@0: michael@0: return successor; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processNextTableSwitchCase(CFGState &state) michael@0: { michael@0: JS_ASSERT(state.state == CFGState::TABLE_SWITCH); michael@0: michael@0: state.tableswitch.currentBlock++; michael@0: michael@0: // Test if there are still unprocessed successors (cases/default) michael@0: if (state.tableswitch.currentBlock >= state.tableswitch.ins->numBlocks()) michael@0: return processSwitchEnd(state.tableswitch.breaks, state.tableswitch.exitpc); michael@0: michael@0: // Get the next successor michael@0: MBasicBlock *successor = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock); michael@0: michael@0: // Add current block as predecessor if available. michael@0: // This means the previous case didn't have a break statement. michael@0: // So flow will continue in this block. michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), successor)); michael@0: if (!successor->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // Insert successor after the current block, to maintain RPO. 
michael@0: graph().moveBlockToEnd(successor); michael@0: michael@0: // If this is the last successor the block should stop at the end of the tableswitch michael@0: // Else it should stop at the start of the next successor michael@0: if (state.tableswitch.currentBlock+1 < state.tableswitch.ins->numBlocks()) michael@0: state.stopAt = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock+1)->pc(); michael@0: else michael@0: state.stopAt = state.tableswitch.exitpc; michael@0: michael@0: if (!setCurrentAndSpecializePhis(successor)) michael@0: return ControlStatus_Error; michael@0: pc = current->pc(); michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processAndOrEnd(CFGState &state) michael@0: { michael@0: // We just processed the RHS of an && or || expression. michael@0: // Now jump to the join point (the false block). michael@0: current->end(MGoto::New(alloc(), state.branch.ifFalse)); michael@0: michael@0: if (!state.branch.ifFalse->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: michael@0: if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) michael@0: return ControlStatus_Error; michael@0: graph().moveBlockToEnd(current); michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processLabelEnd(CFGState &state) michael@0: { michael@0: JS_ASSERT(state.state == CFGState::LABEL); michael@0: michael@0: // If there are no breaks and no current, controlflow is terminated. michael@0: if (!state.label.breaks && !current) michael@0: return ControlStatus_Ended; michael@0: michael@0: // If there are no breaks to this label, there's nothing to do. 
michael@0: if (!state.label.breaks) michael@0: return ControlStatus_Joined; michael@0: michael@0: MBasicBlock *successor = createBreakCatchBlock(state.label.breaks, state.stopAt); michael@0: if (!successor) michael@0: return ControlStatus_Error; michael@0: michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), successor)); michael@0: if (!successor->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: pc = state.stopAt; michael@0: if (!setCurrentAndSpecializePhis(successor)) michael@0: return ControlStatus_Error; michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processTryEnd(CFGState &state) michael@0: { michael@0: JS_ASSERT(state.state == CFGState::TRY); michael@0: michael@0: if (!state.try_.successor) { michael@0: JS_ASSERT(!current); michael@0: return ControlStatus_Ended; michael@0: } michael@0: michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), state.try_.successor)); michael@0: michael@0: if (!state.try_.successor->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // Start parsing the code after this try-catch statement. michael@0: if (!setCurrentAndSpecializePhis(state.try_.successor)) michael@0: return ControlStatus_Error; michael@0: graph().moveBlockToEnd(current); michael@0: pc = current->pc(); michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processBreak(JSOp op, jssrcnote *sn) michael@0: { michael@0: JS_ASSERT(op == JSOP_GOTO); michael@0: michael@0: JS_ASSERT(SN_TYPE(sn) == SRC_BREAK || michael@0: SN_TYPE(sn) == SRC_BREAK2LABEL); michael@0: michael@0: // Find the break target. 
michael@0: jsbytecode *target = pc + GetJumpOffset(pc); michael@0: DebugOnly found = false; michael@0: michael@0: if (SN_TYPE(sn) == SRC_BREAK2LABEL) { michael@0: for (size_t i = labels_.length() - 1; i < labels_.length(); i--) { michael@0: CFGState &cfg = cfgStack_[labels_[i].cfgEntry]; michael@0: JS_ASSERT(cfg.state == CFGState::LABEL); michael@0: if (cfg.stopAt == target) { michael@0: cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks); michael@0: found = true; michael@0: break; michael@0: } michael@0: } michael@0: } else { michael@0: for (size_t i = loops_.length() - 1; i < loops_.length(); i--) { michael@0: CFGState &cfg = cfgStack_[loops_[i].cfgEntry]; michael@0: JS_ASSERT(cfg.isLoop()); michael@0: if (cfg.loop.exitpc == target) { michael@0: cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks); michael@0: found = true; michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: michael@0: JS_ASSERT(found); michael@0: michael@0: setCurrent(nullptr); michael@0: pc += js_CodeSpec[op].length; michael@0: return processControlEnd(); michael@0: } michael@0: michael@0: static inline jsbytecode * michael@0: EffectiveContinue(jsbytecode *pc) michael@0: { michael@0: if (JSOp(*pc) == JSOP_GOTO) michael@0: return pc + GetJumpOffset(pc); michael@0: return pc; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processContinue(JSOp op) michael@0: { michael@0: JS_ASSERT(op == JSOP_GOTO); michael@0: michael@0: // Find the target loop. michael@0: CFGState *found = nullptr; michael@0: jsbytecode *target = pc + GetJumpOffset(pc); michael@0: for (size_t i = loops_.length() - 1; i < loops_.length(); i--) { michael@0: if (loops_[i].continuepc == target || michael@0: EffectiveContinue(loops_[i].continuepc) == target) michael@0: { michael@0: found = &cfgStack_[loops_[i].cfgEntry]; michael@0: break; michael@0: } michael@0: } michael@0: michael@0: // There must always be a valid target loop structure. 
If not, there's michael@0: // probably an off-by-something error in which pc we track. michael@0: JS_ASSERT(found); michael@0: CFGState &state = *found; michael@0: michael@0: state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues); michael@0: michael@0: setCurrent(nullptr); michael@0: pc += js_CodeSpec[op].length; michael@0: return processControlEnd(); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processSwitchBreak(JSOp op) michael@0: { michael@0: JS_ASSERT(op == JSOP_GOTO); michael@0: michael@0: // Find the target switch. michael@0: CFGState *found = nullptr; michael@0: jsbytecode *target = pc + GetJumpOffset(pc); michael@0: for (size_t i = switches_.length() - 1; i < switches_.length(); i--) { michael@0: if (switches_[i].continuepc == target) { michael@0: found = &cfgStack_[switches_[i].cfgEntry]; michael@0: break; michael@0: } michael@0: } michael@0: michael@0: // There must always be a valid target loop structure. If not, there's michael@0: // probably an off-by-something error in which pc we track. michael@0: JS_ASSERT(found); michael@0: CFGState &state = *found; michael@0: michael@0: DeferredEdge **breaks = nullptr; michael@0: switch (state.state) { michael@0: case CFGState::TABLE_SWITCH: michael@0: breaks = &state.tableswitch.breaks; michael@0: break; michael@0: case CFGState::COND_SWITCH_BODY: michael@0: breaks = &state.condswitch.breaks; michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected switch state."); michael@0: } michael@0: michael@0: *breaks = new(alloc()) DeferredEdge(current, *breaks); michael@0: michael@0: setCurrent(nullptr); michael@0: pc += js_CodeSpec[op].length; michael@0: return processControlEnd(); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processSwitchEnd(DeferredEdge *breaks, jsbytecode *exitpc) michael@0: { michael@0: // No break statements, no current. 
michael@0: // This means that control flow is cut-off from this point michael@0: // (e.g. all cases have return statements). michael@0: if (!breaks && !current) michael@0: return ControlStatus_Ended; michael@0: michael@0: // Create successor block. michael@0: // If there are breaks, create block with breaks as predecessor michael@0: // Else create a block with current as predecessor michael@0: MBasicBlock *successor = nullptr; michael@0: if (breaks) michael@0: successor = createBreakCatchBlock(breaks, exitpc); michael@0: else michael@0: successor = newBlock(current, exitpc); michael@0: michael@0: if (!successor) michael@0: return ControlStatus_Ended; michael@0: michael@0: // If there is current, the current block flows into this one. michael@0: // So current is also a predecessor to this block michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), successor)); michael@0: if (breaks) { michael@0: if (!successor->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: } michael@0: michael@0: pc = exitpc; michael@0: if (!setCurrentAndSpecializePhis(successor)) michael@0: return ControlStatus_Error; michael@0: return ControlStatus_Joined; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::maybeLoop(JSOp op, jssrcnote *sn) michael@0: { michael@0: // This function looks at the opcode and source note and tries to michael@0: // determine the structure of the loop. For some opcodes, like michael@0: // POP/NOP which are not explicitly control flow, this source note is michael@0: // optional. For opcodes with control flow, like GOTO, an unrecognized michael@0: // or not-present source note is a compilation failure. michael@0: switch (op) { michael@0: case JSOP_POP: michael@0: // for (init; ; update?) ... 
michael@0: if (sn && SN_TYPE(sn) == SRC_FOR) { michael@0: current->pop(); michael@0: return forLoop(op, sn); michael@0: } michael@0: break; michael@0: michael@0: case JSOP_NOP: michael@0: if (sn) { michael@0: // do { } while (cond) michael@0: if (SN_TYPE(sn) == SRC_WHILE) michael@0: return doWhileLoop(op, sn); michael@0: // Build a mapping such that given a basic block, whose successor michael@0: // has a phi michael@0: michael@0: // for (; ; update?) michael@0: if (SN_TYPE(sn) == SRC_FOR) michael@0: return forLoop(op, sn); michael@0: } michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected opcode"); michael@0: } michael@0: michael@0: return ControlStatus_None; michael@0: } michael@0: michael@0: void michael@0: IonBuilder::assertValidLoopHeadOp(jsbytecode *pc) michael@0: { michael@0: #ifdef DEBUG michael@0: JS_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD); michael@0: michael@0: // Make sure this is the next opcode after the loop header, michael@0: // unless the for loop is unconditional. michael@0: CFGState &state = cfgStack_.back(); michael@0: JS_ASSERT_IF((JSOp)*(state.loop.entry->pc()) == JSOP_GOTO, michael@0: GetNextPc(state.loop.entry->pc()) == pc); michael@0: michael@0: // do-while loops have a source note. michael@0: jssrcnote *sn = info().getNote(gsn, pc); michael@0: if (sn) { michael@0: jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 0); michael@0: michael@0: jsbytecode *expected_ifne; michael@0: switch (state.state) { michael@0: case CFGState::DO_WHILE_LOOP_BODY: michael@0: expected_ifne = state.loop.updateEnd; michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD unexpected source note"); michael@0: } michael@0: michael@0: // Make sure this loop goes to the same ifne as the loop header's michael@0: // source notes or GOTO. 
michael@0: JS_ASSERT(ifne == expected_ifne); michael@0: } else { michael@0: JS_ASSERT(state.state != CFGState::DO_WHILE_LOOP_BODY); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::doWhileLoop(JSOp op, jssrcnote *sn) michael@0: { michael@0: // do { } while() loops have the following structure: michael@0: // NOP ; SRC_WHILE (offset to COND) michael@0: // LOOPHEAD ; SRC_WHILE (offset to IFNE) michael@0: // LOOPENTRY michael@0: // ... ; body michael@0: // ... michael@0: // COND ; start of condition michael@0: // ... michael@0: // IFNE -> ; goes to LOOPHEAD michael@0: int condition_offset = js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *conditionpc = pc + condition_offset; michael@0: michael@0: jssrcnote *sn2 = info().getNote(gsn, pc+1); michael@0: int offset = js_GetSrcNoteOffset(sn2, 0); michael@0: jsbytecode *ifne = pc + offset + 1; michael@0: JS_ASSERT(ifne > pc); michael@0: michael@0: // Verify that the IFNE goes back to a loophead op. 
michael@0: jsbytecode *loopHead = GetNextPc(pc); michael@0: JS_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD); michael@0: JS_ASSERT(loopHead == ifne + GetJumpOffset(ifne)); michael@0: michael@0: jsbytecode *loopEntry = GetNextPc(loopHead); michael@0: bool canOsr = LoopEntryCanIonOsr(loopEntry); michael@0: bool osr = info().hasOsrAt(loopEntry); michael@0: michael@0: if (osr) { michael@0: MBasicBlock *preheader = newOsrPreheader(current, loopEntry); michael@0: if (!preheader) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), preheader)); michael@0: if (!setCurrentAndSpecializePhis(preheader)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: unsigned stackPhiCount = 0; michael@0: MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); michael@0: if (!header) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), header)); michael@0: michael@0: jsbytecode *loophead = GetNextPc(pc); michael@0: jsbytecode *bodyStart = GetNextPc(loophead); michael@0: jsbytecode *bodyEnd = conditionpc; michael@0: jsbytecode *exitpc = GetNextPc(ifne); michael@0: if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) michael@0: return ControlStatus_Error; michael@0: if (!pushLoop(CFGState::DO_WHILE_LOOP_BODY, conditionpc, header, osr, michael@0: loopHead, bodyStart, bodyStart, bodyEnd, exitpc, conditionpc)) michael@0: { michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: CFGState &state = cfgStack_.back(); michael@0: state.loop.updatepc = conditionpc; michael@0: state.loop.updateEnd = ifne; michael@0: michael@0: if (!setCurrentAndSpecializePhis(header)) michael@0: return ControlStatus_Error; michael@0: if (!jsop_loophead(loophead)) michael@0: return ControlStatus_Error; michael@0: michael@0: pc = bodyStart; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::whileOrForInLoop(jssrcnote *sn) 
michael@0: { michael@0: // while (cond) { } loops have the following structure: michael@0: // GOTO cond ; SRC_WHILE (offset to IFNE) michael@0: // LOOPHEAD michael@0: // ... michael@0: // cond: michael@0: // LOOPENTRY michael@0: // ... michael@0: // IFNE ; goes to LOOPHEAD michael@0: // for (x in y) { } loops are similar; the cond will be a MOREITER. michael@0: JS_ASSERT(SN_TYPE(sn) == SRC_FOR_OF || SN_TYPE(sn) == SRC_FOR_IN || SN_TYPE(sn) == SRC_WHILE); michael@0: int ifneOffset = js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *ifne = pc + ifneOffset; michael@0: JS_ASSERT(ifne > pc); michael@0: michael@0: // Verify that the IFNE goes back to a loophead op. michael@0: JS_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_LOOPHEAD); michael@0: JS_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne)); michael@0: michael@0: jsbytecode *loopEntry = pc + GetJumpOffset(pc); michael@0: bool canOsr = LoopEntryCanIonOsr(loopEntry); michael@0: bool osr = info().hasOsrAt(loopEntry); michael@0: michael@0: if (osr) { michael@0: MBasicBlock *preheader = newOsrPreheader(current, loopEntry); michael@0: if (!preheader) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), preheader)); michael@0: if (!setCurrentAndSpecializePhis(preheader)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: unsigned stackPhiCount; michael@0: if (SN_TYPE(sn) == SRC_FOR_OF) michael@0: stackPhiCount = 2; michael@0: else if (SN_TYPE(sn) == SRC_FOR_IN) michael@0: stackPhiCount = 1; michael@0: else michael@0: stackPhiCount = 0; michael@0: michael@0: MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); michael@0: if (!header) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), header)); michael@0: michael@0: // Skip past the JSOP_LOOPHEAD for the body start. 
michael@0: jsbytecode *loopHead = GetNextPc(pc); michael@0: jsbytecode *bodyStart = GetNextPc(loopHead); michael@0: jsbytecode *bodyEnd = pc + GetJumpOffset(pc); michael@0: jsbytecode *exitpc = GetNextPc(ifne); michael@0: if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) michael@0: return ControlStatus_Error; michael@0: if (!pushLoop(CFGState::WHILE_LOOP_COND, ifne, header, osr, michael@0: loopHead, bodyEnd, bodyStart, bodyEnd, exitpc)) michael@0: { michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // Parse the condition first. michael@0: if (!setCurrentAndSpecializePhis(header)) michael@0: return ControlStatus_Error; michael@0: if (!jsop_loophead(loopHead)) michael@0: return ControlStatus_Error; michael@0: michael@0: pc = bodyEnd; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::forLoop(JSOp op, jssrcnote *sn) michael@0: { michael@0: // Skip the NOP or POP. michael@0: JS_ASSERT(op == JSOP_POP || op == JSOP_NOP); michael@0: pc = GetNextPc(pc); michael@0: michael@0: jsbytecode *condpc = pc + js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *updatepc = pc + js_GetSrcNoteOffset(sn, 1); michael@0: jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 2); michael@0: jsbytecode *exitpc = GetNextPc(ifne); michael@0: michael@0: // for loops have the following structures: michael@0: // michael@0: // NOP or POP michael@0: // [GOTO cond | NOP] michael@0: // LOOPHEAD michael@0: // body: michael@0: // ; [body] michael@0: // [increment:] michael@0: // ; [increment] michael@0: // [cond:] michael@0: // LOOPENTRY michael@0: // GOTO body michael@0: // michael@0: // If there is a condition (condpc != ifne), this acts similar to a while michael@0: // loop otherwise, it acts like a do-while loop. 
michael@0: jsbytecode *bodyStart = pc; michael@0: jsbytecode *bodyEnd = updatepc; michael@0: jsbytecode *loopEntry = condpc; michael@0: if (condpc != ifne) { michael@0: JS_ASSERT(JSOp(*bodyStart) == JSOP_GOTO); michael@0: JS_ASSERT(bodyStart + GetJumpOffset(bodyStart) == condpc); michael@0: bodyStart = GetNextPc(bodyStart); michael@0: } else { michael@0: // No loop condition, such as for(j = 0; ; j++) michael@0: if (op != JSOP_NOP) { michael@0: // If the loop starts with POP, we have to skip a NOP. michael@0: JS_ASSERT(JSOp(*bodyStart) == JSOP_NOP); michael@0: bodyStart = GetNextPc(bodyStart); michael@0: } michael@0: loopEntry = GetNextPc(bodyStart); michael@0: } michael@0: jsbytecode *loopHead = bodyStart; michael@0: JS_ASSERT(JSOp(*bodyStart) == JSOP_LOOPHEAD); michael@0: JS_ASSERT(ifne + GetJumpOffset(ifne) == bodyStart); michael@0: bodyStart = GetNextPc(bodyStart); michael@0: michael@0: bool osr = info().hasOsrAt(loopEntry); michael@0: bool canOsr = LoopEntryCanIonOsr(loopEntry); michael@0: michael@0: if (osr) { michael@0: MBasicBlock *preheader = newOsrPreheader(current, loopEntry); michael@0: if (!preheader) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), preheader)); michael@0: if (!setCurrentAndSpecializePhis(preheader)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: unsigned stackPhiCount = 0; michael@0: MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); michael@0: if (!header) michael@0: return ControlStatus_Error; michael@0: current->end(MGoto::New(alloc(), header)); michael@0: michael@0: // If there is no condition, we immediately parse the body. Otherwise, we michael@0: // parse the condition. 
michael@0: jsbytecode *stopAt; michael@0: CFGState::State initial; michael@0: if (condpc != ifne) { michael@0: pc = condpc; michael@0: stopAt = ifne; michael@0: initial = CFGState::FOR_LOOP_COND; michael@0: } else { michael@0: pc = bodyStart; michael@0: stopAt = bodyEnd; michael@0: initial = CFGState::FOR_LOOP_BODY; michael@0: } michael@0: michael@0: if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) michael@0: return ControlStatus_Error; michael@0: if (!pushLoop(initial, stopAt, header, osr, michael@0: loopHead, pc, bodyStart, bodyEnd, exitpc, updatepc)) michael@0: { michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: CFGState &state = cfgStack_.back(); michael@0: state.loop.condpc = (condpc != ifne) ? condpc : nullptr; michael@0: state.loop.updatepc = (updatepc != condpc) ? updatepc : nullptr; michael@0: if (state.loop.updatepc) michael@0: state.loop.updateEnd = condpc; michael@0: michael@0: if (!setCurrentAndSpecializePhis(header)) michael@0: return ControlStatus_Error; michael@0: if (!jsop_loophead(loopHead)) michael@0: return ControlStatus_Error; michael@0: michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: int michael@0: IonBuilder::CmpSuccessors(const void *a, const void *b) michael@0: { michael@0: const MBasicBlock *a0 = * (MBasicBlock * const *)a; michael@0: const MBasicBlock *b0 = * (MBasicBlock * const *)b; michael@0: if (a0->pc() == b0->pc()) michael@0: return 0; michael@0: michael@0: return (a0->pc() > b0->pc()) ? 1 : -1; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::tableSwitch(JSOp op, jssrcnote *sn) michael@0: { michael@0: // TableSwitch op contains the following data michael@0: // (length between data is JUMP_OFFSET_LEN) michael@0: // michael@0: // 0: Offset of default case michael@0: // 1: Lowest number in tableswitch michael@0: // 2: Highest number in tableswitch michael@0: // 3: Offset of case low michael@0: // 4: Offset of case low+1 michael@0: // .: ... 
michael@0: // .: Offset of case high michael@0: michael@0: JS_ASSERT(op == JSOP_TABLESWITCH); michael@0: JS_ASSERT(SN_TYPE(sn) == SRC_TABLESWITCH); michael@0: michael@0: // Pop input. michael@0: MDefinition *ins = current->pop(); michael@0: michael@0: // Get the default and exit pc michael@0: jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *defaultpc = pc + GET_JUMP_OFFSET(pc); michael@0: michael@0: JS_ASSERT(defaultpc > pc && defaultpc <= exitpc); michael@0: michael@0: // Get the low and high from the tableswitch michael@0: jsbytecode *pc2 = pc; michael@0: pc2 += JUMP_OFFSET_LEN; michael@0: int low = GET_JUMP_OFFSET(pc2); michael@0: pc2 += JUMP_OFFSET_LEN; michael@0: int high = GET_JUMP_OFFSET(pc2); michael@0: pc2 += JUMP_OFFSET_LEN; michael@0: michael@0: // Create MIR instruction michael@0: MTableSwitch *tableswitch = MTableSwitch::New(alloc(), ins, low, high); michael@0: michael@0: // Create default case michael@0: MBasicBlock *defaultcase = newBlock(current, defaultpc); michael@0: if (!defaultcase) michael@0: return ControlStatus_Error; michael@0: tableswitch->addDefault(defaultcase); michael@0: tableswitch->addBlock(defaultcase); michael@0: michael@0: // Create cases michael@0: jsbytecode *casepc = nullptr; michael@0: for (int i = 0; i < high-low+1; i++) { michael@0: casepc = pc + GET_JUMP_OFFSET(pc2); michael@0: michael@0: JS_ASSERT(casepc >= pc && casepc <= exitpc); michael@0: michael@0: MBasicBlock *caseblock = newBlock(current, casepc); michael@0: if (!caseblock) michael@0: return ControlStatus_Error; michael@0: michael@0: // If the casepc equals the current pc, it is not a written case, michael@0: // but a filled gap. That way we can use a tableswitch instead of michael@0: // condswitch, even if not all numbers are consecutive. 
michael@0: // In that case this block goes to the default case michael@0: if (casepc == pc) { michael@0: caseblock->end(MGoto::New(alloc(), defaultcase)); michael@0: if (!defaultcase->addPredecessor(alloc(), caseblock)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: tableswitch->addCase(tableswitch->addSuccessor(caseblock)); michael@0: michael@0: // If this is an actual case (not filled gap), michael@0: // add this block to the list that still needs to get processed michael@0: if (casepc != pc) michael@0: tableswitch->addBlock(caseblock); michael@0: michael@0: pc2 += JUMP_OFFSET_LEN; michael@0: } michael@0: michael@0: // Move defaultcase to the end, to maintain RPO. michael@0: graph().moveBlockToEnd(defaultcase); michael@0: michael@0: JS_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1)); michael@0: JS_ASSERT(tableswitch->numSuccessors() > 0); michael@0: michael@0: // Sort the list of blocks that still needs to get processed by pc michael@0: qsort(tableswitch->blocks(), tableswitch->numBlocks(), michael@0: sizeof(MBasicBlock*), CmpSuccessors); michael@0: michael@0: // Create info michael@0: ControlFlowInfo switchinfo(cfgStack_.length(), exitpc); michael@0: if (!switches_.append(switchinfo)) michael@0: return ControlStatus_Error; michael@0: michael@0: // Use a state to retrieve some information michael@0: CFGState state = CFGState::TableSwitch(exitpc, tableswitch); michael@0: michael@0: // Save the MIR instruction as last instruction of this block. 
michael@0: current->end(tableswitch); michael@0: michael@0: // If there is only one successor the block should stop at the end of the switch michael@0: // Else it should stop at the start of the next successor michael@0: if (tableswitch->numBlocks() > 1) michael@0: state.stopAt = tableswitch->getBlock(1)->pc(); michael@0: if (!setCurrentAndSpecializePhis(tableswitch->getBlock(0))) michael@0: return ControlStatus_Error; michael@0: michael@0: if (!cfgStack_.append(state)) michael@0: return ControlStatus_Error; michael@0: michael@0: pc = current->pc(); michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::filterTypesAtTest(MTest *test) michael@0: { michael@0: JS_ASSERT(test->ifTrue() == current || test->ifFalse() == current); michael@0: michael@0: bool trueBranch = test->ifTrue() == current; michael@0: michael@0: MDefinition *subject = nullptr; michael@0: bool removeUndefined; michael@0: bool removeNull; michael@0: michael@0: test->filtersUndefinedOrNull(trueBranch, &subject, &removeUndefined, &removeNull); michael@0: michael@0: // The test filters no undefined or null. michael@0: if (!subject) michael@0: return true; michael@0: michael@0: // There is no TypeSet that can get filtered. michael@0: if (!subject->resultTypeSet() || subject->resultTypeSet()->unknown()) michael@0: return true; michael@0: michael@0: // Only do this optimization if the typeset does contains null or undefined. michael@0: if ((!(removeUndefined && subject->resultTypeSet()->hasType(types::Type::UndefinedType())) && michael@0: !(removeNull && subject->resultTypeSet()->hasType(types::Type::NullType())))) michael@0: { michael@0: return true; michael@0: } michael@0: michael@0: // Find all values on the stack that correspond to the subject michael@0: // and replace it with a MIR with filtered TypeSet information. michael@0: // Create the replacement MIR lazily upon first occurence. 
michael@0: MDefinition *replace = nullptr; michael@0: for (uint32_t i = 0; i < current->stackDepth(); i++) { michael@0: if (current->getSlot(i) != subject) michael@0: continue; michael@0: michael@0: // Create replacement MIR with filtered TypesSet. michael@0: if (!replace) { michael@0: types::TemporaryTypeSet *type = michael@0: subject->resultTypeSet()->filter(alloc_->lifoAlloc(), removeUndefined, michael@0: removeNull); michael@0: if (!type) michael@0: return false; michael@0: michael@0: replace = ensureDefiniteTypeSet(subject, type); michael@0: // Make sure we don't hoist it above the MTest, we can use the michael@0: // 'dependency' of an MInstruction. This is normally used by michael@0: // Alias Analysis, but won't get overwritten, since this michael@0: // instruction doesn't have an AliasSet. michael@0: replace->setDependency(test); michael@0: } michael@0: michael@0: current->setSlot(i, replace); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_label() michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_LABEL); michael@0: michael@0: jsbytecode *endpc = pc + GET_JUMP_OFFSET(pc); michael@0: JS_ASSERT(endpc > pc); michael@0: michael@0: ControlFlowInfo label(cfgStack_.length(), endpc); michael@0: if (!labels_.append(label)) michael@0: return false; michael@0: michael@0: return cfgStack_.append(CFGState::Label(endpc)); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_condswitch() michael@0: { michael@0: // CondSwitch op looks as follows: michael@0: // condswitch [length +exit_pc; first case offset +next-case ] michael@0: // { michael@0: // { michael@0: // ... any code ... michael@0: // case (+jump) [pcdelta offset +next-case] michael@0: // }+ michael@0: // default (+jump) michael@0: // ... jump targets ... michael@0: // } michael@0: // michael@0: // The default case is always emitted even if there is no default case in michael@0: // the source. 
The last case statement pcdelta source note might have a 0 michael@0: // offset on the last case (not all the time). michael@0: // michael@0: // A conditional evaluate the condition of each case and compare it to the michael@0: // switch value with a strict equality. Cases conditions are iterated michael@0: // linearly until one is matching. If one case succeeds, the flow jumps into michael@0: // the corresponding body block. The body block might alias others and michael@0: // might continue in the next body block if the body is not terminated with michael@0: // a break. michael@0: // michael@0: // Algorithm: michael@0: // 1/ Loop over the case chain to reach the default target michael@0: // & Estimate the number of uniq bodies. michael@0: // 2/ Generate code for all cases (see processCondSwitchCase). michael@0: // 3/ Generate code for all bodies (see processCondSwitchBody). michael@0: michael@0: JS_ASSERT(JSOp(*pc) == JSOP_CONDSWITCH); michael@0: jssrcnote *sn = info().getNote(gsn, pc); michael@0: JS_ASSERT(SN_TYPE(sn) == SRC_CONDSWITCH); michael@0: michael@0: // Get the exit pc michael@0: jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *firstCase = pc + js_GetSrcNoteOffset(sn, 1); michael@0: michael@0: // Iterate all cases in the conditional switch. michael@0: // - Stop at the default case. (always emitted after the last case) michael@0: // - Estimate the number of uniq bodies. This estimation might be off by 1 michael@0: // if the default body alias a case body. michael@0: jsbytecode *curCase = firstCase; michael@0: jsbytecode *lastTarget = GetJumpOffset(curCase) + curCase; michael@0: size_t nbBodies = 2; // default target and the first body. michael@0: michael@0: JS_ASSERT(pc < curCase && curCase <= exitpc); michael@0: while (JSOp(*curCase) == JSOP_CASE) { michael@0: // Fetch the next case. 
michael@0: jssrcnote *caseSn = info().getNote(gsn, curCase); michael@0: JS_ASSERT(caseSn && SN_TYPE(caseSn) == SRC_NEXTCASE); michael@0: ptrdiff_t off = js_GetSrcNoteOffset(caseSn, 0); michael@0: curCase = off ? curCase + off : GetNextPc(curCase); michael@0: JS_ASSERT(pc < curCase && curCase <= exitpc); michael@0: michael@0: // Count non-aliased cases. michael@0: jsbytecode *curTarget = GetJumpOffset(curCase) + curCase; michael@0: if (lastTarget < curTarget) michael@0: nbBodies++; michael@0: lastTarget = curTarget; michael@0: } michael@0: michael@0: // The current case now be the default case which jump to the body of the michael@0: // default case, which might be behind the last target. michael@0: JS_ASSERT(JSOp(*curCase) == JSOP_DEFAULT); michael@0: jsbytecode *defaultTarget = GetJumpOffset(curCase) + curCase; michael@0: JS_ASSERT(curCase < defaultTarget && defaultTarget <= exitpc); michael@0: michael@0: // Allocate the current graph state. michael@0: CFGState state = CFGState::CondSwitch(this, exitpc, defaultTarget); michael@0: if (!state.condswitch.bodies || !state.condswitch.bodies->init(alloc(), nbBodies)) michael@0: return ControlStatus_Error; michael@0: michael@0: // We loop on case conditions with processCondSwitchCase. 
michael@0: JS_ASSERT(JSOp(*firstCase) == JSOP_CASE); michael@0: state.stopAt = firstCase; michael@0: state.state = CFGState::COND_SWITCH_CASE; michael@0: michael@0: return cfgStack_.append(state); michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::CondSwitch(IonBuilder *builder, jsbytecode *exitpc, jsbytecode *defaultTarget) michael@0: { michael@0: CFGState state; michael@0: state.state = COND_SWITCH_CASE; michael@0: state.stopAt = nullptr; michael@0: state.condswitch.bodies = (FixedList *)builder->alloc_->allocate( michael@0: sizeof(FixedList)); michael@0: state.condswitch.currentIdx = 0; michael@0: state.condswitch.defaultTarget = defaultTarget; michael@0: state.condswitch.defaultIdx = uint32_t(-1); michael@0: state.condswitch.exitpc = exitpc; michael@0: state.condswitch.breaks = nullptr; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::Label(jsbytecode *exitpc) michael@0: { michael@0: CFGState state; michael@0: state.state = LABEL; michael@0: state.stopAt = exitpc; michael@0: state.label.breaks = nullptr; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::CFGState michael@0: IonBuilder::CFGState::Try(jsbytecode *exitpc, MBasicBlock *successor) michael@0: { michael@0: CFGState state; michael@0: state.state = TRY; michael@0: state.stopAt = exitpc; michael@0: state.try_.successor = successor; michael@0: return state; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processCondSwitchCase(CFGState &state) michael@0: { michael@0: JS_ASSERT(state.state == CFGState::COND_SWITCH_CASE); michael@0: JS_ASSERT(!state.condswitch.breaks); michael@0: JS_ASSERT(current); michael@0: JS_ASSERT(JSOp(*pc) == JSOP_CASE); michael@0: FixedList &bodies = *state.condswitch.bodies; michael@0: jsbytecode *defaultTarget = state.condswitch.defaultTarget; michael@0: uint32_t ¤tIdx = state.condswitch.currentIdx; michael@0: jsbytecode 
*lastTarget = currentIdx ? bodies[currentIdx - 1]->pc() : nullptr; michael@0: michael@0: // Fetch the following case in which we will continue. michael@0: jssrcnote *sn = info().getNote(gsn, pc); michael@0: ptrdiff_t off = js_GetSrcNoteOffset(sn, 0); michael@0: jsbytecode *casePc = off ? pc + off : GetNextPc(pc); michael@0: bool caseIsDefault = JSOp(*casePc) == JSOP_DEFAULT; michael@0: JS_ASSERT(JSOp(*casePc) == JSOP_CASE || caseIsDefault); michael@0: michael@0: // Allocate the block of the matching case. michael@0: bool bodyIsNew = false; michael@0: MBasicBlock *bodyBlock = nullptr; michael@0: jsbytecode *bodyTarget = pc + GetJumpOffset(pc); michael@0: if (lastTarget < bodyTarget) { michael@0: // If the default body is in the middle or aliasing the current target. michael@0: if (lastTarget < defaultTarget && defaultTarget <= bodyTarget) { michael@0: JS_ASSERT(state.condswitch.defaultIdx == uint32_t(-1)); michael@0: state.condswitch.defaultIdx = currentIdx; michael@0: bodies[currentIdx] = nullptr; michael@0: // If the default body does not alias any and it would be allocated michael@0: // later and stored in the defaultIdx location. michael@0: if (defaultTarget < bodyTarget) michael@0: currentIdx++; michael@0: } michael@0: michael@0: bodyIsNew = true; michael@0: // Pop switch and case operands. michael@0: bodyBlock = newBlockPopN(current, bodyTarget, 2); michael@0: bodies[currentIdx++] = bodyBlock; michael@0: } else { michael@0: // This body alias the previous one. michael@0: JS_ASSERT(lastTarget == bodyTarget); michael@0: JS_ASSERT(currentIdx > 0); michael@0: bodyBlock = bodies[currentIdx - 1]; michael@0: } michael@0: michael@0: if (!bodyBlock) michael@0: return ControlStatus_Error; michael@0: michael@0: lastTarget = bodyTarget; michael@0: michael@0: // Allocate the block of the non-matching case. This can either be a normal michael@0: // case or the default case. 
michael@0: bool caseIsNew = false; michael@0: MBasicBlock *caseBlock = nullptr; michael@0: if (!caseIsDefault) { michael@0: caseIsNew = true; michael@0: // Pop the case operand. michael@0: caseBlock = newBlockPopN(current, GetNextPc(pc), 1); michael@0: } else { michael@0: // The non-matching case is the default case, which jump directly to its michael@0: // body. Skip the creation of a default case block and directly create michael@0: // the default body if it does not alias any previous body. michael@0: michael@0: if (state.condswitch.defaultIdx == uint32_t(-1)) { michael@0: // The default target is the last target. michael@0: JS_ASSERT(lastTarget < defaultTarget); michael@0: state.condswitch.defaultIdx = currentIdx++; michael@0: caseIsNew = true; michael@0: } else if (bodies[state.condswitch.defaultIdx] == nullptr) { michael@0: // The default target is in the middle and it does not alias any michael@0: // case target. michael@0: JS_ASSERT(defaultTarget < lastTarget); michael@0: caseIsNew = true; michael@0: } else { michael@0: // The default target is in the middle and it alias a case target. michael@0: JS_ASSERT(defaultTarget <= lastTarget); michael@0: caseBlock = bodies[state.condswitch.defaultIdx]; michael@0: } michael@0: michael@0: // Allocate and register the default body. michael@0: if (caseIsNew) { michael@0: // Pop the case & switch operands. michael@0: caseBlock = newBlockPopN(current, defaultTarget, 2); michael@0: bodies[state.condswitch.defaultIdx] = caseBlock; michael@0: } michael@0: } michael@0: michael@0: if (!caseBlock) michael@0: return ControlStatus_Error; michael@0: michael@0: // Terminate the last case condition block by emitting the code michael@0: // corresponding to JSOP_CASE bytecode. 
michael@0: if (bodyBlock != caseBlock) { michael@0: MDefinition *caseOperand = current->pop(); michael@0: MDefinition *switchOperand = current->peek(-1); michael@0: MCompare *cmpResult = MCompare::New(alloc(), switchOperand, caseOperand, JSOP_STRICTEQ); michael@0: cmpResult->infer(inspector, pc); michael@0: JS_ASSERT(!cmpResult->isEffectful()); michael@0: current->add(cmpResult); michael@0: current->end(MTest::New(alloc(), cmpResult, bodyBlock, caseBlock)); michael@0: michael@0: // Add last case as predecessor of the body if the body is aliasing michael@0: // the previous case body. michael@0: if (!bodyIsNew && !bodyBlock->addPredecessorPopN(alloc(), current, 1)) michael@0: return ControlStatus_Error; michael@0: michael@0: // Add last case as predecessor of the non-matching case if the michael@0: // non-matching case is an aliased default case. We need to pop the michael@0: // switch operand as we skip the default case block and use the default michael@0: // body block directly. michael@0: JS_ASSERT_IF(!caseIsNew, caseIsDefault); michael@0: if (!caseIsNew && !caseBlock->addPredecessorPopN(alloc(), current, 1)) michael@0: return ControlStatus_Error; michael@0: } else { michael@0: // The default case alias the last case body. michael@0: JS_ASSERT(caseIsDefault); michael@0: current->pop(); // Case operand michael@0: current->pop(); // Switch operand michael@0: current->end(MGoto::New(alloc(), bodyBlock)); michael@0: if (!bodyIsNew && !bodyBlock->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: if (caseIsDefault) { michael@0: // The last case condition is finished. Loop in processCondSwitchBody, michael@0: // with potential stops in processSwitchBreak. Check that the bodies michael@0: // fixed list is over-estimate by at most 1, and shrink the size such as michael@0: // length can be used as an upper bound while iterating bodies. 
michael@0: JS_ASSERT(currentIdx == bodies.length() || currentIdx + 1 == bodies.length()); michael@0: bodies.shrink(bodies.length() - currentIdx); michael@0: michael@0: // Handle break statements in processSwitchBreak while processing michael@0: // bodies. michael@0: ControlFlowInfo breakInfo(cfgStack_.length() - 1, state.condswitch.exitpc); michael@0: if (!switches_.append(breakInfo)) michael@0: return ControlStatus_Error; michael@0: michael@0: // Jump into the first body. michael@0: currentIdx = 0; michael@0: setCurrent(nullptr); michael@0: state.state = CFGState::COND_SWITCH_BODY; michael@0: return processCondSwitchBody(state); michael@0: } michael@0: michael@0: // Continue until the case condition. michael@0: if (!setCurrentAndSpecializePhis(caseBlock)) michael@0: return ControlStatus_Error; michael@0: pc = current->pc(); michael@0: state.stopAt = casePc; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processCondSwitchBody(CFGState &state) michael@0: { michael@0: JS_ASSERT(state.state == CFGState::COND_SWITCH_BODY); michael@0: JS_ASSERT(pc <= state.condswitch.exitpc); michael@0: FixedList &bodies = *state.condswitch.bodies; michael@0: uint32_t ¤tIdx = state.condswitch.currentIdx; michael@0: michael@0: JS_ASSERT(currentIdx <= bodies.length()); michael@0: if (currentIdx == bodies.length()) { michael@0: JS_ASSERT_IF(current, pc == state.condswitch.exitpc); michael@0: return processSwitchEnd(state.condswitch.breaks, state.condswitch.exitpc); michael@0: } michael@0: michael@0: // Get the next body michael@0: MBasicBlock *nextBody = bodies[currentIdx++]; michael@0: JS_ASSERT_IF(current, pc == nextBody->pc()); michael@0: michael@0: // Fix the reverse post-order iteration. michael@0: graph().moveBlockToEnd(nextBody); michael@0: michael@0: // The last body continue into the new one. 
michael@0: if (current) { michael@0: current->end(MGoto::New(alloc(), nextBody)); michael@0: if (!nextBody->addPredecessor(alloc(), current)) michael@0: return ControlStatus_Error; michael@0: } michael@0: michael@0: // Continue in the next body. michael@0: if (!setCurrentAndSpecializePhis(nextBody)) michael@0: return ControlStatus_Error; michael@0: pc = current->pc(); michael@0: michael@0: if (currentIdx < bodies.length()) michael@0: state.stopAt = bodies[currentIdx]->pc(); michael@0: else michael@0: state.stopAt = state.condswitch.exitpc; michael@0: return ControlStatus_Jumped; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_andor(JSOp op) michael@0: { michael@0: JS_ASSERT(op == JSOP_AND || op == JSOP_OR); michael@0: michael@0: jsbytecode *rhsStart = pc + js_CodeSpec[op].length; michael@0: jsbytecode *joinStart = pc + GetJumpOffset(pc); michael@0: JS_ASSERT(joinStart > pc); michael@0: michael@0: // We have to leave the LHS on the stack. michael@0: MDefinition *lhs = current->peek(-1); michael@0: michael@0: MBasicBlock *evalRhs = newBlock(current, rhsStart); michael@0: MBasicBlock *join = newBlock(current, joinStart); michael@0: if (!evalRhs || !join) michael@0: return false; michael@0: michael@0: MTest *test = (op == JSOP_AND) michael@0: ? 
MTest::New(alloc(), lhs, evalRhs, join) michael@0: : MTest::New(alloc(), lhs, join, evalRhs); michael@0: test->infer(); michael@0: current->end(test); michael@0: michael@0: if (!cfgStack_.append(CFGState::AndOr(joinStart, join))) michael@0: return false; michael@0: michael@0: return setCurrentAndSpecializePhis(evalRhs); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_dup2() michael@0: { michael@0: uint32_t lhsSlot = current->stackDepth() - 2; michael@0: uint32_t rhsSlot = current->stackDepth() - 1; michael@0: current->pushSlot(lhsSlot); michael@0: current->pushSlot(rhsSlot); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_loophead(jsbytecode *pc) michael@0: { michael@0: assertValidLoopHeadOp(pc); michael@0: michael@0: current->add(MInterruptCheck::New(alloc())); michael@0: insertRecompileCheck(); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_ifeq(JSOp op) michael@0: { michael@0: // IFEQ always has a forward offset. michael@0: jsbytecode *trueStart = pc + js_CodeSpec[op].length; michael@0: jsbytecode *falseStart = pc + GetJumpOffset(pc); michael@0: JS_ASSERT(falseStart > pc); michael@0: michael@0: // We only handle cases that emit source notes. michael@0: jssrcnote *sn = info().getNote(gsn, pc); michael@0: if (!sn) michael@0: return abort("expected sourcenote"); michael@0: michael@0: MDefinition *ins = current->pop(); michael@0: michael@0: // Create true and false branches. michael@0: MBasicBlock *ifTrue = newBlock(current, trueStart); michael@0: MBasicBlock *ifFalse = newBlock(current, falseStart); michael@0: if (!ifTrue || !ifFalse) michael@0: return false; michael@0: michael@0: MTest *test = MTest::New(alloc(), ins, ifTrue, ifFalse); michael@0: current->end(test); michael@0: michael@0: // The bytecode for if/ternary gets emitted either like this: michael@0: // michael@0: // IFEQ X ; src note (IF_ELSE, COND) points to the GOTO michael@0: // ... 
michael@0: // GOTO Z michael@0: // X: ... ; else/else if michael@0: // ... michael@0: // Z: ; join michael@0: // michael@0: // Or like this: michael@0: // michael@0: // IFEQ X ; src note (IF) has no offset michael@0: // ... michael@0: // Z: ... ; join michael@0: // michael@0: // We want to parse the bytecode as if we were parsing the AST, so for the michael@0: // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the michael@0: // IF case, the IFEQ offset is the join point. michael@0: switch (SN_TYPE(sn)) { michael@0: case SRC_IF: michael@0: if (!cfgStack_.append(CFGState::If(falseStart, test))) michael@0: return false; michael@0: break; michael@0: michael@0: case SRC_IF_ELSE: michael@0: case SRC_COND: michael@0: { michael@0: // Infer the join point from the JSOP_GOTO[X] sitting here, then michael@0: // assert as we much we can that this is the right GOTO. michael@0: jsbytecode *trueEnd = pc + js_GetSrcNoteOffset(sn, 0); michael@0: JS_ASSERT(trueEnd > pc); michael@0: JS_ASSERT(trueEnd < falseStart); michael@0: JS_ASSERT(JSOp(*trueEnd) == JSOP_GOTO); michael@0: JS_ASSERT(!info().getNote(gsn, trueEnd)); michael@0: michael@0: jsbytecode *falseEnd = trueEnd + GetJumpOffset(trueEnd); michael@0: JS_ASSERT(falseEnd > trueEnd); michael@0: JS_ASSERT(falseEnd >= falseStart); michael@0: michael@0: if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, test))) michael@0: return false; michael@0: break; michael@0: } michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected source note type"); michael@0: } michael@0: michael@0: // Switch to parsing the true branch. Note that no PC update is needed, michael@0: // it's the next instruction. michael@0: if (!setCurrentAndSpecializePhis(ifTrue)) michael@0: return false; michael@0: michael@0: // Filter the types in the true branch. 
michael@0: filterTypesAtTest(test); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_try() michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_TRY); michael@0: michael@0: if (!js_JitOptions.compileTryCatch) michael@0: return abort("Try-catch support disabled"); michael@0: michael@0: // Try-finally is not yet supported. michael@0: if (analysis().hasTryFinally()) michael@0: return abort("Has try-finally"); michael@0: michael@0: // Try-catch within inline frames is not yet supported. michael@0: JS_ASSERT(!isInlineBuilder()); michael@0: michael@0: // Try-catch during the arguments usage analysis is not yet supported. Code michael@0: // accessing the arguments within the 'catch' block is not accounted for. michael@0: if (info().executionMode() == ArgumentsUsageAnalysis) michael@0: return abort("Try-catch during arguments usage analysis"); michael@0: michael@0: graph().setHasTryBlock(); michael@0: michael@0: jssrcnote *sn = info().getNote(gsn, pc); michael@0: JS_ASSERT(SN_TYPE(sn) == SRC_TRY); michael@0: michael@0: // Get the pc of the last instruction in the try block. It's a JSOP_GOTO to michael@0: // jump over the catch block. michael@0: jsbytecode *endpc = pc + js_GetSrcNoteOffset(sn, 0); michael@0: JS_ASSERT(JSOp(*endpc) == JSOP_GOTO); michael@0: JS_ASSERT(GetJumpOffset(endpc) > 0); michael@0: michael@0: jsbytecode *afterTry = endpc + GetJumpOffset(endpc); michael@0: michael@0: // If controlflow in the try body is terminated (by a return or throw michael@0: // statement), the code after the try-statement may still be reachable michael@0: // via the catch block (which we don't compile) and OSR can enter it. 
michael@0: // For example: michael@0: // michael@0: // try { michael@0: // throw 3; michael@0: // } catch(e) { } michael@0: // michael@0: // for (var i=0; i<1000; i++) {} michael@0: // michael@0: // To handle this, we create two blocks: one for the try block and one michael@0: // for the code following the try-catch statement. Both blocks are michael@0: // connected to the graph with an MTest instruction that always jumps to michael@0: // the try block. This ensures the successor block always has a predecessor michael@0: // and later passes will optimize this MTest to a no-op. michael@0: // michael@0: // If the code after the try block is unreachable (control flow in both the michael@0: // try and catch blocks is terminated), only create the try block, to avoid michael@0: // parsing unreachable code. michael@0: michael@0: MBasicBlock *tryBlock = newBlock(current, GetNextPc(pc)); michael@0: if (!tryBlock) michael@0: return false; michael@0: michael@0: MBasicBlock *successor; michael@0: if (analysis().maybeInfo(afterTry)) { michael@0: successor = newBlock(current, afterTry); michael@0: if (!successor) michael@0: return false; michael@0: michael@0: // Add MTest(true, tryBlock, successorBlock). michael@0: MConstant *true_ = MConstant::New(alloc(), BooleanValue(true)); michael@0: current->add(true_); michael@0: current->end(MTest::New(alloc(), true_, tryBlock, successor)); michael@0: } else { michael@0: successor = nullptr; michael@0: current->end(MGoto::New(alloc(), tryBlock)); michael@0: } michael@0: michael@0: if (!cfgStack_.append(CFGState::Try(endpc, successor))) michael@0: return false; michael@0: michael@0: // The baseline compiler should not attempt to enter the catch block michael@0: // via OSR. michael@0: JS_ASSERT(info().osrPc() < endpc || info().osrPc() >= afterTry); michael@0: michael@0: // Start parsing the try block. 
michael@0: return setCurrentAndSpecializePhis(tryBlock); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processReturn(JSOp op) michael@0: { michael@0: MDefinition *def; michael@0: switch (op) { michael@0: case JSOP_RETURN: michael@0: // Return the last instruction. michael@0: def = current->pop(); michael@0: break; michael@0: michael@0: case JSOP_RETRVAL: michael@0: // Return undefined eagerly if script doesn't use return value. michael@0: if (script()->noScriptRval()) { michael@0: MInstruction *ins = MConstant::New(alloc(), UndefinedValue()); michael@0: current->add(ins); michael@0: def = ins; michael@0: break; michael@0: } michael@0: michael@0: def = current->getSlot(info().returnValueSlot()); michael@0: break; michael@0: michael@0: default: michael@0: def = nullptr; michael@0: MOZ_ASSUME_UNREACHABLE("unknown return op"); michael@0: } michael@0: michael@0: if (instrumentedProfiling()) { michael@0: current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Exit, michael@0: inliningDepth_)); michael@0: } michael@0: MReturn *ret = MReturn::New(alloc(), def); michael@0: current->end(ret); michael@0: michael@0: if (!graph().addReturn(current)) michael@0: return ControlStatus_Error; michael@0: michael@0: // Make sure no one tries to use this block now. michael@0: setCurrent(nullptr); michael@0: return processControlEnd(); michael@0: } michael@0: michael@0: IonBuilder::ControlStatus michael@0: IonBuilder::processThrow() michael@0: { michael@0: MDefinition *def = current->pop(); michael@0: michael@0: // MThrow is not marked as effectful. 
This means when it throws and we michael@0: // are inside a try block, we could use an earlier resume point and this michael@0: // resume point may not be up-to-date, for example: michael@0: // michael@0: // (function() { michael@0: // try { michael@0: // var x = 1; michael@0: // foo(); // resume point michael@0: // x = 2; michael@0: // throw foo; michael@0: // } catch(e) { michael@0: // print(x); michael@0: // } michael@0: // ])(); michael@0: // michael@0: // If we use the resume point after the call, this will print 1 instead michael@0: // of 2. To fix this, we create a resume point right before the MThrow. michael@0: // michael@0: // Note that this is not a problem for instructions other than MThrow michael@0: // because they are either marked as effectful (have their own resume michael@0: // point) or cannot throw a catchable exception. michael@0: // michael@0: // We always install this resume point (instead of only when the function michael@0: // has a try block) in order to handle the Debugger onExceptionUnwind michael@0: // hook. When we need to handle the hook, we bail out to baseline right michael@0: // after the throw and propagate the exception when debug mode is on. This michael@0: // is opposed to the normal behavior of resuming directly in the michael@0: // associated catch block. michael@0: MNop *nop = MNop::New(alloc()); michael@0: current->add(nop); michael@0: michael@0: if (!resumeAfter(nop)) michael@0: return ControlStatus_Error; michael@0: michael@0: MThrow *ins = MThrow::New(alloc(), def); michael@0: current->end(ins); michael@0: michael@0: // Make sure no one tries to use this block now. 
michael@0: setCurrent(nullptr); michael@0: return processControlEnd(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::pushConstant(const Value &v) michael@0: { michael@0: current->push(constant(v)); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_bitnot() michael@0: { michael@0: MDefinition *input = current->pop(); michael@0: MBitNot *ins = MBitNot::New(alloc(), input); michael@0: michael@0: current->add(ins); michael@0: ins->infer(); michael@0: michael@0: current->push(ins); michael@0: if (ins->isEffectful() && !resumeAfter(ins)) michael@0: return false; michael@0: return true; michael@0: } michael@0: bool michael@0: IonBuilder::jsop_bitop(JSOp op) michael@0: { michael@0: // Pop inputs. michael@0: MDefinition *right = current->pop(); michael@0: MDefinition *left = current->pop(); michael@0: michael@0: MBinaryBitwiseInstruction *ins; michael@0: switch (op) { michael@0: case JSOP_BITAND: michael@0: ins = MBitAnd::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_BITOR: michael@0: ins = MBitOr::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_BITXOR: michael@0: ins = MBitXor::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_LSH: michael@0: ins = MLsh::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_RSH: michael@0: ins = MRsh::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_URSH: michael@0: ins = MUrsh::New(alloc(), left, right); michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected bitop"); michael@0: } michael@0: michael@0: current->add(ins); michael@0: ins->infer(inspector, pc); michael@0: michael@0: current->push(ins); michael@0: if (ins->isEffectful() && !resumeAfter(ins)) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_binary(JSOp op, MDefinition 
*left, MDefinition *right) michael@0: { michael@0: // Do a string concatenation if adding two inputs that are int or string michael@0: // and at least one is a string. michael@0: if (op == JSOP_ADD && michael@0: ((left->type() == MIRType_String && michael@0: (right->type() == MIRType_String || michael@0: right->type() == MIRType_Int32 || michael@0: right->type() == MIRType_Double)) || michael@0: (left->type() == MIRType_Int32 && michael@0: right->type() == MIRType_String) || michael@0: (left->type() == MIRType_Double && michael@0: right->type() == MIRType_String))) michael@0: { michael@0: MConcat *ins = MConcat::New(alloc(), left, right); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: return maybeInsertResume(); michael@0: } michael@0: michael@0: MBinaryArithInstruction *ins; michael@0: switch (op) { michael@0: case JSOP_ADD: michael@0: ins = MAdd::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_SUB: michael@0: ins = MSub::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_MUL: michael@0: ins = MMul::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_DIV: michael@0: ins = MDiv::New(alloc(), left, right); michael@0: break; michael@0: michael@0: case JSOP_MOD: michael@0: ins = MMod::New(alloc(), left, right); michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected binary opcode"); michael@0: } michael@0: michael@0: current->add(ins); michael@0: ins->infer(alloc(), inspector, pc); michael@0: current->push(ins); michael@0: michael@0: if (ins->isEffectful()) michael@0: return resumeAfter(ins); michael@0: return maybeInsertResume(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_binary(JSOp op) michael@0: { michael@0: MDefinition *right = current->pop(); michael@0: MDefinition *left = current->pop(); michael@0: michael@0: return jsop_binary(op, left, right); michael@0: } michael@0: michael@0: bool michael@0: 
IonBuilder::jsop_pos() michael@0: { michael@0: if (IsNumberType(current->peek(-1)->type())) { michael@0: // Already int32 or double. Set the operand as implicitly used so it michael@0: // doesn't get optimized out if it has no other uses, as we could bail michael@0: // out. michael@0: current->peek(-1)->setImplicitlyUsedUnchecked(); michael@0: return true; michael@0: } michael@0: michael@0: // Compile +x as x * 1. michael@0: MDefinition *value = current->pop(); michael@0: MConstant *one = MConstant::New(alloc(), Int32Value(1)); michael@0: current->add(one); michael@0: michael@0: return jsop_binary(JSOP_MUL, value, one); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_neg() michael@0: { michael@0: // Since JSOP_NEG does not use a slot, we cannot push the MConstant. michael@0: // The MConstant is therefore passed to JSOP_MUL without slot traffic. michael@0: MConstant *negator = MConstant::New(alloc(), Int32Value(-1)); michael@0: current->add(negator); michael@0: michael@0: MDefinition *right = current->pop(); michael@0: michael@0: if (!jsop_binary(JSOP_MUL, negator, right)) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: class AutoAccumulateReturns michael@0: { michael@0: MIRGraph &graph_; michael@0: MIRGraphReturns *prev_; michael@0: michael@0: public: michael@0: AutoAccumulateReturns(MIRGraph &graph, MIRGraphReturns &returns) michael@0: : graph_(graph) michael@0: { michael@0: prev_ = graph_.returnAccumulator(); michael@0: graph_.setReturnAccumulator(&returns); michael@0: } michael@0: ~AutoAccumulateReturns() { michael@0: graph_.setReturnAccumulator(prev_); michael@0: } michael@0: }; michael@0: michael@0: bool michael@0: IonBuilder::inlineScriptedCall(CallInfo &callInfo, JSFunction *target) michael@0: { michael@0: JS_ASSERT(target->hasScript()); michael@0: JS_ASSERT(IsIonInlinablePC(pc)); michael@0: michael@0: callInfo.setImplicitlyUsedUnchecked(); michael@0: michael@0: // Ensure sufficient space in the slots: 
needed for inlining from FUNAPPLY. michael@0: uint32_t depth = current->stackDepth() + callInfo.numFormals(); michael@0: if (depth > current->nslots()) { michael@0: if (!current->increaseSlots(depth - current->nslots())) michael@0: return false; michael@0: } michael@0: michael@0: // Create new |this| on the caller-side for inlined constructors. michael@0: if (callInfo.constructing()) { michael@0: MDefinition *thisDefn = createThis(target, callInfo.fun()); michael@0: if (!thisDefn) michael@0: return false; michael@0: callInfo.setThis(thisDefn); michael@0: } michael@0: michael@0: // Capture formals in the outer resume point. michael@0: callInfo.pushFormals(current); michael@0: michael@0: MResumePoint *outerResumePoint = michael@0: MResumePoint::New(alloc(), current, pc, callerResumePoint_, MResumePoint::Outer); michael@0: if (!outerResumePoint) michael@0: return false; michael@0: michael@0: // Pop formals again, except leave |fun| on stack for duration of call. michael@0: callInfo.popFormals(current); michael@0: current->push(callInfo.fun()); michael@0: michael@0: JSScript *calleeScript = target->nonLazyScript(); michael@0: BaselineInspector inspector(calleeScript); michael@0: michael@0: // Improve type information of |this| when not set. michael@0: if (callInfo.constructing() && michael@0: !callInfo.thisArg()->resultTypeSet() && michael@0: calleeScript->types) michael@0: { michael@0: types::StackTypeSet *types = types::TypeScript::ThisTypes(calleeScript); michael@0: if (!types->unknown()) { michael@0: types::TemporaryTypeSet *clonedTypes = types->clone(alloc_->lifoAlloc()); michael@0: if (!clonedTypes) michael@0: return oom(); michael@0: MTypeBarrier *barrier = MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes); michael@0: current->add(barrier); michael@0: callInfo.setThis(barrier); michael@0: } michael@0: } michael@0: michael@0: // Start inlining. 
michael@0: LifoAlloc *lifoAlloc = alloc_->lifoAlloc(); michael@0: CompileInfo *info = lifoAlloc->new_(calleeScript, target, michael@0: (jsbytecode *)nullptr, callInfo.constructing(), michael@0: this->info().executionMode(), michael@0: /* needsArgsObj = */ false); michael@0: if (!info) michael@0: return false; michael@0: michael@0: MIRGraphReturns returns(alloc()); michael@0: AutoAccumulateReturns aar(graph(), returns); michael@0: michael@0: // Build the graph. michael@0: IonBuilder inlineBuilder(analysisContext, compartment, options, &alloc(), &graph(), constraints(), michael@0: &inspector, info, &optimizationInfo(), nullptr, inliningDepth_ + 1, michael@0: loopDepth_); michael@0: if (!inlineBuilder.buildInline(this, outerResumePoint, callInfo)) { michael@0: if (analysisContext && analysisContext->isExceptionPending()) { michael@0: IonSpew(IonSpew_Abort, "Inline builder raised exception."); michael@0: abortReason_ = AbortReason_Error; michael@0: return false; michael@0: } michael@0: michael@0: // Inlining the callee failed. Mark the callee as uninlineable only if michael@0: // the inlining was aborted for a non-exception reason. michael@0: if (inlineBuilder.abortReason_ == AbortReason_Disable) { michael@0: calleeScript->setUninlineable(); michael@0: abortReason_ = AbortReason_Inlining; michael@0: } else if (inlineBuilder.abortReason_ == AbortReason_Inlining) { michael@0: abortReason_ = AbortReason_Inlining; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: // Create return block. michael@0: jsbytecode *postCall = GetNextPc(pc); michael@0: MBasicBlock *returnBlock = newBlock(nullptr, postCall); michael@0: if (!returnBlock) michael@0: return false; michael@0: returnBlock->setCallerResumePoint(callerResumePoint_); michael@0: michael@0: // When profiling add InlineExit instruction to indicate end of inlined function. 
michael@0: if (instrumentedProfiling()) michael@0: returnBlock->add(MProfilerStackOp::New(alloc(), nullptr, MProfilerStackOp::InlineExit)); michael@0: michael@0: // Inherit the slots from current and pop |fun|. michael@0: returnBlock->inheritSlots(current); michael@0: returnBlock->pop(); michael@0: michael@0: // Accumulate return values. michael@0: if (returns.empty()) { michael@0: // Inlining of functions that have no exit is not supported. michael@0: calleeScript->setUninlineable(); michael@0: abortReason_ = AbortReason_Inlining; michael@0: return false; michael@0: } michael@0: MDefinition *retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock); michael@0: if (!retvalDefn) michael@0: return false; michael@0: returnBlock->push(retvalDefn); michael@0: michael@0: // Initialize entry slots now that the stack has been fixed up. michael@0: if (!returnBlock->initEntrySlots(alloc())) michael@0: return false; michael@0: michael@0: return setCurrentAndSpecializePhis(returnBlock); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::patchInlinedReturn(CallInfo &callInfo, MBasicBlock *exit, MBasicBlock *bottom) michael@0: { michael@0: // Replaces the MReturn in the exit block with an MGoto. michael@0: MDefinition *rdef = exit->lastIns()->toReturn()->input(); michael@0: exit->discardLastIns(); michael@0: michael@0: // Constructors must be patched by the caller to always return an object. michael@0: if (callInfo.constructing()) { michael@0: if (rdef->type() == MIRType_Value) { michael@0: // Unknown return: dynamically detect objects. michael@0: MReturnFromCtor *filter = MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg()); michael@0: exit->add(filter); michael@0: rdef = filter; michael@0: } else if (rdef->type() != MIRType_Object) { michael@0: // Known non-object return: force |this|. 
michael@0: rdef = callInfo.thisArg(); michael@0: } michael@0: } else if (callInfo.isSetter()) { michael@0: // Setters return their argument, not whatever value is returned. michael@0: rdef = callInfo.getArg(0); michael@0: } michael@0: michael@0: MGoto *replacement = MGoto::New(alloc(), bottom); michael@0: exit->end(replacement); michael@0: if (!bottom->addPredecessorWithoutPhis(exit)) michael@0: return nullptr; michael@0: michael@0: return rdef; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::patchInlinedReturns(CallInfo &callInfo, MIRGraphReturns &returns, MBasicBlock *bottom) michael@0: { michael@0: // Replaces MReturns with MGotos, returning the MDefinition michael@0: // representing the return value, or nullptr. michael@0: JS_ASSERT(returns.length() > 0); michael@0: michael@0: if (returns.length() == 1) michael@0: return patchInlinedReturn(callInfo, returns[0], bottom); michael@0: michael@0: // Accumulate multiple returns with a phi. michael@0: MPhi *phi = MPhi::New(alloc(), bottom->stackDepth()); michael@0: if (!phi->reserveLength(returns.length())) michael@0: return nullptr; michael@0: michael@0: for (size_t i = 0; i < returns.length(); i++) { michael@0: MDefinition *rdef = patchInlinedReturn(callInfo, returns[i], bottom); michael@0: if (!rdef) michael@0: return nullptr; michael@0: phi->addInput(rdef); michael@0: } michael@0: michael@0: bottom->addPhi(phi); michael@0: return phi; michael@0: } michael@0: michael@0: IonBuilder::InliningDecision michael@0: IonBuilder::makeInliningDecision(JSFunction *target, CallInfo &callInfo) michael@0: { michael@0: // When there is no target, inlining is impossible. michael@0: if (target == nullptr) michael@0: return InliningDecision_DontInline; michael@0: michael@0: // Never inline during the arguments usage analysis. 
michael@0: if (info().executionMode() == ArgumentsUsageAnalysis) michael@0: return InliningDecision_DontInline; michael@0: michael@0: // Native functions provide their own detection in inlineNativeCall(). michael@0: if (target->isNative()) michael@0: return InliningDecision_Inline; michael@0: michael@0: // Determine whether inlining is possible at callee site michael@0: InliningDecision decision = canInlineTarget(target, callInfo); michael@0: if (decision != InliningDecision_Inline) michael@0: return decision; michael@0: michael@0: // Heuristics! michael@0: JSScript *targetScript = target->nonLazyScript(); michael@0: michael@0: // Skip heuristics if we have an explicit hint to inline. michael@0: if (!targetScript->shouldInline()) { michael@0: // Cap the inlining depth. michael@0: if (js_JitOptions.isSmallFunction(targetScript)) { michael@0: if (inliningDepth_ >= optimizationInfo().smallFunctionMaxInlineDepth()) michael@0: return DontInline(targetScript, "Vetoed: exceeding allowed inline depth"); michael@0: } else { michael@0: if (inliningDepth_ >= optimizationInfo().maxInlineDepth()) michael@0: return DontInline(targetScript, "Vetoed: exceeding allowed inline depth"); michael@0: michael@0: if (targetScript->hasLoops()) michael@0: return DontInline(targetScript, "Vetoed: big function that contains a loop"); michael@0: michael@0: // Caller must not be excessively large. michael@0: if (script()->length() >= optimizationInfo().inliningMaxCallerBytecodeLength()) michael@0: return DontInline(targetScript, "Vetoed: caller excessively large"); michael@0: } michael@0: michael@0: // Callee must not be excessively large. michael@0: // This heuristic also applies to the callsite as a whole. 
michael@0: if (targetScript->length() > optimizationInfo().inlineMaxTotalBytecodeLength()) michael@0: return DontInline(targetScript, "Vetoed: callee excessively large"); michael@0: michael@0: // Callee must have been called a few times to have somewhat stable michael@0: // type information, except for definite properties analysis, michael@0: // as the caller has not run yet. michael@0: if (targetScript->getUseCount() < optimizationInfo().usesBeforeInlining() && michael@0: info().executionMode() != DefinitePropertiesAnalysis) michael@0: { michael@0: return DontInline(targetScript, "Vetoed: callee is insufficiently hot."); michael@0: } michael@0: } michael@0: michael@0: // TI calls ObjectStateChange to trigger invalidation of the caller. michael@0: types::TypeObjectKey *targetType = types::TypeObjectKey::get(target); michael@0: targetType->watchStateChangeForInlinedCall(constraints()); michael@0: michael@0: // We mustn't relazify functions that have been inlined, because there's michael@0: // no way to tell if it safe to do so. michael@0: script()->setHasBeenInlined(); michael@0: michael@0: return InliningDecision_Inline; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::selectInliningTargets(ObjectVector &targets, CallInfo &callInfo, BoolVector &choiceSet, michael@0: uint32_t *numInlineable) michael@0: { michael@0: *numInlineable = 0; michael@0: uint32_t totalSize = 0; michael@0: michael@0: // For each target, ask whether it may be inlined. 
michael@0: if (!choiceSet.reserve(targets.length())) michael@0: return false; michael@0: michael@0: for (size_t i = 0; i < targets.length(); i++) { michael@0: JSFunction *target = &targets[i]->as(); michael@0: bool inlineable; michael@0: InliningDecision decision = makeInliningDecision(target, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return false; michael@0: case InliningDecision_DontInline: michael@0: inlineable = false; michael@0: break; michael@0: case InliningDecision_Inline: michael@0: inlineable = true; michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unhandled InliningDecision value!"); michael@0: } michael@0: michael@0: // Enforce a maximum inlined bytecode limit at the callsite. michael@0: if (inlineable && target->isInterpreted()) { michael@0: totalSize += target->nonLazyScript()->length(); michael@0: if (totalSize > optimizationInfo().inlineMaxTotalBytecodeLength()) michael@0: inlineable = false; michael@0: } michael@0: michael@0: choiceSet.append(inlineable); michael@0: if (inlineable) michael@0: *numInlineable += 1; michael@0: } michael@0: michael@0: JS_ASSERT(choiceSet.length() == targets.length()); michael@0: return true; michael@0: } michael@0: michael@0: static bool michael@0: CanInlineGetPropertyCache(MGetPropertyCache *cache, MDefinition *thisDef) michael@0: { michael@0: JS_ASSERT(cache->object()->type() == MIRType_Object); michael@0: if (cache->object() != thisDef) michael@0: return false; michael@0: michael@0: InlinePropertyTable *table = cache->propTable(); michael@0: if (!table) michael@0: return false; michael@0: if (table->numEntries() == 0) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: MGetPropertyCache * michael@0: IonBuilder::getInlineableGetPropertyCache(CallInfo &callInfo) michael@0: { michael@0: if (callInfo.constructing()) michael@0: return nullptr; michael@0: michael@0: MDefinition *thisDef = callInfo.thisArg(); 
michael@0: if (thisDef->type() != MIRType_Object) michael@0: return nullptr; michael@0: michael@0: MDefinition *funcDef = callInfo.fun(); michael@0: if (funcDef->type() != MIRType_Object) michael@0: return nullptr; michael@0: michael@0: // MGetPropertyCache with no uses may be optimized away. michael@0: if (funcDef->isGetPropertyCache()) { michael@0: MGetPropertyCache *cache = funcDef->toGetPropertyCache(); michael@0: if (cache->hasUses()) michael@0: return nullptr; michael@0: if (!CanInlineGetPropertyCache(cache, thisDef)) michael@0: return nullptr; michael@0: return cache; michael@0: } michael@0: michael@0: // Optimize away the following common pattern: michael@0: // MTypeBarrier[MIRType_Object] <- MGetPropertyCache michael@0: if (funcDef->isTypeBarrier()) { michael@0: MTypeBarrier *barrier = funcDef->toTypeBarrier(); michael@0: if (barrier->hasUses()) michael@0: return nullptr; michael@0: if (barrier->type() != MIRType_Object) michael@0: return nullptr; michael@0: if (!barrier->input()->isGetPropertyCache()) michael@0: return nullptr; michael@0: michael@0: MGetPropertyCache *cache = barrier->input()->toGetPropertyCache(); michael@0: if (cache->hasUses() && !cache->hasOneUse()) michael@0: return nullptr; michael@0: if (!CanInlineGetPropertyCache(cache, thisDef)) michael@0: return nullptr; michael@0: return cache; michael@0: } michael@0: michael@0: return nullptr; michael@0: } michael@0: michael@0: IonBuilder::InliningStatus michael@0: IonBuilder::inlineSingleCall(CallInfo &callInfo, JSFunction *target) michael@0: { michael@0: // Expects formals to be popped and wrapped. 
michael@0: if (target->isNative()) michael@0: return inlineNativeCall(callInfo, target); michael@0: michael@0: if (!inlineScriptedCall(callInfo, target)) michael@0: return InliningStatus_Error; michael@0: return InliningStatus_Inlined; michael@0: } michael@0: michael@0: IonBuilder::InliningStatus michael@0: IonBuilder::inlineCallsite(ObjectVector &targets, ObjectVector &originals, michael@0: bool lambda, CallInfo &callInfo) michael@0: { michael@0: if (targets.empty()) michael@0: return InliningStatus_NotInlined; michael@0: michael@0: // Is the function provided by an MGetPropertyCache? michael@0: // If so, the cache may be movable to a fallback path, with a dispatch michael@0: // instruction guarding on the incoming TypeObject. michael@0: MGetPropertyCache *propCache = getInlineableGetPropertyCache(callInfo); michael@0: michael@0: // Inline single targets -- unless they derive from a cache, in which case michael@0: // avoiding the cache and guarding is still faster. michael@0: if (!propCache && targets.length() == 1) { michael@0: JSFunction *target = &targets[0]->as(); michael@0: InliningDecision decision = makeInliningDecision(target, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return InliningStatus_Error; michael@0: case InliningDecision_DontInline: michael@0: return InliningStatus_NotInlined; michael@0: case InliningDecision_Inline: michael@0: break; michael@0: } michael@0: michael@0: // Inlining will elminate uses of the original callee, but it needs to michael@0: // be preserved in phis if we bail out. Mark the old callee definition as michael@0: // implicitly used to ensure this happens. michael@0: callInfo.fun()->setImplicitlyUsedUnchecked(); michael@0: michael@0: // If the callee is not going to be a lambda (which may vary across michael@0: // different invocations), then the callee definition can be replaced by a michael@0: // constant. 
michael@0: if (!lambda) { michael@0: // Replace the function with an MConstant. michael@0: MConstant *constFun = constant(ObjectValue(*target)); michael@0: callInfo.setFun(constFun); michael@0: } michael@0: michael@0: return inlineSingleCall(callInfo, target); michael@0: } michael@0: michael@0: // Choose a subset of the targets for polymorphic inlining. michael@0: BoolVector choiceSet(alloc()); michael@0: uint32_t numInlined; michael@0: if (!selectInliningTargets(targets, callInfo, choiceSet, &numInlined)) michael@0: return InliningStatus_Error; michael@0: if (numInlined == 0) michael@0: return InliningStatus_NotInlined; michael@0: michael@0: // Perform a polymorphic dispatch. michael@0: if (!inlineCalls(callInfo, targets, originals, choiceSet, propCache)) michael@0: return InliningStatus_Error; michael@0: michael@0: return InliningStatus_Inlined; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::inlineGenericFallback(JSFunction *target, CallInfo &callInfo, MBasicBlock *dispatchBlock, michael@0: bool clonedAtCallsite) michael@0: { michael@0: // Generate a new block with all arguments on-stack. michael@0: MBasicBlock *fallbackBlock = newBlock(dispatchBlock, pc); michael@0: if (!fallbackBlock) michael@0: return false; michael@0: michael@0: // Create a new CallInfo to track modified state within this block. michael@0: CallInfo fallbackInfo(alloc(), callInfo.constructing()); michael@0: if (!fallbackInfo.init(callInfo)) michael@0: return false; michael@0: fallbackInfo.popFormals(fallbackBlock); michael@0: michael@0: // Generate an MCall, which uses stateful |current|. michael@0: if (!setCurrentAndSpecializePhis(fallbackBlock)) michael@0: return false; michael@0: if (!makeCall(target, fallbackInfo, clonedAtCallsite)) michael@0: return false; michael@0: michael@0: // Pass return block to caller as |current|. 
michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock, michael@0: MTypeObjectDispatch *dispatch, MGetPropertyCache *cache, michael@0: MBasicBlock **fallbackTarget) michael@0: { michael@0: // Getting here implies the following: michael@0: // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache michael@0: // followed by an MTypeBarrier. michael@0: JS_ASSERT(callInfo.fun()->isGetPropertyCache() || callInfo.fun()->isTypeBarrier()); michael@0: michael@0: // 2. The MGetPropertyCache has inlineable cases by guarding on the TypeObject. michael@0: JS_ASSERT(dispatch->numCases() > 0); michael@0: michael@0: // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only michael@0: // have at most a single use. michael@0: JS_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses()); michael@0: JS_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse()); michael@0: michael@0: // This means that no resume points yet capture the MGetPropertyCache, michael@0: // so everything from the MGetPropertyCache up until the call is movable. michael@0: // We now move the MGetPropertyCache and friends into a fallback path. michael@0: michael@0: // Create a new CallInfo to track modified state within the fallback path. michael@0: CallInfo fallbackInfo(alloc(), callInfo.constructing()); michael@0: if (!fallbackInfo.init(callInfo)) michael@0: return false; michael@0: michael@0: // Capture stack prior to the call operation. This captures the function. 
michael@0: MResumePoint *preCallResumePoint = michael@0: MResumePoint::New(alloc(), dispatchBlock, pc, callerResumePoint_, MResumePoint::ResumeAt); michael@0: if (!preCallResumePoint) michael@0: return false; michael@0: michael@0: DebugOnly preCallFuncIndex = preCallResumePoint->numOperands() - callInfo.numFormals(); michael@0: JS_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) == fallbackInfo.fun()); michael@0: michael@0: // In the dispatch block, replace the function's slot entry with Undefined. michael@0: MConstant *undefined = MConstant::New(alloc(), UndefinedValue()); michael@0: dispatchBlock->add(undefined); michael@0: dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined); michael@0: michael@0: // Construct a block that does nothing but remove formals from the stack. michael@0: // This is effectively changing the entry resume point of the later fallback block. michael@0: MBasicBlock *prepBlock = newBlock(dispatchBlock, pc); michael@0: if (!prepBlock) michael@0: return false; michael@0: fallbackInfo.popFormals(prepBlock); michael@0: michael@0: // Construct a block into which the MGetPropertyCache can be moved. michael@0: // This is subtle: the pc and resume point are those of the MGetPropertyCache! michael@0: InlinePropertyTable *propTable = cache->propTable(); michael@0: JS_ASSERT(propTable->pc() != nullptr); michael@0: JS_ASSERT(propTable->priorResumePoint() != nullptr); michael@0: MBasicBlock *getPropBlock = newBlock(prepBlock, propTable->pc(), propTable->priorResumePoint()); michael@0: if (!getPropBlock) michael@0: return false; michael@0: michael@0: prepBlock->end(MGoto::New(alloc(), getPropBlock)); michael@0: michael@0: // Since the getPropBlock inherited the stack from right before the MGetPropertyCache, michael@0: // the target of the MGetPropertyCache is still on the stack. 
michael@0: DebugOnly checkObject = getPropBlock->pop(); michael@0: JS_ASSERT(checkObject == cache->object()); michael@0: michael@0: // Move the MGetPropertyCache and friends into the getPropBlock. michael@0: if (fallbackInfo.fun()->isGetPropertyCache()) { michael@0: JS_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache); michael@0: getPropBlock->addFromElsewhere(cache); michael@0: getPropBlock->push(cache); michael@0: } else { michael@0: MTypeBarrier *barrier = callInfo.fun()->toTypeBarrier(); michael@0: JS_ASSERT(barrier->type() == MIRType_Object); michael@0: JS_ASSERT(barrier->input()->isGetPropertyCache()); michael@0: JS_ASSERT(barrier->input()->toGetPropertyCache() == cache); michael@0: michael@0: getPropBlock->addFromElsewhere(cache); michael@0: getPropBlock->addFromElsewhere(barrier); michael@0: getPropBlock->push(barrier); michael@0: } michael@0: michael@0: // Construct an end block with the correct resume point. michael@0: MBasicBlock *preCallBlock = newBlock(getPropBlock, pc, preCallResumePoint); michael@0: if (!preCallBlock) michael@0: return false; michael@0: getPropBlock->end(MGoto::New(alloc(), preCallBlock)); michael@0: michael@0: // Now inline the MCallGeneric, using preCallBlock as the dispatch point. michael@0: if (!inlineGenericFallback(nullptr, fallbackInfo, preCallBlock, false)) michael@0: return false; michael@0: michael@0: // inlineGenericFallback() set the return block as |current|. michael@0: preCallBlock->end(MGoto::New(alloc(), current)); michael@0: *fallbackTarget = prepBlock; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::inlineCalls(CallInfo &callInfo, ObjectVector &targets, michael@0: ObjectVector &originals, BoolVector &choiceSet, michael@0: MGetPropertyCache *maybeCache) michael@0: { michael@0: // Only handle polymorphic inlining. 
michael@0: JS_ASSERT(IsIonInlinablePC(pc)); michael@0: JS_ASSERT(choiceSet.length() == targets.length()); michael@0: JS_ASSERT_IF(!maybeCache, targets.length() >= 2); michael@0: JS_ASSERT_IF(maybeCache, targets.length() >= 1); michael@0: michael@0: MBasicBlock *dispatchBlock = current; michael@0: callInfo.setImplicitlyUsedUnchecked(); michael@0: callInfo.pushFormals(dispatchBlock); michael@0: michael@0: // Patch any InlinePropertyTable to only contain functions that are inlineable. michael@0: // michael@0: // Note that we trim using originals, as callsite clones are not user michael@0: // visible. We don't patch the entries inside the table with the cloned michael@0: // targets, as the entries should only be used for comparison. michael@0: // michael@0: // The InlinePropertyTable will also be patched at the end to exclude native functions michael@0: // that vetoed inlining. michael@0: if (maybeCache) { michael@0: InlinePropertyTable *propTable = maybeCache->propTable(); michael@0: propTable->trimToTargets(originals); michael@0: if (propTable->numEntries() == 0) michael@0: maybeCache = nullptr; michael@0: } michael@0: michael@0: // Generate a dispatch based on guard kind. michael@0: MDispatchInstruction *dispatch; michael@0: if (maybeCache) { michael@0: dispatch = MTypeObjectDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable()); michael@0: callInfo.fun()->setImplicitlyUsedUnchecked(); michael@0: } else { michael@0: dispatch = MFunctionDispatch::New(alloc(), callInfo.fun()); michael@0: } michael@0: michael@0: // Generate a return block to host the rval-collecting MPhi. michael@0: jsbytecode *postCall = GetNextPc(pc); michael@0: MBasicBlock *returnBlock = newBlock(nullptr, postCall); michael@0: if (!returnBlock) michael@0: return false; michael@0: returnBlock->setCallerResumePoint(callerResumePoint_); michael@0: michael@0: // Set up stack, used to manually create a post-call resume point. 
michael@0: returnBlock->inheritSlots(dispatchBlock); michael@0: callInfo.popFormals(returnBlock); michael@0: michael@0: MPhi *retPhi = MPhi::New(alloc(), returnBlock->stackDepth()); michael@0: returnBlock->addPhi(retPhi); michael@0: returnBlock->push(retPhi); michael@0: michael@0: // Create a resume point from current stack state. michael@0: returnBlock->initEntrySlots(alloc()); michael@0: michael@0: // Reserve the capacity for the phi. michael@0: // Note: this is an upperbound. Unreachable targets and uninlineable natives are also counted. michael@0: uint32_t count = 1; // Possible fallback block. michael@0: for (uint32_t i = 0; i < targets.length(); i++) { michael@0: if (choiceSet[i]) michael@0: count++; michael@0: } michael@0: retPhi->reserveLength(count); michael@0: michael@0: // During inlining the 'this' value is assigned a type set which is michael@0: // specialized to the type objects which can generate that inlining target. michael@0: // After inlining the original type set is restored. michael@0: types::TemporaryTypeSet *cacheObjectTypeSet = michael@0: maybeCache ? maybeCache->object()->resultTypeSet() : nullptr; michael@0: michael@0: // Inline each of the inlineable targets. michael@0: JS_ASSERT(targets.length() == originals.length()); michael@0: for (uint32_t i = 0; i < targets.length(); i++) { michael@0: // When original != target, the target is a callsite clone. The michael@0: // original should be used for guards, and the target should be the michael@0: // actual function inlined. michael@0: JSFunction *original = &originals[i]->as(); michael@0: JSFunction *target = &targets[i]->as(); michael@0: michael@0: // Target must be inlineable. michael@0: if (!choiceSet[i]) michael@0: continue; michael@0: michael@0: // Target must be reachable by the MDispatchInstruction. 
michael@0: if (maybeCache && !maybeCache->propTable()->hasFunction(original)) { michael@0: choiceSet[i] = false; michael@0: continue; michael@0: } michael@0: michael@0: MBasicBlock *inlineBlock = newBlock(dispatchBlock, pc); michael@0: if (!inlineBlock) michael@0: return false; michael@0: michael@0: // Create a function MConstant to use in the entry ResumePoint. michael@0: MConstant *funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints()); michael@0: funcDef->setImplicitlyUsedUnchecked(); michael@0: dispatchBlock->add(funcDef); michael@0: michael@0: // Use the MConstant in the inline resume point and on stack. michael@0: int funIndex = inlineBlock->entryResumePoint()->numOperands() - callInfo.numFormals(); michael@0: inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef); michael@0: inlineBlock->rewriteSlot(funIndex, funcDef); michael@0: michael@0: // Create a new CallInfo to track modified state within the inline block. michael@0: CallInfo inlineInfo(alloc(), callInfo.constructing()); michael@0: if (!inlineInfo.init(callInfo)) michael@0: return false; michael@0: inlineInfo.popFormals(inlineBlock); michael@0: inlineInfo.setFun(funcDef); michael@0: michael@0: if (maybeCache) { michael@0: JS_ASSERT(callInfo.thisArg() == maybeCache->object()); michael@0: types::TemporaryTypeSet *targetThisTypes = michael@0: maybeCache->propTable()->buildTypeSetForFunction(original); michael@0: if (!targetThisTypes) michael@0: return false; michael@0: maybeCache->object()->setResultTypeSet(targetThisTypes); michael@0: } michael@0: michael@0: // Inline the call into the inlineBlock. michael@0: if (!setCurrentAndSpecializePhis(inlineBlock)) michael@0: return false; michael@0: InliningStatus status = inlineSingleCall(inlineInfo, target); michael@0: if (status == InliningStatus_Error) michael@0: return false; michael@0: michael@0: // Natives may veto inlining. 
michael@0: if (status == InliningStatus_NotInlined) { michael@0: JS_ASSERT(target->isNative()); michael@0: JS_ASSERT(current == inlineBlock); michael@0: inlineBlock->discardAllResumePoints(); michael@0: graph().removeBlock(inlineBlock); michael@0: choiceSet[i] = false; michael@0: continue; michael@0: } michael@0: michael@0: // inlineSingleCall() changed |current| to the inline return block. michael@0: MBasicBlock *inlineReturnBlock = current; michael@0: setCurrent(dispatchBlock); michael@0: michael@0: // Connect the inline path to the returnBlock. michael@0: // michael@0: // Note that guarding is on the original function pointer even michael@0: // if there is a clone, since cloning occurs at the callsite. michael@0: dispatch->addCase(original, inlineBlock); michael@0: michael@0: MDefinition *retVal = inlineReturnBlock->peek(-1); michael@0: retPhi->addInput(retVal); michael@0: inlineReturnBlock->end(MGoto::New(alloc(), returnBlock)); michael@0: if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock)) michael@0: return false; michael@0: } michael@0: michael@0: // Patch the InlinePropertyTable to not dispatch to vetoed paths. michael@0: // michael@0: // Note that like above, we trim using originals instead of targets. michael@0: if (maybeCache) { michael@0: maybeCache->object()->setResultTypeSet(cacheObjectTypeSet); michael@0: michael@0: InlinePropertyTable *propTable = maybeCache->propTable(); michael@0: propTable->trimTo(originals, choiceSet); michael@0: michael@0: // If all paths were vetoed, output only a generic fallback path. michael@0: if (propTable->numEntries() == 0) { michael@0: JS_ASSERT(dispatch->numCases() == 0); michael@0: maybeCache = nullptr; michael@0: } michael@0: } michael@0: michael@0: // If necessary, generate a fallback path. michael@0: // MTypeObjectDispatch always uses a fallback path. 
michael@0: if (maybeCache || dispatch->numCases() < targets.length()) { michael@0: // Generate fallback blocks, and set |current| to the fallback return block. michael@0: if (maybeCache) { michael@0: MBasicBlock *fallbackTarget; michael@0: if (!inlineTypeObjectFallback(callInfo, dispatchBlock, (MTypeObjectDispatch *)dispatch, michael@0: maybeCache, &fallbackTarget)) michael@0: { michael@0: return false; michael@0: } michael@0: dispatch->addFallback(fallbackTarget); michael@0: } else { michael@0: JSFunction *remaining = nullptr; michael@0: bool clonedAtCallsite = false; michael@0: michael@0: // If there is only 1 remaining case, we can annotate the fallback call michael@0: // with the target information. michael@0: if (dispatch->numCases() + 1 == originals.length()) { michael@0: for (uint32_t i = 0; i < originals.length(); i++) { michael@0: if (choiceSet[i]) michael@0: continue; michael@0: michael@0: remaining = &targets[i]->as(); michael@0: clonedAtCallsite = targets[i] != originals[i]; michael@0: break; michael@0: } michael@0: } michael@0: michael@0: if (!inlineGenericFallback(remaining, callInfo, dispatchBlock, clonedAtCallsite)) michael@0: return false; michael@0: dispatch->addFallback(current); michael@0: } michael@0: michael@0: MBasicBlock *fallbackReturnBlock = current; michael@0: michael@0: // Connect fallback case to return infrastructure. michael@0: MDefinition *retVal = fallbackReturnBlock->peek(-1); michael@0: retPhi->addInput(retVal); michael@0: fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock)); michael@0: if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock)) michael@0: return false; michael@0: } michael@0: michael@0: // Finally add the dispatch instruction. michael@0: // This must be done at the end so that add() may be called above. 
michael@0: dispatchBlock->end(dispatch); michael@0: michael@0: // Check the depth change: +1 for retval michael@0: JS_ASSERT(returnBlock->stackDepth() == dispatchBlock->stackDepth() - callInfo.numFormals() + 1); michael@0: michael@0: graph().moveBlockToEnd(returnBlock); michael@0: return setCurrentAndSpecializePhis(returnBlock); michael@0: } michael@0: michael@0: MInstruction * michael@0: IonBuilder::createDeclEnvObject(MDefinition *callee, MDefinition *scope) michael@0: { michael@0: // Get a template CallObject that we'll use to generate inline object michael@0: // creation. michael@0: DeclEnvObject *templateObj = inspector->templateDeclEnvObject(); michael@0: michael@0: // One field is added to the function to handle its name. This cannot be a michael@0: // dynamic slot because there is still plenty of room on the DeclEnv object. michael@0: JS_ASSERT(!templateObj->hasDynamicSlots()); michael@0: michael@0: // Allocate the actual object. It is important that no intervening michael@0: // instructions could potentially bailout, thus leaking the dynamic slots michael@0: // pointer. michael@0: MInstruction *declEnvObj = MNewDeclEnvObject::New(alloc(), templateObj); michael@0: current->add(declEnvObj); michael@0: michael@0: // Initialize the object's reserved slots. No post barrier is needed here: michael@0: // the object will be allocated in the nursery if possible, and if the michael@0: // tenured heap is used instead, a minor collection will have been performed michael@0: // that moved scope/callee to the tenured heap. 
michael@0: current->add(MStoreFixedSlot::New(alloc(), declEnvObj, DeclEnvObject::enclosingScopeSlot(), scope)); michael@0: current->add(MStoreFixedSlot::New(alloc(), declEnvObj, DeclEnvObject::lambdaSlot(), callee)); michael@0: michael@0: return declEnvObj; michael@0: } michael@0: michael@0: MInstruction * michael@0: IonBuilder::createCallObject(MDefinition *callee, MDefinition *scope) michael@0: { michael@0: // Get a template CallObject that we'll use to generate inline object michael@0: // creation. michael@0: CallObject *templateObj = inspector->templateCallObject(); michael@0: michael@0: // If the CallObject needs dynamic slots, allocate those now. michael@0: MInstruction *slots; michael@0: if (templateObj->hasDynamicSlots()) { michael@0: size_t nslots = JSObject::dynamicSlotsCount(templateObj->numFixedSlots(), michael@0: templateObj->lastProperty()->slotSpan(templateObj->getClass()), michael@0: templateObj->getClass()); michael@0: slots = MNewSlots::New(alloc(), nslots); michael@0: } else { michael@0: slots = MConstant::New(alloc(), NullValue()); michael@0: } michael@0: current->add(slots); michael@0: michael@0: // Allocate the actual object. It is important that no intervening michael@0: // instructions could potentially bailout, thus leaking the dynamic slots michael@0: // pointer. Run-once scripts need a singleton type, so always do a VM call michael@0: // in such cases. michael@0: MUnaryInstruction *callObj; michael@0: if (script()->treatAsRunOnce()) michael@0: callObj = MNewRunOnceCallObject::New(alloc(), templateObj, slots); michael@0: else michael@0: callObj = MNewCallObject::New(alloc(), templateObj, slots); michael@0: current->add(callObj); michael@0: michael@0: // Initialize the object's reserved slots. No post barrier is needed here, michael@0: // for the same reason as in createDeclEnvObject. 
michael@0: current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::enclosingScopeSlot(), scope)); michael@0: current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee)); michael@0: michael@0: // Initialize argument slots. michael@0: for (AliasedFormalIter i(script()); i; i++) { michael@0: unsigned slot = i.scopeSlot(); michael@0: unsigned formal = i.frameIndex(); michael@0: MDefinition *param = current->getSlot(info().argSlotUnchecked(formal)); michael@0: if (slot >= templateObj->numFixedSlots()) michael@0: current->add(MStoreSlot::New(alloc(), slots, slot - templateObj->numFixedSlots(), param)); michael@0: else michael@0: current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param)); michael@0: } michael@0: michael@0: return callObj; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::createThisScripted(MDefinition *callee) michael@0: { michael@0: // Get callee.prototype. michael@0: // michael@0: // This instruction MUST be idempotent: since it does not correspond to an michael@0: // explicit operation in the bytecode, we cannot use resumeAfter(). michael@0: // Getters may not override |prototype| fetching, so this operation is indeed idempotent. michael@0: // - First try an idempotent property cache. michael@0: // - Upon failing idempotent property cache, we can't use a non-idempotent cache, michael@0: // therefore we fallback to CallGetProperty michael@0: // michael@0: // Note: both CallGetProperty and GetPropertyCache can trigger a GC, michael@0: // and thus invalidation. 
michael@0: MInstruction *getProto; michael@0: if (!invalidatedIdempotentCache()) { michael@0: MGetPropertyCache *getPropCache = MGetPropertyCache::New(alloc(), callee, names().prototype, michael@0: /* monitored = */ false); michael@0: getPropCache->setIdempotent(); michael@0: getProto = getPropCache; michael@0: } else { michael@0: MCallGetProperty *callGetProp = MCallGetProperty::New(alloc(), callee, names().prototype, michael@0: /* callprop = */ false); michael@0: callGetProp->setIdempotent(); michael@0: getProto = callGetProp; michael@0: } michael@0: current->add(getProto); michael@0: michael@0: // Create this from prototype michael@0: MCreateThisWithProto *createThis = MCreateThisWithProto::New(alloc(), callee, getProto); michael@0: current->add(createThis); michael@0: michael@0: return createThis; michael@0: } michael@0: michael@0: JSObject * michael@0: IonBuilder::getSingletonPrototype(JSFunction *target) michael@0: { michael@0: if (!target || !target->hasSingletonType()) michael@0: return nullptr; michael@0: types::TypeObjectKey *targetType = types::TypeObjectKey::get(target); michael@0: if (targetType->unknownProperties()) michael@0: return nullptr; michael@0: michael@0: jsid protoid = NameToId(names().prototype); michael@0: types::HeapTypeSetKey protoProperty = targetType->property(protoid); michael@0: michael@0: return protoProperty.singleton(constraints()); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::createThisScriptedSingleton(JSFunction *target, MDefinition *callee) michael@0: { michael@0: // Get the singleton prototype (if exists) michael@0: JSObject *proto = getSingletonPrototype(target); michael@0: if (!proto) michael@0: return nullptr; michael@0: michael@0: JSObject *templateObject = inspector->getTemplateObject(pc); michael@0: if (!templateObject || !templateObject->is()) michael@0: return nullptr; michael@0: if (!templateObject->hasTenuredProto() || templateObject->getProto() != proto) michael@0: return nullptr; 
michael@0: michael@0: if (!target->nonLazyScript()->types) michael@0: return nullptr; michael@0: if (!types::TypeScript::ThisTypes(target->nonLazyScript())->hasType(types::Type::ObjectType(templateObject))) michael@0: return nullptr; michael@0: michael@0: // For template objects with NewScript info, the appropriate allocation michael@0: // kind to use may change due to dynamic property adds. In these cases michael@0: // calling Ion code will be invalidated, but any baseline template object michael@0: // may be stale. Update to the correct template object in this case. michael@0: types::TypeObject *templateType = templateObject->type(); michael@0: if (templateType->hasNewScript()) { michael@0: templateObject = templateType->newScript()->templateObject; michael@0: JS_ASSERT(templateObject->type() == templateType); michael@0: michael@0: // Trigger recompilation if the templateObject changes. michael@0: types::TypeObjectKey::get(templateType)->watchStateChangeForNewScriptTemplate(constraints()); michael@0: } michael@0: michael@0: // Generate an inline path to create a new |this| object with michael@0: // the given singleton prototype. michael@0: MCreateThisWithTemplate *createThis = michael@0: MCreateThisWithTemplate::New(alloc(), constraints(), templateObject, michael@0: templateObject->type()->initialHeap(constraints())); michael@0: current->add(createThis); michael@0: michael@0: return createThis; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::createThis(JSFunction *target, MDefinition *callee) michael@0: { michael@0: // Create this for unknown target michael@0: if (!target) { michael@0: MCreateThis *createThis = MCreateThis::New(alloc(), callee); michael@0: current->add(createThis); michael@0: return createThis; michael@0: } michael@0: michael@0: // Native constructors build the new Object themselves. 
michael@0: if (target->isNative()) { michael@0: if (!target->isNativeConstructor()) michael@0: return nullptr; michael@0: michael@0: MConstant *magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING)); michael@0: current->add(magic); michael@0: return magic; michael@0: } michael@0: michael@0: // Try baking in the prototype. michael@0: MDefinition *createThis = createThisScriptedSingleton(target, callee); michael@0: if (createThis) michael@0: return createThis; michael@0: michael@0: return createThisScripted(callee); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_funcall(uint32_t argc) michael@0: { michael@0: // Stack for JSOP_FUNCALL: michael@0: // 1: arg0 michael@0: // ... michael@0: // argc: argN michael@0: // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position. michael@0: // argc+2: The native 'call' function. michael@0: michael@0: int calleeDepth = -((int)argc + 2); michael@0: int funcDepth = -((int)argc + 1); michael@0: michael@0: // If |Function.prototype.call| may be overridden, don't optimize callsite. michael@0: types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); michael@0: JSFunction *native = getSingleCallTarget(calleeTypes); michael@0: if (!native || !native->isNative() || native->native() != &js_fun_call) { michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: return makeCall(native, callInfo, false); michael@0: } michael@0: current->peek(calleeDepth)->setImplicitlyUsedUnchecked(); michael@0: michael@0: // Extract call target. michael@0: types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet(); michael@0: JSFunction *target = getSingleCallTarget(funTypes); michael@0: michael@0: // Shimmy the slots down to remove the native 'call' function. 
michael@0: current->shimmySlots(funcDepth - 1); michael@0: michael@0: bool zeroArguments = (argc == 0); michael@0: michael@0: // If no |this| argument was provided, explicitly pass Undefined. michael@0: // Pushing is safe here, since one stack slot has been removed. michael@0: if (zeroArguments) { michael@0: pushConstant(UndefinedValue()); michael@0: } else { michael@0: // |this| becomes implicit in the call. michael@0: argc -= 1; michael@0: } michael@0: michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: michael@0: // Try to inline the call. michael@0: if (!zeroArguments) { michael@0: InliningDecision decision = makeInliningDecision(target, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return false; michael@0: case InliningDecision_DontInline: michael@0: break; michael@0: case InliningDecision_Inline: michael@0: if (target->isInterpreted()) michael@0: return inlineScriptedCall(callInfo, target); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: // Call without inlining. michael@0: return makeCall(target, callInfo, false); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_funapply(uint32_t argc) michael@0: { michael@0: int calleeDepth = -((int)argc + 2); michael@0: michael@0: types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); michael@0: JSFunction *native = getSingleCallTarget(calleeTypes); michael@0: if (argc != 2) { michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: return makeCall(native, callInfo, false); michael@0: } michael@0: michael@0: // Disable compilation if the second argument to |apply| cannot be guaranteed michael@0: // to be either definitely |arguments| or definitely not |arguments|. 
michael@0: MDefinition *argument = current->peek(-1); michael@0: if (script()->argumentsHasVarBinding() && michael@0: argument->mightBeType(MIRType_MagicOptimizedArguments) && michael@0: argument->type() != MIRType_MagicOptimizedArguments) michael@0: { michael@0: return abort("fun.apply with MaybeArguments"); michael@0: } michael@0: michael@0: // Fallback to regular call if arg 2 is not definitely |arguments|. michael@0: if (argument->type() != MIRType_MagicOptimizedArguments) { michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: return makeCall(native, callInfo, false); michael@0: } michael@0: michael@0: if (!native || michael@0: !native->isNative() || michael@0: native->native() != js_fun_apply) michael@0: { michael@0: return abort("fun.apply speculation failed"); michael@0: } michael@0: michael@0: current->peek(calleeDepth)->setImplicitlyUsedUnchecked(); michael@0: michael@0: // Use funapply that definitely uses |arguments| michael@0: return jsop_funapplyarguments(argc); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_funapplyarguments(uint32_t argc) michael@0: { michael@0: // Stack for JSOP_FUNAPPLY: michael@0: // 1: Vp michael@0: // 2: This michael@0: // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position. michael@0: // argc+2: The native 'apply' function. michael@0: michael@0: int funcDepth = -((int)argc + 1); michael@0: michael@0: // Extract call target. michael@0: types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet(); michael@0: JSFunction *target = getSingleCallTarget(funTypes); michael@0: michael@0: // When this script isn't inlined, use MApplyArgs, michael@0: // to copy the arguments from the stack and call the function michael@0: if (inliningDepth_ == 0 && info().executionMode() != DefinitePropertiesAnalysis) { michael@0: // The array argument corresponds to the arguments object. 
As the JIT michael@0: // is implicitly reading the arguments object in the next instruction, michael@0: // we need to prevent the deletion of the arguments object from resume michael@0: // points, so that Baseline will behave correctly after a bailout. michael@0: MDefinition *vp = current->pop(); michael@0: vp->setImplicitlyUsedUnchecked(); michael@0: michael@0: MDefinition *argThis = current->pop(); michael@0: michael@0: // Unwrap the (JSFunction *) parameter. michael@0: MDefinition *argFunc = current->pop(); michael@0: michael@0: // Pop apply function. michael@0: current->pop(); michael@0: michael@0: MArgumentsLength *numArgs = MArgumentsLength::New(alloc()); michael@0: current->add(numArgs); michael@0: michael@0: MApplyArgs *apply = MApplyArgs::New(alloc(), target, argFunc, numArgs, argThis); michael@0: current->add(apply); michael@0: current->push(apply); michael@0: if (!resumeAfter(apply)) michael@0: return false; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(apply, types, true); michael@0: } michael@0: michael@0: // When inlining we have the arguments the function gets called with michael@0: // and can optimize even more, by just calling the functions with the args. michael@0: // We also try this path when doing the definite properties analysis, as we michael@0: // can inline the apply() target and don't care about the actual arguments michael@0: // that were passed in. 
michael@0: michael@0: CallInfo callInfo(alloc(), false); michael@0: michael@0: // Vp michael@0: MDefinition *vp = current->pop(); michael@0: vp->setImplicitlyUsedUnchecked(); michael@0: michael@0: // Arguments michael@0: MDefinitionVector args(alloc()); michael@0: if (inliningDepth_) { michael@0: if (!args.appendAll(inlineCallInfo_->argv())) michael@0: return false; michael@0: } michael@0: callInfo.setArgs(&args); michael@0: michael@0: // This michael@0: MDefinition *argThis = current->pop(); michael@0: callInfo.setThis(argThis); michael@0: michael@0: // Pop function parameter. michael@0: MDefinition *argFunc = current->pop(); michael@0: callInfo.setFun(argFunc); michael@0: michael@0: // Pop apply function. michael@0: current->pop(); michael@0: michael@0: // Try to inline the call. michael@0: InliningDecision decision = makeInliningDecision(target, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return false; michael@0: case InliningDecision_DontInline: michael@0: break; michael@0: case InliningDecision_Inline: michael@0: if (target->isInterpreted()) michael@0: return inlineScriptedCall(callInfo, target); michael@0: } michael@0: michael@0: return makeCall(target, callInfo, false); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_call(uint32_t argc, bool constructing) michael@0: { michael@0: // If this call has never executed, try to seed the observed type set michael@0: // based on how the call result is used. michael@0: types::TemporaryTypeSet *observed = bytecodeTypes(pc); michael@0: if (observed->empty()) { michael@0: if (BytecodeFlowsToBitop(pc)) { michael@0: observed->addType(types::Type::Int32Type(), alloc_->lifoAlloc()); michael@0: } else if (*GetNextPc(pc) == JSOP_POS) { michael@0: // Note: this is lame, overspecialized on the code patterns used michael@0: // by asm.js and should be replaced by a more general mechanism. michael@0: // See bug 870847. 
michael@0: observed->addType(types::Type::DoubleType(), alloc_->lifoAlloc()); michael@0: } michael@0: } michael@0: michael@0: int calleeDepth = -((int)argc + 2); michael@0: michael@0: // Acquire known call target if existent. michael@0: ObjectVector originals(alloc()); michael@0: bool gotLambda = false; michael@0: types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); michael@0: if (calleeTypes) { michael@0: if (!getPolyCallTargets(calleeTypes, constructing, originals, 4, &gotLambda)) michael@0: return false; michael@0: } michael@0: JS_ASSERT_IF(gotLambda, originals.length() <= 1); michael@0: michael@0: // If any call targets need to be cloned, look for existing clones to use. michael@0: // Keep track of the originals as we need to case on them for poly inline. michael@0: bool hasClones = false; michael@0: ObjectVector targets(alloc()); michael@0: for (uint32_t i = 0; i < originals.length(); i++) { michael@0: JSFunction *fun = &originals[i]->as(); michael@0: if (fun->hasScript() && fun->nonLazyScript()->shouldCloneAtCallsite()) { michael@0: if (JSFunction *clone = ExistingCloneFunctionAtCallsite(compartment->callsiteClones(), fun, script(), pc)) { michael@0: fun = clone; michael@0: hasClones = true; michael@0: } michael@0: } michael@0: if (!targets.append(fun)) michael@0: return false; michael@0: } michael@0: michael@0: CallInfo callInfo(alloc(), constructing); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: michael@0: // Try inlining michael@0: InliningStatus status = inlineCallsite(targets, originals, gotLambda, callInfo); michael@0: if (status == InliningStatus_Inlined) michael@0: return true; michael@0: if (status == InliningStatus_Error) michael@0: return false; michael@0: michael@0: // No inline, just make the call. 
michael@0: JSFunction *target = nullptr; michael@0: if (targets.length() == 1) michael@0: target = &targets[0]->as(); michael@0: michael@0: return makeCall(target, callInfo, hasClones); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::makeCallsiteClone(JSFunction *target, MDefinition *fun) michael@0: { michael@0: // Bake in the clone eagerly if we have a known target. We have arrived here michael@0: // because TI told us that the known target is a should-clone-at-callsite michael@0: // function, which means that target already is the clone. Make sure to ensure michael@0: // that the old definition remains in resume points. michael@0: if (target) { michael@0: fun->setImplicitlyUsedUnchecked(); michael@0: return constant(ObjectValue(*target)); michael@0: } michael@0: michael@0: // Add a callsite clone IC if we have multiple targets. Note that we michael@0: // should have checked already that at least some targets are marked as michael@0: // should-clone-at-callsite. 
michael@0: MCallsiteCloneCache *clone = MCallsiteCloneCache::New(alloc(), fun, pc); michael@0: current->add(clone); michael@0: return clone; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::testShouldDOMCall(types::TypeSet *inTypes, michael@0: JSFunction *func, JSJitInfo::OpType opType) michael@0: { michael@0: if (!func->isNative() || !func->jitInfo()) michael@0: return false; michael@0: michael@0: // If all the DOM objects flowing through are legal with this michael@0: // property, we can bake in a call to the bottom half of the DOM michael@0: // accessor michael@0: DOMInstanceClassMatchesProto instanceChecker = michael@0: compartment->runtime()->DOMcallbacks()->instanceClassMatchesProto; michael@0: michael@0: const JSJitInfo *jinfo = func->jitInfo(); michael@0: if (jinfo->type() != opType) michael@0: return false; michael@0: michael@0: for (unsigned i = 0; i < inTypes->getObjectCount(); i++) { michael@0: types::TypeObjectKey *curType = inTypes->getObject(i); michael@0: if (!curType) michael@0: continue; michael@0: michael@0: if (!curType->hasTenuredProto()) michael@0: return false; michael@0: JSObject *proto = curType->proto().toObjectOrNull(); michael@0: if (!instanceChecker(proto, jinfo->protoID, jinfo->depth)) michael@0: return false; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: static bool michael@0: ArgumentTypesMatch(MDefinition *def, types::StackTypeSet *calleeTypes) michael@0: { michael@0: if (def->resultTypeSet()) { michael@0: JS_ASSERT(def->type() == MIRType_Value || def->mightBeType(def->type())); michael@0: return def->resultTypeSet()->isSubset(calleeTypes); michael@0: } michael@0: michael@0: if (def->type() == MIRType_Value) michael@0: return false; michael@0: michael@0: if (def->type() == MIRType_Object) michael@0: return calleeTypes->unknownObject(); michael@0: michael@0: return calleeTypes->mightBeMIRType(def->type()); michael@0: } michael@0: michael@0: bool michael@0: 
IonBuilder::testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo) michael@0: { michael@0: // If we have a known target, check if the caller arg types are a subset of callee. michael@0: // Since typeset accumulates and can't decrease that means we don't need to check michael@0: // the arguments anymore. michael@0: if (!target->hasScript()) michael@0: return true; michael@0: michael@0: JSScript *targetScript = target->nonLazyScript(); michael@0: michael@0: if (!targetScript->types) michael@0: return true; michael@0: michael@0: if (!ArgumentTypesMatch(callInfo.thisArg(), types::TypeScript::ThisTypes(targetScript))) michael@0: return true; michael@0: uint32_t expected_args = Min(callInfo.argc(), target->nargs()); michael@0: for (size_t i = 0; i < expected_args; i++) { michael@0: if (!ArgumentTypesMatch(callInfo.getArg(i), types::TypeScript::ArgTypes(targetScript, i))) michael@0: return true; michael@0: } michael@0: for (size_t i = callInfo.argc(); i < target->nargs(); i++) { michael@0: if (!types::TypeScript::ArgTypes(targetScript, i)->mightBeMIRType(MIRType_Undefined)) michael@0: return true; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: MCall * michael@0: IonBuilder::makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite) michael@0: { michael@0: // This function may be called with mutated stack. michael@0: // Querying TI for popped types is invalid. michael@0: michael@0: uint32_t targetArgs = callInfo.argc(); michael@0: michael@0: // Collect number of missing arguments provided that the target is michael@0: // scripted. Native functions are passed an explicit 'argc' parameter. michael@0: if (target && !target->isNative()) michael@0: targetArgs = Max(target->nargs(), callInfo.argc()); michael@0: michael@0: bool isDOMCall = false; michael@0: if (target && !callInfo.constructing()) { michael@0: // We know we have a single call target. 
Check whether the "this" types michael@0: // are DOM types and our function a DOM function, and if so flag the michael@0: // MCall accordingly. michael@0: types::TemporaryTypeSet *thisTypes = callInfo.thisArg()->resultTypeSet(); michael@0: if (thisTypes && michael@0: thisTypes->getKnownMIRType() == MIRType_Object && michael@0: thisTypes->isDOMClass() && michael@0: testShouldDOMCall(thisTypes, target, JSJitInfo::Method)) michael@0: { michael@0: isDOMCall = true; michael@0: } michael@0: } michael@0: michael@0: MCall *call = MCall::New(alloc(), target, targetArgs + 1, callInfo.argc(), michael@0: callInfo.constructing(), isDOMCall); michael@0: if (!call) michael@0: return nullptr; michael@0: michael@0: // Explicitly pad any missing arguments with |undefined|. michael@0: // This permits skipping the argumentsRectifier. michael@0: for (int i = targetArgs; i > (int)callInfo.argc(); i--) { michael@0: JS_ASSERT_IF(target, !target->isNative()); michael@0: MConstant *undef = constant(UndefinedValue()); michael@0: call->addArg(i, undef); michael@0: } michael@0: michael@0: // Add explicit arguments. michael@0: // Skip addArg(0) because it is reserved for this michael@0: for (int32_t i = callInfo.argc() - 1; i >= 0; i--) michael@0: call->addArg(i + 1, callInfo.getArg(i)); michael@0: michael@0: // Now that we've told it about all the args, compute whether it's movable michael@0: call->computeMovable(); michael@0: michael@0: // Inline the constructor on the caller-side. michael@0: if (callInfo.constructing()) { michael@0: MDefinition *create = createThis(target, callInfo.fun()); michael@0: if (!create) { michael@0: abort("Failure inlining constructor for call."); michael@0: return nullptr; michael@0: } michael@0: michael@0: callInfo.thisArg()->setImplicitlyUsedUnchecked(); michael@0: callInfo.setThis(create); michael@0: } michael@0: michael@0: // Pass |this| and function. 
michael@0: MDefinition *thisArg = callInfo.thisArg(); michael@0: call->addArg(0, thisArg); michael@0: michael@0: // Add a callsite clone IC for multiple targets which all should be michael@0: // callsite cloned, or bake in the clone for a single target. michael@0: if (cloneAtCallsite) { michael@0: MDefinition *fun = makeCallsiteClone(target, callInfo.fun()); michael@0: callInfo.setFun(fun); michael@0: } michael@0: michael@0: if (target && !testNeedsArgumentCheck(target, callInfo)) michael@0: call->disableArgCheck(); michael@0: michael@0: call->initFunction(callInfo.fun()); michael@0: michael@0: current->add(call); michael@0: return call; michael@0: } michael@0: michael@0: static bool michael@0: DOMCallNeedsBarrier(const JSJitInfo* jitinfo, types::TemporaryTypeSet *types) michael@0: { michael@0: // If the return type of our DOM native is in "types" already, we don't michael@0: // actually need a barrier. michael@0: if (jitinfo->returnType() == JSVAL_TYPE_UNKNOWN) michael@0: return true; michael@0: michael@0: // JSVAL_TYPE_OBJECT doesn't tell us much; we still have to barrier on the michael@0: // actual type of the object. michael@0: if (jitinfo->returnType() == JSVAL_TYPE_OBJECT) michael@0: return true; michael@0: michael@0: // No need for a barrier if we're already expecting the type we'll produce. michael@0: return MIRTypeFromValueType(jitinfo->returnType()) != types->getKnownMIRType(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::makeCall(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite) michael@0: { michael@0: // Constructor calls to non-constructors should throw. We don't want to use michael@0: // CallKnown in this case. 
michael@0: JS_ASSERT_IF(callInfo.constructing() && target, michael@0: target->isInterpretedConstructor() || target->isNativeConstructor()); michael@0: michael@0: MCall *call = makeCallHelper(target, callInfo, cloneAtCallsite); michael@0: if (!call) michael@0: return false; michael@0: michael@0: current->push(call); michael@0: if (call->isEffectful() && !resumeAfter(call)) michael@0: return false; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: michael@0: if (call->isCallDOMNative()) michael@0: return pushDOMTypeBarrier(call, types, call->getSingleTarget()); michael@0: michael@0: return pushTypeBarrier(call, types, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_eval(uint32_t argc) michael@0: { michael@0: int calleeDepth = -((int)argc + 2); michael@0: types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); michael@0: michael@0: // Emit a normal call if the eval has never executed. This keeps us from michael@0: // disabling compilation for the script when testing with --ion-eager. michael@0: if (calleeTypes && calleeTypes->empty()) michael@0: return jsop_call(argc, /* constructing = */ false); michael@0: michael@0: JSFunction *singleton = getSingleCallTarget(calleeTypes); michael@0: if (!singleton) michael@0: return abort("No singleton callee for eval()"); michael@0: michael@0: if (script()->global().valueIsEval(ObjectValue(*singleton))) { michael@0: if (argc != 1) michael@0: return abort("Direct eval with more than one argument"); michael@0: michael@0: if (!info().funMaybeLazy()) michael@0: return abort("Direct eval in global code"); michael@0: michael@0: // The 'this' value for the outer and eval scripts must be the michael@0: // same. This is not guaranteed if a primitive string/number/etc. michael@0: // is passed through to the eval invoke as the primitive may be michael@0: // boxed into different objects if accessed via 'this'. 
michael@0: MIRType type = thisTypes->getKnownMIRType(); michael@0: if (type != MIRType_Object && type != MIRType_Null && type != MIRType_Undefined) michael@0: return abort("Direct eval from script with maybe-primitive 'this'"); michael@0: michael@0: CallInfo callInfo(alloc(), /* constructing = */ false); michael@0: if (!callInfo.init(current, argc)) michael@0: return false; michael@0: callInfo.setImplicitlyUsedUnchecked(); michael@0: michael@0: callInfo.fun()->setImplicitlyUsedUnchecked(); michael@0: michael@0: MDefinition *scopeChain = current->scopeChain(); michael@0: MDefinition *string = callInfo.getArg(0); michael@0: michael@0: // Direct eval acts as identity on non-string types according to michael@0: // ES5 15.1.2.1 step 1. michael@0: if (!string->mightBeType(MIRType_String)) { michael@0: current->push(string); michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(string, types, true); michael@0: } michael@0: michael@0: current->pushSlot(info().thisSlot()); michael@0: MDefinition *thisValue = current->pop(); michael@0: michael@0: // Try to pattern match 'eval(v + "()")'. In this case v is likely a michael@0: // name on the scope chain and the eval is performing a call on that michael@0: // value. Use a dynamic scope chain lookup rather than a full eval. 
michael@0: if (string->isConcat() && michael@0: string->getOperand(1)->isConstant() && michael@0: string->getOperand(1)->toConstant()->value().isString()) michael@0: { michael@0: JSAtom *atom = &string->getOperand(1)->toConstant()->value().toString()->asAtom(); michael@0: michael@0: if (StringEqualsAscii(atom, "()")) { michael@0: MDefinition *name = string->getOperand(0); michael@0: MInstruction *dynamicName = MGetDynamicName::New(alloc(), scopeChain, name); michael@0: current->add(dynamicName); michael@0: michael@0: current->push(dynamicName); michael@0: current->push(thisValue); michael@0: michael@0: CallInfo evalCallInfo(alloc(), /* constructing = */ false); michael@0: if (!evalCallInfo.init(current, /* argc = */ 0)) michael@0: return false; michael@0: michael@0: return makeCall(nullptr, evalCallInfo, false); michael@0: } michael@0: } michael@0: michael@0: MInstruction *filterArguments = MFilterArgumentsOrEval::New(alloc(), string); michael@0: current->add(filterArguments); michael@0: michael@0: MInstruction *ins = MCallDirectEval::New(alloc(), scopeChain, string, thisValue, pc); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return resumeAfter(ins) && pushTypeBarrier(ins, types, true); michael@0: } michael@0: michael@0: return jsop_call(argc, /* constructing = */ false); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_compare(JSOp op) michael@0: { michael@0: MDefinition *right = current->pop(); michael@0: MDefinition *left = current->pop(); michael@0: michael@0: MCompare *ins = MCompare::New(alloc(), left, right, op); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: ins->infer(inspector, pc); michael@0: michael@0: if (ins->isEffectful() && !resumeAfter(ins)) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_newarray(uint32_t count) michael@0: { 
michael@0: JS_ASSERT(script()->compileAndGo()); michael@0: michael@0: JSObject *templateObject = inspector->getTemplateObject(pc); michael@0: if (!templateObject) michael@0: return abort("No template object for NEWARRAY"); michael@0: michael@0: JS_ASSERT(templateObject->is()); michael@0: if (templateObject->type()->unknownProperties()) { michael@0: // We will get confused in jsop_initelem_array if we can't find the michael@0: // type object being initialized. michael@0: return abort("New array has unknown properties"); michael@0: } michael@0: michael@0: MNewArray *ins = MNewArray::New(alloc(), constraints(), count, templateObject, michael@0: templateObject->type()->initialHeap(constraints()), michael@0: MNewArray::NewArray_Allocating); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: types::TemporaryTypeSet::DoubleConversion conversion = michael@0: ins->resultTypeSet()->convertDoubleElements(constraints()); michael@0: michael@0: if (conversion == types::TemporaryTypeSet::AlwaysConvertToDoubles) michael@0: templateObject->setShouldConvertDoubleElements(); michael@0: else michael@0: templateObject->clearShouldConvertDoubleElements(); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_newobject() michael@0: { michael@0: // Don't bake in the TypeObject for non-CNG scripts. michael@0: JS_ASSERT(script()->compileAndGo()); michael@0: michael@0: JSObject *templateObject = inspector->getTemplateObject(pc); michael@0: if (!templateObject) michael@0: return abort("No template object for NEWOBJECT"); michael@0: michael@0: JS_ASSERT(templateObject->is()); michael@0: MNewObject *ins = MNewObject::New(alloc(), constraints(), templateObject, michael@0: templateObject->hasSingletonType() michael@0: ? 
gc::TenuredHeap michael@0: : templateObject->type()->initialHeap(constraints()), michael@0: /* templateObjectIsClassPrototype = */ false); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_initelem() michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *id = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: MInitElem *initElem = MInitElem::New(alloc(), obj, id, value); michael@0: current->add(initElem); michael@0: michael@0: return resumeAfter(initElem); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_initelem_array() michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: // Make sure that arrays have the type being written to them by the michael@0: // intializer, and that arrays are marked as non-packed when writing holes michael@0: // to them during initialization. 
michael@0: bool needStub = false; michael@0: types::TypeObjectKey *initializer = obj->resultTypeSet()->getObject(0); michael@0: if (value->type() == MIRType_MagicHole) { michael@0: if (!initializer->hasFlags(constraints(), types::OBJECT_FLAG_NON_PACKED)) michael@0: needStub = true; michael@0: } else if (!initializer->unknownProperties()) { michael@0: types::HeapTypeSetKey elemTypes = initializer->property(JSID_VOID); michael@0: if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) { michael@0: elemTypes.freeze(constraints()); michael@0: needStub = true; michael@0: } michael@0: } michael@0: michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, value)); michael@0: michael@0: if (needStub) { michael@0: MCallInitElementArray *store = MCallInitElementArray::New(alloc(), obj, GET_UINT24(pc), value); michael@0: current->add(store); michael@0: return resumeAfter(store); michael@0: } michael@0: michael@0: MConstant *id = MConstant::New(alloc(), Int32Value(GET_UINT24(pc))); michael@0: current->add(id); michael@0: michael@0: // Get the elements vector. michael@0: MElements *elements = MElements::New(alloc(), obj); michael@0: current->add(elements); michael@0: michael@0: JSObject *templateObject = obj->toNewArray()->templateObject(); michael@0: michael@0: if (templateObject->shouldConvertDoubleElements()) { michael@0: MInstruction *valueDouble = MToDouble::New(alloc(), value); michael@0: current->add(valueDouble); michael@0: value = valueDouble; michael@0: } michael@0: michael@0: // Store the value. michael@0: MStoreElement *store = MStoreElement::New(alloc(), elements, id, value, /* needsHoleCheck = */ false); michael@0: current->add(store); michael@0: michael@0: // Update the initialized length. (The template object for this array has michael@0: // the array's ultimate length, so the length field is already correct: no michael@0: // updating needed.) 
michael@0: MSetInitializedLength *initLength = MSetInitializedLength::New(alloc(), elements, id); michael@0: current->add(initLength); michael@0: michael@0: if (!resumeAfter(initLength)) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_mutateproto() michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: MMutateProto *mutate = MMutateProto::New(alloc(), obj, value); michael@0: current->add(mutate); michael@0: return resumeAfter(mutate); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_initprop(PropertyName *name) michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: JSObject *templateObject = obj->toNewObject()->templateObject(); michael@0: michael@0: Shape *shape = templateObject->lastProperty()->searchLinear(NameToId(name)); michael@0: michael@0: if (!shape) { michael@0: // JSOP_NEWINIT becomes an MNewObject without preconfigured properties. michael@0: MInitProp *init = MInitProp::New(alloc(), obj, name, value); michael@0: current->add(init); michael@0: return resumeAfter(init); michael@0: } michael@0: michael@0: if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, michael@0: &obj, name, &value, /* canModify = */ true)) michael@0: { michael@0: // JSOP_NEWINIT becomes an MNewObject without preconfigured properties. 
michael@0: MInitProp *init = MInitProp::New(alloc(), obj, name, value); michael@0: current->add(init); michael@0: return resumeAfter(init); michael@0: } michael@0: michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, value)); michael@0: michael@0: bool needsBarrier = true; michael@0: if (obj->resultTypeSet() && michael@0: !obj->resultTypeSet()->propertyNeedsBarrier(constraints(), NameToId(name))) michael@0: { michael@0: needsBarrier = false; michael@0: } michael@0: michael@0: // In parallel execution, we never require write barriers. See michael@0: // forkjoin.cpp for more information. michael@0: if (info().executionMode() == ParallelExecution) michael@0: needsBarrier = false; michael@0: michael@0: if (templateObject->isFixedSlot(shape->slot())) { michael@0: MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, shape->slot(), value); michael@0: if (needsBarrier) michael@0: store->setNeedsBarrier(); michael@0: michael@0: current->add(store); michael@0: return resumeAfter(store); michael@0: } michael@0: michael@0: MSlots *slots = MSlots::New(alloc(), obj); michael@0: current->add(slots); michael@0: michael@0: uint32_t slot = templateObject->dynamicSlotIndex(shape->slot()); michael@0: MStoreSlot *store = MStoreSlot::New(alloc(), slots, slot, value); michael@0: if (needsBarrier) michael@0: store->setNeedsBarrier(); michael@0: michael@0: current->add(store); michael@0: return resumeAfter(store); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_initprop_getter_setter(PropertyName *name) michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: MInitPropGetterSetter *init = MInitPropGetterSetter::New(alloc(), obj, name, value); michael@0: current->add(init); michael@0: return resumeAfter(init); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_initelem_getter_setter() michael@0: { michael@0: 
MDefinition *value = current->pop(); michael@0: MDefinition *id = current->pop(); michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: MInitElemGetterSetter *init = MInitElemGetterSetter::New(alloc(), obj, id, value); michael@0: current->add(init); michael@0: return resumeAfter(init); michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::addBlock(MBasicBlock *block, uint32_t loopDepth) michael@0: { michael@0: if (!block) michael@0: return nullptr; michael@0: graph().addBlock(block); michael@0: block->setLoopDepth(loopDepth); michael@0: return block; michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc) michael@0: { michael@0: MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(), michael@0: predecessor, pc, MBasicBlock::NORMAL); michael@0: return addBlock(block, loopDepth_); michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, MResumePoint *priorResumePoint) michael@0: { michael@0: MBasicBlock *block = MBasicBlock::NewWithResumePoint(graph(), info(), predecessor, pc, michael@0: priorResumePoint); michael@0: return addBlock(block, loopDepth_); michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::newBlockPopN(MBasicBlock *predecessor, jsbytecode *pc, uint32_t popped) michael@0: { michael@0: MBasicBlock *block = MBasicBlock::NewPopN(graph(), info(), predecessor, pc, MBasicBlock::NORMAL, popped); michael@0: return addBlock(block, loopDepth_); michael@0: } michael@0: michael@0: MBasicBlock * michael@0: IonBuilder::newBlockAfter(MBasicBlock *at, MBasicBlock *predecessor, jsbytecode *pc) michael@0: { michael@0: MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(), michael@0: predecessor, pc, MBasicBlock::NORMAL); michael@0: if (!block) michael@0: return nullptr; michael@0: graph().insertBlockAfter(at, block); michael@0: return block; michael@0: } 
// Create a normal block at an explicitly supplied loop depth (rather than
// the builder's current loopDepth_).
MBasicBlock *
IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32_t loopDepth)
{
    MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(),
                                          predecessor, pc, MBasicBlock::NORMAL);
    return addBlock(block, loopDepth);
}

// Build the OSR entry block and the loop preheader for on-stack replacement
// at |loopEntry|: materialize every slot (scope chain, return value, args
// object, |this|, formals, locals, expression stack) from the baseline frame
// via MOsr* instructions, then wire the entry block into the preheader.
// Returns the preheader, or nullptr on OOM.
MBasicBlock *
IonBuilder::newOsrPreheader(MBasicBlock *predecessor, jsbytecode *loopEntry)
{
    JS_ASSERT(LoopEntryCanIonOsr(loopEntry));
    JS_ASSERT(loopEntry == info().osrPc());

    // Create two blocks: one for the OSR entry with no predecessors, one for
    // the preheader, which has the OSR entry block as a predecessor. The
    // OSR block is always the second block (with id 1).
    MBasicBlock *osrBlock  = newBlockAfter(*graph().begin(), loopEntry);
    MBasicBlock *preheader = newBlock(predecessor, loopEntry);
    if (!osrBlock || !preheader)
        return nullptr;

    MOsrEntry *entry = MOsrEntry::New(alloc());
    osrBlock->add(entry);

    // Initialize |scopeChain|.
    {
        uint32_t slot = info().scopeChainSlot();

        MInstruction *scopev;
        if (analysis().usesScopeChain()) {
            scopev = MOsrScopeChain::New(alloc(), entry);
        } else {
            // Use an undefined value if the script does not need its scope
            // chain, to match the type that is already being tracked for the
            // slot.
            scopev = MConstant::New(alloc(), UndefinedValue());
        }

        osrBlock->add(scopev);
        osrBlock->initSlot(slot, scopev);
    }
    // Initialize |return value|.
    {
        MInstruction *returnValue;
        if (!script()->noScriptRval())
            returnValue = MOsrReturnValue::New(alloc(), entry);
        else
            returnValue = MConstant::New(alloc(), UndefinedValue());
        osrBlock->add(returnValue);
        osrBlock->initSlot(info().returnValueSlot(), returnValue);
    }

    // Initialize the arguments object slot (undefined when the script has
    // |arguments| but does not need a real arguments object).
    bool needsArgsObj = info().needsArgsObj();
    MInstruction *argsObj = nullptr;
    if (info().hasArguments()) {
        if (needsArgsObj)
            argsObj = MOsrArgumentsObject::New(alloc(), entry);
        else
            argsObj = MConstant::New(alloc(), UndefinedValue());
        osrBlock->add(argsObj);
        osrBlock->initSlot(info().argsObjSlot(), argsObj);
    }

    if (info().funMaybeLazy()) {
        // Initialize |this| parameter.
        MParameter *thisv = MParameter::New(alloc(), MParameter::THIS_SLOT, nullptr);
        osrBlock->add(thisv);
        osrBlock->initSlot(info().thisSlot(), thisv);

        // Initialize arguments.
        for (uint32_t i = 0; i < info().nargs(); i++) {
            uint32_t slot = needsArgsObj ? info().argSlotUnchecked(i) : info().argSlot(i);

            // Only grab arguments from the arguments object if the arguments object
            // aliases formals. If the argsobj does not alias formals, then the
            // formals may have been assigned to during interpretation, and that change
            // will not be reflected in the argsobj.
            if (needsArgsObj && info().argsObjAliasesFormals()) {
                JS_ASSERT(argsObj && argsObj->isOsrArgumentsObject());
                // If this is an aliased formal, then the arguments object
                // contains a hole at this index. Any references to this
                // variable in the jitcode will come from JSOP_*ALIASEDVAR
                // opcodes, so the slot itself can be set to undefined. If
                // it's not aliased, it must be retrieved from the arguments
                // object.
                MInstruction *osrv;
                if (script()->formalIsAliased(i))
                    osrv = MConstant::New(alloc(), UndefinedValue());
                else
                    osrv = MGetArgumentsObjectArg::New(alloc(), argsObj, i);

                osrBlock->add(osrv);
                osrBlock->initSlot(slot, osrv);
            } else {
                MParameter *arg = MParameter::New(alloc(), i, nullptr);
                osrBlock->add(arg);
                osrBlock->initSlot(slot, arg);
            }
        }
    }

    // Initialize locals, loaded from their baseline-frame stack offsets.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        uint32_t slot = info().localSlot(i);
        ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(i);

        MOsrValue *osrv = MOsrValue::New(alloc(), entry, offset);
        osrBlock->add(osrv);
        osrBlock->initSlot(slot, osrv);
    }

    // Initialize the expression stack; in the baseline frame these live
    // directly after the locals.
    uint32_t numStackSlots = preheader->stackDepth() - info().firstStackSlot();
    for (uint32_t i = 0; i < numStackSlots; i++) {
        uint32_t slot = info().stackSlot(i);
        ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(info().nlocals() + i);

        MOsrValue *osrv = MOsrValue::New(alloc(), entry, offset);
        osrBlock->add(osrv);
        osrBlock->initSlot(slot, osrv);
    }

    // Create an MStart to hold the first valid MResumePoint.
    MStart *start = MStart::New(alloc(), MStart::StartType_Osr);
    osrBlock->add(start);
    graph().setOsrStart(start);

    // MOsrValue instructions are infallible, so the first MResumePoint must
    // occur after they execute, at the point of the MStart.
    if (!resumeAt(start, loopEntry))
        return nullptr;

    // Link the same MResumePoint from the MStart to each MOsrValue.
    // This causes logic in ShouldSpecializeInput() to not replace Uses with
    // Unboxes in the MResumePoint, so that the MStart always sees Values.
    osrBlock->linkOsrValues(start);

    // Clone types of the other predecessor of the pre-header to the osr block,
    // so that pre-header phi's won't discard the specialized type of the
    // predecessor.
    JS_ASSERT(predecessor->stackDepth() == osrBlock->stackDepth());
    JS_ASSERT(info().scopeChainSlot() == 0);

    // Treat the OSR values as having the same type as the existing values
    // coming in to the loop. These will be fixed up with appropriate
    // unboxing and type barriers in finishLoop, once the possible types
    // at the loop header are known.
    for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++) {
        MDefinition *existing = current->getSlot(i);
        MDefinition *def = osrBlock->getSlot(i);
        JS_ASSERT_IF(!needsArgsObj || !info().isSlotAliasedAtOsr(i), def->type() == MIRType_Value);

        // Aliased slots are never accessed, since they need to go through
        // the callobject. No need to type them here.
        if (info().isSlotAliasedAtOsr(i))
            continue;

        def->setResultType(existing->type());
        def->setResultTypeSet(existing->resultTypeSet());
    }

    // Finish the osrBlock.
    osrBlock->end(MGoto::New(alloc(), preheader));
    if (!preheader->addPredecessor(alloc(), osrBlock))
        return nullptr;
    graph().setOsrBlock(osrBlock);

    // Wrap |this| with a guaranteed use, to prevent instruction elimination.
    // Prevent |this| from being DCE'd: necessary for constructors.
    if (info().funMaybeLazy())
        preheader->getSlot(info().thisSlot())->setGuard();

    return preheader;
}

// Create a pending loop header block; when |osr| is set, specialize its phi
// types using what is known about the OSR frame's values.
MBasicBlock *
IonBuilder::newPendingLoopHeader(MBasicBlock *predecessor, jsbytecode *pc, bool osr, bool canOsr,
                                 unsigned stackPhiCount)
{
    loopDepth_++;
    // If this site can OSR, all values on the expression stack are part of the loop.
    if (canOsr)
        stackPhiCount = predecessor->stackDepth() - info().firstStackSlot();
    MBasicBlock *block = MBasicBlock::NewPendingLoopHeader(graph(), info(), predecessor, pc,
                                                           stackPhiCount);
    if (!addBlock(block, loopDepth_))
        return nullptr;

    if (osr) {
        // Incorporate type information from the OSR frame into the loop
        // header. The OSR frame may have unexpected types due to type changes
        // within the loop body or due to incomplete profiling information,
        // in which case this may avoid restarts of loop analysis or bailouts
        // during the OSR itself.

        // Unbox the MOsrValue if it is known to be unboxable.
        for (uint32_t i = info().startArgSlot(); i < block->stackDepth(); i++) {

            // The value of aliased args and slots are in the callobject, so we
            // can't use the value from the baseline frame.
            if (info().isSlotAliasedAtOsr(i))
                continue;

            // Don't bother with expression stack values.
The stack should be michael@0: // empty except for let variables (not Ion-compiled) or iterators. michael@0: if (i >= info().firstStackSlot()) michael@0: continue; michael@0: michael@0: MPhi *phi = block->getSlot(i)->toPhi(); michael@0: michael@0: // Get the type from the baseline frame. michael@0: types::Type existingType = types::Type::UndefinedType(); michael@0: uint32_t arg = i - info().firstArgSlot(); michael@0: uint32_t var = i - info().firstLocalSlot(); michael@0: if (info().funMaybeLazy() && i == info().thisSlot()) michael@0: existingType = baselineFrame_->thisType; michael@0: else if (arg < info().nargs()) michael@0: existingType = baselineFrame_->argTypes[arg]; michael@0: else michael@0: existingType = baselineFrame_->varTypes[var]; michael@0: michael@0: // Extract typeset from value. michael@0: types::TemporaryTypeSet *typeSet = michael@0: alloc_->lifoAlloc()->new_(existingType); michael@0: if (!typeSet) michael@0: return nullptr; michael@0: MIRType type = typeSet->getKnownMIRType(); michael@0: if (!phi->addBackedgeType(type, typeSet)) michael@0: return nullptr; michael@0: } michael@0: } michael@0: michael@0: return block; michael@0: } michael@0: michael@0: // A resume point is a mapping of stack slots to MDefinitions. It is used to michael@0: // capture the environment such that if a guard fails, and IonMonkey needs michael@0: // to exit back to the interpreter, the interpreter state can be michael@0: // reconstructed. michael@0: // michael@0: // We capture stack state at critical points: michael@0: // * (1) At the beginning of every basic block. michael@0: // * (2) After every effectful operation. michael@0: // michael@0: // As long as these two properties are maintained, instructions can michael@0: // be moved, hoisted, or, eliminated without problems, and ops without side michael@0: // effects do not need to worry about capturing state at precisely the michael@0: // right point in time. 
michael@0: // michael@0: // Effectful instructions, of course, need to capture state after completion, michael@0: // where the interpreter will not attempt to repeat the operation. For this, michael@0: // ResumeAfter must be used. The state is attached directly to the effectful michael@0: // instruction to ensure that no intermediate instructions could be injected michael@0: // in between by a future analysis pass. michael@0: // michael@0: // During LIR construction, if an instruction can bail back to the interpreter, michael@0: // we create an LSnapshot, which uses the last known resume point to request michael@0: // register/stack assignments for every live value. michael@0: bool michael@0: IonBuilder::resume(MInstruction *ins, jsbytecode *pc, MResumePoint::Mode mode) michael@0: { michael@0: JS_ASSERT(ins->isEffectful() || !ins->isMovable()); michael@0: michael@0: MResumePoint *resumePoint = MResumePoint::New(alloc(), ins->block(), pc, callerResumePoint_, michael@0: mode); michael@0: if (!resumePoint) michael@0: return false; michael@0: ins->setResumePoint(resumePoint); michael@0: resumePoint->setInstruction(ins); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::resumeAt(MInstruction *ins, jsbytecode *pc) michael@0: { michael@0: return resume(ins, pc, MResumePoint::ResumeAt); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::resumeAfter(MInstruction *ins) michael@0: { michael@0: return resume(ins, pc, MResumePoint::ResumeAfter); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::maybeInsertResume() michael@0: { michael@0: // Create a resume point at the current position, without an existing michael@0: // effectful instruction. This resume point is not necessary for correct michael@0: // behavior (see above), but is added to avoid holding any values from the michael@0: // previous resume point which are now dead. 
This shortens the live ranges michael@0: // of such values and improves register allocation. michael@0: // michael@0: // This optimization is not performed outside of loop bodies, where good michael@0: // register allocation is not as critical, in order to avoid creating michael@0: // excessive resume points. michael@0: michael@0: if (loopDepth_ == 0) michael@0: return true; michael@0: michael@0: MNop *ins = MNop::New(alloc()); michael@0: current->add(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: static bool michael@0: ClassHasEffectlessLookup(const Class *clasp, PropertyName *name) michael@0: { michael@0: return clasp->isNative() && !clasp->ops.lookupGeneric; michael@0: } michael@0: michael@0: static bool michael@0: ClassHasResolveHook(CompileCompartment *comp, const Class *clasp, PropertyName *name) michael@0: { michael@0: // While arrays do not have resolve hooks, the types of their |length| michael@0: // properties are not reflected in type information, so pretend there is a michael@0: // resolve hook for this property. michael@0: if (clasp == &ArrayObject::class_) michael@0: return name == comp->runtime()->names().length; michael@0: michael@0: if (clasp->resolve == JS_ResolveStub) michael@0: return false; michael@0: michael@0: if (clasp->resolve == (JSResolveOp)str_resolve) { michael@0: // str_resolve only resolves integers, not names. michael@0: return false; michael@0: } michael@0: michael@0: if (clasp->resolve == (JSResolveOp)fun_resolve) michael@0: return FunctionHasResolveHook(comp->runtime()->names(), name); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: IonBuilder::insertRecompileCheck() michael@0: { michael@0: // PJS doesn't recompile and doesn't need recompile checks. michael@0: if (info().executionMode() != SequentialExecution) michael@0: return; michael@0: michael@0: // No need for recompile checks if this is the highest optimization level. 
michael@0: OptimizationLevel curLevel = optimizationInfo().level(); michael@0: if (js_IonOptimizations.isLastLevel(curLevel)) michael@0: return; michael@0: michael@0: // Add recompile check. michael@0: michael@0: // Get the topmost builder. The topmost script will get recompiled when michael@0: // usecount is high enough to justify a higher optimization level. michael@0: IonBuilder *topBuilder = this; michael@0: while (topBuilder->callerBuilder_) michael@0: topBuilder = topBuilder->callerBuilder_; michael@0: michael@0: // Add recompile check to recompile when the usecount reaches the usecount michael@0: // of the next optimization level. michael@0: OptimizationLevel nextLevel = js_IonOptimizations.nextLevel(curLevel); michael@0: const OptimizationInfo *info = js_IonOptimizations.get(nextLevel); michael@0: uint32_t useCount = info->usesBeforeCompile(topBuilder->script()); michael@0: current->add(MRecompileCheck::New(alloc(), topBuilder->script(), useCount)); michael@0: } michael@0: michael@0: JSObject * michael@0: IonBuilder::testSingletonProperty(JSObject *obj, PropertyName *name) michael@0: { michael@0: // We would like to completely no-op property/global accesses which can michael@0: // produce only a particular JSObject. When indicating the access result is michael@0: // definitely an object, type inference does not account for the michael@0: // possibility that the property is entirely missing from the input object michael@0: // and its prototypes (if this happens, a semantic trigger would be hit and michael@0: // the pushed types updated, even if there is no type barrier). michael@0: // michael@0: // If the access definitely goes through obj, either directly or on the michael@0: // prototype chain, and the object has singleton type, then the type michael@0: // information for that property reflects the value that will definitely be michael@0: // read on accesses to the object. 
If the property is later deleted or michael@0: // reconfigured as a getter/setter then the type information for the michael@0: // property will change and trigger invalidation. michael@0: michael@0: while (obj) { michael@0: if (!ClassHasEffectlessLookup(obj->getClass(), name)) michael@0: return nullptr; michael@0: michael@0: types::TypeObjectKey *objType = types::TypeObjectKey::get(obj); michael@0: if (analysisContext) michael@0: objType->ensureTrackedProperty(analysisContext, NameToId(name)); michael@0: michael@0: if (objType->unknownProperties()) michael@0: return nullptr; michael@0: michael@0: types::HeapTypeSetKey property = objType->property(NameToId(name)); michael@0: if (property.isOwnProperty(constraints())) { michael@0: if (obj->hasSingletonType()) michael@0: return property.singleton(constraints()); michael@0: return nullptr; michael@0: } michael@0: michael@0: if (ClassHasResolveHook(compartment, obj->getClass(), name)) michael@0: return nullptr; michael@0: michael@0: if (!obj->hasTenuredProto()) michael@0: return nullptr; michael@0: obj = obj->getProto(); michael@0: } michael@0: michael@0: return nullptr; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::testSingletonPropertyTypes(MDefinition *obj, JSObject *singleton, PropertyName *name, michael@0: bool *testObject, bool *testString) michael@0: { michael@0: // As for TestSingletonProperty, but the input is any value in a type set michael@0: // rather than a specific object. If testObject is set then the constant michael@0: // result can only be used after ensuring the input is an object. michael@0: michael@0: *testObject = false; michael@0: *testString = false; michael@0: michael@0: types::TemporaryTypeSet *types = obj->resultTypeSet(); michael@0: if (types && types->unknownObject()) michael@0: return false; michael@0: michael@0: JSObject *objectSingleton = types ? 
types->getSingleton() : nullptr; michael@0: if (objectSingleton) michael@0: return testSingletonProperty(objectSingleton, name) == singleton; michael@0: michael@0: JSProtoKey key; michael@0: switch (obj->type()) { michael@0: case MIRType_String: michael@0: key = JSProto_String; michael@0: break; michael@0: michael@0: case MIRType_Int32: michael@0: case MIRType_Double: michael@0: key = JSProto_Number; michael@0: break; michael@0: michael@0: case MIRType_Boolean: michael@0: key = JSProto_Boolean; michael@0: break; michael@0: michael@0: case MIRType_Object: michael@0: case MIRType_Value: { michael@0: if (!types) michael@0: return false; michael@0: michael@0: if (types->hasType(types::Type::StringType())) { michael@0: key = JSProto_String; michael@0: *testString = true; michael@0: break; michael@0: } michael@0: michael@0: if (!types->maybeObject()) michael@0: return false; michael@0: michael@0: // For property accesses which may be on many objects, we just need to michael@0: // find a prototype common to all the objects; if that prototype michael@0: // has the singleton property, the access will not be on a missing property. 
michael@0: for (unsigned i = 0; i < types->getObjectCount(); i++) { michael@0: types::TypeObjectKey *object = types->getObject(i); michael@0: if (!object) michael@0: continue; michael@0: if (analysisContext) michael@0: object->ensureTrackedProperty(analysisContext, NameToId(name)); michael@0: michael@0: const Class *clasp = object->clasp(); michael@0: if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) michael@0: return false; michael@0: if (object->unknownProperties()) michael@0: return false; michael@0: types::HeapTypeSetKey property = object->property(NameToId(name)); michael@0: if (property.isOwnProperty(constraints())) michael@0: return false; michael@0: michael@0: if (!object->hasTenuredProto()) michael@0: return false; michael@0: if (JSObject *proto = object->proto().toObjectOrNull()) { michael@0: // Test this type. michael@0: if (testSingletonProperty(proto, name) != singleton) michael@0: return false; michael@0: } else { michael@0: // Can't be on the prototype chain with no prototypes... michael@0: return false; michael@0: } michael@0: } michael@0: // If this is not a known object, a test will be needed. michael@0: *testObject = (obj->type() != MIRType_Object); michael@0: return true; michael@0: } michael@0: default: michael@0: return false; michael@0: } michael@0: michael@0: JSObject *proto = GetBuiltinPrototypePure(&script()->global(), key); michael@0: if (proto) michael@0: return testSingletonProperty(proto, name) == singleton; michael@0: michael@0: return false; michael@0: } michael@0: michael@0: // Given an observed type set, annotates the IR as much as possible: michael@0: // (1) If no type information is provided, the value on the top of the stack is michael@0: // left in place. michael@0: // (2) If a single type definitely exists, and no type barrier is needed, michael@0: // then an infallible unbox instruction replaces the value on the top of michael@0: // the stack. 
michael@0: // (3) If a type barrier is needed, but has an unknown type set, leave the michael@0: // value at the top of the stack. michael@0: // (4) If a type barrier is needed, and has a single type, an unbox michael@0: // instruction replaces the top of the stack. michael@0: // (5) Lastly, a type barrier instruction replaces the top of the stack. michael@0: bool michael@0: IonBuilder::pushTypeBarrier(MDefinition *def, types::TemporaryTypeSet *observed, bool needsBarrier) michael@0: { michael@0: // Barriers are never needed for instructions whose result will not be used. michael@0: if (BytecodeIsPopped(pc)) michael@0: return true; michael@0: michael@0: // If the instruction has no side effects, we'll resume the entire operation. michael@0: // The actual type barrier will occur in the interpreter. If the michael@0: // instruction is effectful, even if it has a singleton type, there michael@0: // must be a resume point capturing the original def, and resuming michael@0: // to that point will explicitly monitor the new type. 
michael@0: michael@0: if (!needsBarrier) { michael@0: MDefinition *replace = ensureDefiniteType(def, observed->getKnownMIRType()); michael@0: if (replace != def) { michael@0: current->pop(); michael@0: current->push(replace); michael@0: } michael@0: replace->setResultTypeSet(observed); michael@0: return true; michael@0: } michael@0: michael@0: if (observed->unknown()) michael@0: return true; michael@0: michael@0: current->pop(); michael@0: michael@0: MInstruction *barrier = MTypeBarrier::New(alloc(), def, observed); michael@0: current->add(barrier); michael@0: michael@0: if (barrier->type() == MIRType_Undefined) michael@0: return pushConstant(UndefinedValue()); michael@0: if (barrier->type() == MIRType_Null) michael@0: return pushConstant(NullValue()); michael@0: michael@0: current->push(barrier); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::pushDOMTypeBarrier(MInstruction *ins, types::TemporaryTypeSet *observed, JSFunction* func) michael@0: { michael@0: JS_ASSERT(func && func->isNative() && func->jitInfo()); michael@0: michael@0: const JSJitInfo *jitinfo = func->jitInfo(); michael@0: bool barrier = DOMCallNeedsBarrier(jitinfo, observed); michael@0: // Need to be a bit careful: if jitinfo->returnType is JSVAL_TYPE_DOUBLE but michael@0: // types->getKnownMIRType() is MIRType_Int32, then don't unconditionally michael@0: // unbox as a double. Instead, go ahead and barrier on having an int type, michael@0: // since we know we need a barrier anyway due to the type mismatch. This is michael@0: // the only situation in which TI actually has more information about the michael@0: // JSValueType than codegen can, short of jitinfo->returnType just being michael@0: // JSVAL_TYPE_UNKNOWN. 
michael@0: MDefinition* replace = ins; michael@0: if (jitinfo->returnType() != JSVAL_TYPE_DOUBLE || michael@0: observed->getKnownMIRType() != MIRType_Int32) { michael@0: replace = ensureDefiniteType(ins, MIRTypeFromValueType(jitinfo->returnType())); michael@0: if (replace != ins) { michael@0: current->pop(); michael@0: current->push(replace); michael@0: } michael@0: } else { michael@0: JS_ASSERT(barrier); michael@0: } michael@0: michael@0: return pushTypeBarrier(replace, observed, barrier); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::ensureDefiniteType(MDefinition *def, MIRType definiteType) michael@0: { michael@0: MInstruction *replace; michael@0: switch (definiteType) { michael@0: case MIRType_Undefined: michael@0: def->setImplicitlyUsedUnchecked(); michael@0: replace = MConstant::New(alloc(), UndefinedValue()); michael@0: break; michael@0: michael@0: case MIRType_Null: michael@0: def->setImplicitlyUsedUnchecked(); michael@0: replace = MConstant::New(alloc(), NullValue()); michael@0: break; michael@0: michael@0: case MIRType_Value: michael@0: return def; michael@0: michael@0: default: { michael@0: if (def->type() != MIRType_Value) { michael@0: JS_ASSERT(def->type() == definiteType); michael@0: return def; michael@0: } michael@0: replace = MUnbox::New(alloc(), def, definiteType, MUnbox::Infallible); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: current->add(replace); michael@0: return replace; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::ensureDefiniteTypeSet(MDefinition *def, types::TemporaryTypeSet *types) michael@0: { michael@0: // We cannot arbitrarily add a typeset to a definition. It can be shared michael@0: // in another path. So we always need to create a new MIR. michael@0: michael@0: // Use ensureDefiniteType to do unboxing. If that happened the type can michael@0: // be added on the newly created unbox operation. 
michael@0: MDefinition *replace = ensureDefiniteType(def, types->getKnownMIRType()); michael@0: if (replace != def) { michael@0: replace->setResultTypeSet(types); michael@0: return replace; michael@0: } michael@0: michael@0: // Create a NOP mir instruction to filter the typeset. michael@0: MFilterTypeSet *filter = MFilterTypeSet::New(alloc(), def, types); michael@0: current->add(filter); michael@0: return filter; michael@0: } michael@0: michael@0: static size_t michael@0: NumFixedSlots(JSObject *object) michael@0: { michael@0: // Note: we can't use object->numFixedSlots() here, as this will read the michael@0: // shape and can race with the main thread if we are building off thread. michael@0: // The allocation kind and object class (which goes through the type) can michael@0: // be read freely, however. michael@0: gc::AllocKind kind = object->tenuredGetAllocKind(); michael@0: return gc::GetGCKindSlots(kind, object->getClass()); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getStaticName(JSObject *staticObject, PropertyName *name, bool *psucceeded) michael@0: { michael@0: jsid id = NameToId(name); michael@0: michael@0: JS_ASSERT(staticObject->is() || staticObject->is()); michael@0: JS_ASSERT(staticObject->hasSingletonType()); michael@0: michael@0: *psucceeded = true; michael@0: michael@0: if (staticObject->is()) { michael@0: // Optimize undefined, NaN, and Infinity. 
michael@0: if (name == names().undefined) michael@0: return pushConstant(UndefinedValue()); michael@0: if (name == names().NaN) michael@0: return pushConstant(compartment->runtime()->NaNValue()); michael@0: if (name == names().Infinity) michael@0: return pushConstant(compartment->runtime()->positiveInfinityValue()); michael@0: } michael@0: michael@0: types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject); michael@0: if (analysisContext) michael@0: staticType->ensureTrackedProperty(analysisContext, NameToId(name)); michael@0: michael@0: if (staticType->unknownProperties()) { michael@0: *psucceeded = false; michael@0: return true; michael@0: } michael@0: michael@0: types::HeapTypeSetKey property = staticType->property(id); michael@0: if (!property.maybeTypes() || michael@0: !property.maybeTypes()->definiteProperty() || michael@0: property.nonData(constraints())) michael@0: { michael@0: // The property has been reconfigured as non-configurable, non-enumerable michael@0: // or non-writable. michael@0: *psucceeded = false; michael@0: return true; michael@0: } michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticType, michael@0: name, types, /* updateObserved = */ true); michael@0: michael@0: JSObject *singleton = types->getSingleton(); michael@0: michael@0: MIRType knownType = types->getKnownMIRType(); michael@0: if (!barrier) { michael@0: if (singleton) { michael@0: // Try to inline a known constant value. 
michael@0: if (testSingletonProperty(staticObject, name) == singleton) michael@0: return pushConstant(ObjectValue(*singleton)); michael@0: } michael@0: if (knownType == MIRType_Undefined) michael@0: return pushConstant(UndefinedValue()); michael@0: if (knownType == MIRType_Null) michael@0: return pushConstant(NullValue()); michael@0: } michael@0: michael@0: MInstruction *obj = constant(ObjectValue(*staticObject)); michael@0: michael@0: MIRType rvalType = types->getKnownMIRType(); michael@0: if (barrier) michael@0: rvalType = MIRType_Value; michael@0: michael@0: return loadSlot(obj, property.maybeTypes()->definiteSlot(), NumFixedSlots(staticObject), michael@0: rvalType, barrier, types); michael@0: } michael@0: michael@0: // Whether 'types' includes all possible values represented by input/inputTypes. michael@0: bool michael@0: jit::TypeSetIncludes(types::TypeSet *types, MIRType input, types::TypeSet *inputTypes) michael@0: { michael@0: if (!types) michael@0: return inputTypes && inputTypes->empty(); michael@0: michael@0: switch (input) { michael@0: case MIRType_Undefined: michael@0: case MIRType_Null: michael@0: case MIRType_Boolean: michael@0: case MIRType_Int32: michael@0: case MIRType_Double: michael@0: case MIRType_Float32: michael@0: case MIRType_String: michael@0: case MIRType_MagicOptimizedArguments: michael@0: return types->hasType(types::Type::PrimitiveType(ValueTypeFromMIRType(input))); michael@0: michael@0: case MIRType_Object: michael@0: return types->unknownObject() || (inputTypes && inputTypes->isSubset(types)); michael@0: michael@0: case MIRType_Value: michael@0: return types->unknown() || (inputTypes && inputTypes->isSubset(types)); michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Bad input type"); michael@0: } michael@0: } michael@0: michael@0: // Whether a write of the given value may need a post-write barrier for GC purposes. 
michael@0: bool michael@0: jit::NeedsPostBarrier(CompileInfo &info, MDefinition *value) michael@0: { michael@0: return info.executionMode() != ParallelExecution && value->mightBeType(MIRType_Object); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setStaticName(JSObject *staticObject, PropertyName *name) michael@0: { michael@0: jsid id = NameToId(name); michael@0: michael@0: JS_ASSERT(staticObject->is() || staticObject->is()); michael@0: michael@0: MDefinition *value = current->peek(-1); michael@0: michael@0: types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject); michael@0: if (staticType->unknownProperties()) michael@0: return jsop_setprop(name); michael@0: michael@0: types::HeapTypeSetKey property = staticType->property(id); michael@0: if (!property.maybeTypes() || michael@0: !property.maybeTypes()->definiteProperty() || michael@0: property.nonData(constraints()) || michael@0: property.nonWritable(constraints())) michael@0: { michael@0: // The property has been reconfigured as non-configurable, non-enumerable michael@0: // or non-writable. michael@0: return jsop_setprop(name); michael@0: } michael@0: michael@0: if (!TypeSetIncludes(property.maybeTypes(), value->type(), value->resultTypeSet())) michael@0: return jsop_setprop(name); michael@0: michael@0: current->pop(); michael@0: michael@0: // Pop the bound object on the stack. michael@0: MDefinition *obj = current->pop(); michael@0: JS_ASSERT(&obj->toConstant()->value().toObject() == staticObject); michael@0: michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, value)); michael@0: michael@0: // If the property has a known type, we may be able to optimize typed stores by not michael@0: // storing the type tag. 
michael@0: MIRType slotType = MIRType_None; michael@0: MIRType knownType = property.knownMIRType(constraints()); michael@0: if (knownType != MIRType_Value) michael@0: slotType = knownType; michael@0: michael@0: bool needsBarrier = property.needsBarrier(constraints()); michael@0: return storeSlot(obj, property.maybeTypes()->definiteSlot(), NumFixedSlots(staticObject), michael@0: value, needsBarrier, slotType); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getgname(PropertyName *name) michael@0: { michael@0: JSObject *obj = &script()->global(); michael@0: bool succeeded; michael@0: if (!getStaticName(obj, name, &succeeded)) michael@0: return false; michael@0: if (succeeded) michael@0: return true; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: // Spoof the stack to call into the getProp path. michael@0: // First, make sure there's room. michael@0: if (!current->ensureHasSlots(1)) michael@0: return false; michael@0: pushConstant(ObjectValue(*obj)); michael@0: if (!getPropTryCommonGetter(&succeeded, name, types)) michael@0: return false; michael@0: if (succeeded) michael@0: return true; michael@0: michael@0: // Clean up the pushed global object if we were not sucessful. 
michael@0: current->pop(); michael@0: return jsop_getname(name); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getname(PropertyName *name) michael@0: { michael@0: MDefinition *object; michael@0: if (js_CodeSpec[*pc].format & JOF_GNAME) { michael@0: MInstruction *global = constant(ObjectValue(script()->global())); michael@0: object = global; michael@0: } else { michael@0: current->push(current->scopeChain()); michael@0: object = current->pop(); michael@0: } michael@0: michael@0: MGetNameCache *ins; michael@0: if (JSOp(*GetNextPc(pc)) == JSOP_TYPEOF) michael@0: ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAMETYPEOF); michael@0: else michael@0: ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAME); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(ins, types, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_intrinsic(PropertyName *name) michael@0: { michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: michael@0: // If we haven't executed this opcode yet, we need to get the intrinsic michael@0: // value and monitor the result. michael@0: if (types->empty()) { michael@0: MCallGetIntrinsicValue *ins = MCallGetIntrinsicValue::New(alloc(), name); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: return pushTypeBarrier(ins, types, true); michael@0: } michael@0: michael@0: // Bake in the intrinsic. Make sure that TI agrees with us on the type. 
michael@0: Value vp; michael@0: JS_ALWAYS_TRUE(script()->global().maybeGetIntrinsicValue(name, &vp)); michael@0: JS_ASSERT(types->hasType(types::GetValueType(vp))); michael@0: michael@0: pushConstant(vp); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_bindname(PropertyName *name) michael@0: { michael@0: JS_ASSERT(analysis().usesScopeChain()); michael@0: michael@0: MDefinition *scopeChain = current->scopeChain(); michael@0: MBindNameCache *ins = MBindNameCache::New(alloc(), scopeChain, name, script(), pc); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: static MIRType michael@0: GetElemKnownType(bool needsHoleCheck, types::TemporaryTypeSet *types) michael@0: { michael@0: MIRType knownType = types->getKnownMIRType(); michael@0: michael@0: // Null and undefined have no payload so they can't be specialized. michael@0: // Since folding null/undefined while building SSA is not safe (see the michael@0: // comment in IsPhiObservable), we just add an untyped load instruction michael@0: // and rely on pushTypeBarrier and DCE to replace it with a null/undefined michael@0: // constant. michael@0: if (knownType == MIRType_Undefined || knownType == MIRType_Null) michael@0: knownType = MIRType_Value; michael@0: michael@0: // Different architectures may want typed element reads which require michael@0: // hole checks to be done as either value or typed reads. 
michael@0: if (needsHoleCheck && !LIRGenerator::allowTypedElementHoleCheck()) michael@0: knownType = MIRType_Value; michael@0: michael@0: return knownType; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getelem() michael@0: { michael@0: MDefinition *index = current->pop(); michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: // Always use a call if we are performing analysis and not actually michael@0: // emitting code, to simplify later analysis. michael@0: if (info().executionModeIsAnalysis()) { michael@0: MInstruction *ins = MCallGetElement::New(alloc(), obj, index); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(ins, types, true); michael@0: } michael@0: michael@0: bool emitted = false; michael@0: michael@0: if (!getElemTryTypedObject(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryDense(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryTypedStatic(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryTypedArray(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryString(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryArguments(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (!getElemTryArgumentsInlined(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: michael@0: if (script()->argumentsHasVarBinding() && obj->mightBeType(MIRType_MagicOptimizedArguments)) michael@0: return abort("Type is not definitely lazy arguments."); michael@0: michael@0: if (!getElemTryCache(&emitted, obj, index) || emitted) michael@0: return emitted; michael@0: 
michael@0: // Emit call. michael@0: MInstruction *ins = MCallGetElement::New(alloc(), obj, index); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(ins, types, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryTypedObject(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: TypeDescrSet objDescrs; michael@0: if (!lookupTypeDescrSet(obj, &objDescrs)) michael@0: return false; michael@0: michael@0: if (!objDescrs.allOfArrayKind()) michael@0: return true; michael@0: michael@0: TypeDescrSet elemDescrs; michael@0: if (!objDescrs.arrayElementType(*this, &elemDescrs)) michael@0: return false; michael@0: if (elemDescrs.empty()) michael@0: return true; michael@0: michael@0: JS_ASSERT(TypeDescr::isSized(elemDescrs.kind())); michael@0: michael@0: int32_t elemSize; michael@0: if (!elemDescrs.allHaveSameSize(&elemSize)) michael@0: return true; michael@0: michael@0: switch (elemDescrs.kind()) { michael@0: case TypeDescr::X4: michael@0: // FIXME (bug 894105): load into a MIRType_float32x4 etc michael@0: return true; michael@0: michael@0: case TypeDescr::Struct: michael@0: case TypeDescr::SizedArray: michael@0: return getElemTryComplexElemOfTypedObject(emitted, michael@0: obj, michael@0: index, michael@0: objDescrs, michael@0: elemDescrs, michael@0: elemSize); michael@0: case TypeDescr::Scalar: michael@0: return getElemTryScalarElemOfTypedObject(emitted, michael@0: obj, michael@0: index, michael@0: objDescrs, michael@0: elemDescrs, michael@0: elemSize); michael@0: michael@0: case TypeDescr::Reference: michael@0: return true; michael@0: michael@0: case TypeDescr::UnsizedArray: michael@0: MOZ_ASSUME_UNREACHABLE("Unsized arrays cannot be element types"); michael@0: } michael@0: 
michael@0: MOZ_ASSUME_UNREACHABLE("Bad kind"); michael@0: } michael@0: michael@0: static MIRType michael@0: MIRTypeForTypedArrayRead(ScalarTypeDescr::Type arrayType, michael@0: bool observedDouble); michael@0: michael@0: bool michael@0: IonBuilder::checkTypedObjectIndexInBounds(int32_t elemSize, michael@0: MDefinition *obj, michael@0: MDefinition *index, michael@0: TypeDescrSet objDescrs, michael@0: MDefinition **indexAsByteOffset, michael@0: bool *canBeNeutered) michael@0: { michael@0: // Ensure index is an integer. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), index); michael@0: current->add(idInt32); michael@0: michael@0: // If we know the length statically from the type, just embed it. michael@0: // Otherwise, load it from the appropriate reserved slot on the michael@0: // typed object. We know it's an int32, so we can convert from michael@0: // Value to int32 using truncation. michael@0: int32_t lenOfAll; michael@0: MDefinition *length; michael@0: if (objDescrs.hasKnownArrayLength(&lenOfAll)) { michael@0: length = constantInt(lenOfAll); michael@0: michael@0: // If we are not loading the length from the object itself, michael@0: // then we still need to check if the object was neutered. michael@0: *canBeNeutered = true; michael@0: } else { michael@0: MInstruction *lengthValue = MLoadFixedSlot::New(alloc(), obj, JS_TYPEDOBJ_SLOT_LENGTH); michael@0: current->add(lengthValue); michael@0: michael@0: MInstruction *length32 = MTruncateToInt32::New(alloc(), lengthValue); michael@0: current->add(length32); michael@0: michael@0: length = length32; michael@0: michael@0: // If we are loading the length from the object itself, michael@0: // then we do not need an extra neuter check, because the length michael@0: // will have been set to 0 when the object was neutered. 
michael@0: *canBeNeutered = false; michael@0: } michael@0: michael@0: index = addBoundsCheck(idInt32, length); michael@0: michael@0: // Since we passed the bounds check, it is impossible for the michael@0: // result of multiplication to overflow; so enable imul path. michael@0: MMul *mul = MMul::New(alloc(), index, constantInt(elemSize), michael@0: MIRType_Int32, MMul::Integer); michael@0: current->add(mul); michael@0: michael@0: *indexAsByteOffset = mul; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryScalarElemOfTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: MDefinition *index, michael@0: TypeDescrSet objDescrs, michael@0: TypeDescrSet elemDescrs, michael@0: int32_t elemSize) michael@0: { michael@0: JS_ASSERT(objDescrs.allOfArrayKind()); michael@0: michael@0: // Must always be loading the same scalar type michael@0: ScalarTypeDescr::Type elemType; michael@0: if (!elemDescrs.scalarType(&elemType)) michael@0: return true; michael@0: JS_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType)); michael@0: michael@0: bool canBeNeutered; michael@0: MDefinition *indexAsByteOffset; michael@0: if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objDescrs, michael@0: &indexAsByteOffset, &canBeNeutered)) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: return pushScalarLoadFromTypedObject(emitted, obj, indexAsByteOffset, elemType, canBeNeutered); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::pushScalarLoadFromTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: MDefinition *offset, michael@0: ScalarTypeDescr::Type elemType, michael@0: bool canBeNeutered) michael@0: { michael@0: int32_t size = ScalarTypeDescr::size(elemType); michael@0: JS_ASSERT(size == ScalarTypeDescr::alignment(elemType)); michael@0: michael@0: // Find location within the owner object. 
michael@0: MDefinition *elements, *scaledOffset; michael@0: loadTypedObjectElements(obj, offset, size, canBeNeutered, michael@0: &elements, &scaledOffset); michael@0: michael@0: // Load the element. michael@0: MLoadTypedArrayElement *load = MLoadTypedArrayElement::New(alloc(), elements, scaledOffset, elemType); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: // If we are reading in-bounds elements, we can use knowledge about michael@0: // the array type to determine the result type, even if the opcode has michael@0: // never executed. The known pushed type is only used to distinguish michael@0: // uint32 reads that may produce either doubles or integers. michael@0: types::TemporaryTypeSet *resultTypes = bytecodeTypes(pc); michael@0: bool allowDouble = resultTypes->hasType(types::Type::DoubleType()); michael@0: michael@0: // Note: knownType is not necessarily in resultTypes; e.g. if we michael@0: // have only observed integers coming out of float array. michael@0: MIRType knownType = MIRTypeForTypedArrayRead(elemType, allowDouble); michael@0: michael@0: // Note: we can ignore the type barrier here, we know the type must michael@0: // be valid and unbarriered. Also, need not set resultTypeSet, michael@0: // because knownType is scalar and a resultTypeSet would provide michael@0: // no useful additional info. 
michael@0: load->setResultType(knownType); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryComplexElemOfTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: MDefinition *index, michael@0: TypeDescrSet objDescrs, michael@0: TypeDescrSet elemDescrs, michael@0: int32_t elemSize) michael@0: { michael@0: JS_ASSERT(objDescrs.allOfArrayKind()); michael@0: michael@0: MDefinition *type = loadTypedObjectType(obj); michael@0: MDefinition *elemTypeObj = typeObjectForElementFromArrayStructType(type); michael@0: michael@0: bool canBeNeutered; michael@0: MDefinition *indexAsByteOffset; michael@0: if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objDescrs, michael@0: &indexAsByteOffset, &canBeNeutered)) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: return pushDerivedTypedObject(emitted, obj, indexAsByteOffset, michael@0: elemDescrs, elemTypeObj, canBeNeutered); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::pushDerivedTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: MDefinition *offset, michael@0: TypeDescrSet derivedTypeDescrs, michael@0: MDefinition *derivedTypeObj, michael@0: bool canBeNeutered) michael@0: { michael@0: // Find location within the owner object. michael@0: MDefinition *owner, *ownerOffset; michael@0: loadTypedObjectData(obj, offset, canBeNeutered, &owner, &ownerOffset); michael@0: michael@0: // Create the derived typed object. michael@0: MInstruction *derivedTypedObj = MNewDerivedTypedObject::New(alloc(), michael@0: derivedTypeDescrs, michael@0: derivedTypeObj, michael@0: owner, michael@0: ownerOffset); michael@0: current->add(derivedTypedObj); michael@0: current->push(derivedTypedObj); michael@0: michael@0: // Determine (if possible) the class/proto that `derivedTypedObj` michael@0: // will have. 
For derived typed objects, the class (transparent vs michael@0: // opaque) will be the same as the incoming object from which the michael@0: // derived typed object is, well, derived. The prototype will be michael@0: // determined based on the type descriptor (and is immutable). michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: const Class *expectedClass = objTypes ? objTypes->getKnownClass() : nullptr; michael@0: JSObject *expectedProto = derivedTypeDescrs.knownPrototype(); michael@0: JS_ASSERT_IF(expectedClass, IsTypedObjectClass(expectedClass)); michael@0: michael@0: // Determine (if possible) the class/proto that the observed type set michael@0: // describes. michael@0: types::TemporaryTypeSet *observedTypes = bytecodeTypes(pc); michael@0: const Class *observedClass = observedTypes->getKnownClass(); michael@0: JSObject *observedProto = observedTypes->getCommonPrototype(); michael@0: michael@0: // If expectedClass/expectedProto are both non-null (and hence michael@0: // known), we can predict precisely what TI type object michael@0: // derivedTypedObj will have. Therefore, if we observe that this michael@0: // TI type object is already contained in the set of michael@0: // observedTypes, we can skip the barrier. michael@0: // michael@0: // Barriers still wind up being needed in some relatively michael@0: // rare cases: michael@0: // michael@0: // - if multiple kinds of typed objects flow into this point, michael@0: // in which case we will not be able to predict expectedClass michael@0: // nor expectedProto. michael@0: // michael@0: // - if the code has never executed, in which case the set of michael@0: // observed types will be incomplete. michael@0: // michael@0: // Barriers are particularly expensive here because they prevent michael@0: // us from optimizing the MNewDerivedTypedObject away. 
michael@0: if (observedClass && observedProto && observedClass == expectedClass && michael@0: observedProto == expectedProto) michael@0: { michael@0: derivedTypedObj->setResultTypeSet(observedTypes); michael@0: } else { michael@0: if (!pushTypeBarrier(derivedTypedObj, observedTypes, true)) michael@0: return false; michael@0: } michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryDense(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (!ElementAccessIsDenseNative(obj, index)) michael@0: return true; michael@0: michael@0: // Don't generate a fast path if there have been bounds check failures michael@0: // and this access might be on a sparse property. michael@0: if (ElementAccessHasExtraIndexedProperty(constraints(), obj) && failedBoundsCheck_) michael@0: return true; michael@0: michael@0: // Don't generate a fast path if this pc has seen negative indexes accessed, michael@0: // which will not appear to be extra indexed properties. michael@0: if (inspector->hasSeenNegativeIndexGetElement(pc)) michael@0: return true; michael@0: michael@0: // Emit dense getelem variant. 
michael@0: if (!jsop_getelem_dense(obj, index)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryTypedStatic(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: ScalarTypeDescr::Type arrayType; michael@0: if (!ElementAccessIsTypedArray(obj, index, &arrayType)) michael@0: return true; michael@0: michael@0: if (!LIRGenerator::allowStaticTypedArrayAccesses()) michael@0: return true; michael@0: michael@0: if (ElementAccessHasExtraIndexedProperty(constraints(), obj)) michael@0: return true; michael@0: michael@0: if (!obj->resultTypeSet()) michael@0: return true; michael@0: michael@0: JSObject *tarrObj = obj->resultTypeSet()->getSingleton(); michael@0: if (!tarrObj) michael@0: return true; michael@0: michael@0: TypedArrayObject *tarr = &tarrObj->as(); michael@0: michael@0: types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); michael@0: if (tarrType->unknownProperties()) michael@0: return true; michael@0: michael@0: // LoadTypedArrayElementStatic currently treats uint32 arrays as int32. michael@0: ArrayBufferView::ViewType viewType = (ArrayBufferView::ViewType) tarr->type(); michael@0: if (viewType == ArrayBufferView::TYPE_UINT32) michael@0: return true; michael@0: michael@0: MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType); michael@0: if (!ptr) michael@0: return true; michael@0: michael@0: // Emit LoadTypedArrayElementStatic. 
michael@0: tarrType->watchStateChangeForTypedArrayData(constraints()); michael@0: michael@0: obj->setImplicitlyUsedUnchecked(); michael@0: index->setImplicitlyUsedUnchecked(); michael@0: michael@0: MLoadTypedArrayElementStatic *load = MLoadTypedArrayElementStatic::New(alloc(), tarr, ptr); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: // The load is infallible if an undefined result will be coerced to the michael@0: // appropriate numeric type if the read is out of bounds. The truncation michael@0: // analysis picks up some of these cases, but is incomplete with respect michael@0: // to others. For now, sniff the bytecode for simple patterns following michael@0: // the load which guarantee a truncation or numeric conversion. michael@0: if (viewType == ArrayBufferView::TYPE_FLOAT32 || viewType == ArrayBufferView::TYPE_FLOAT64) { michael@0: jsbytecode *next = pc + JSOP_GETELEM_LENGTH; michael@0: if (*next == JSOP_POS) michael@0: load->setInfallible(); michael@0: } else { michael@0: jsbytecode *next = pc + JSOP_GETELEM_LENGTH; michael@0: if (*next == JSOP_ZERO && *(next + JSOP_ZERO_LENGTH) == JSOP_BITOR) michael@0: load->setInfallible(); michael@0: } michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryTypedArray(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: ScalarTypeDescr::Type arrayType; michael@0: if (!ElementAccessIsTypedArray(obj, index, &arrayType)) michael@0: return true; michael@0: michael@0: // Emit typed getelem variant. 
michael@0: if (!jsop_getelem_typed(obj, index, arrayType)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryString(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (obj->type() != MIRType_String || !IsNumberType(index->type())) michael@0: return true; michael@0: michael@0: // If the index is expected to be out-of-bounds, don't optimize to avoid michael@0: // frequent bailouts. michael@0: if (bytecodeTypes(pc)->hasType(types::Type::UndefinedType())) michael@0: return true; michael@0: michael@0: // Emit fast path for string[index]. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), index); michael@0: current->add(idInt32); michael@0: index = idInt32; michael@0: michael@0: MStringLength *length = MStringLength::New(alloc(), obj); michael@0: current->add(length); michael@0: michael@0: index = addBoundsCheck(index, length); michael@0: michael@0: MCharCodeAt *charCode = MCharCodeAt::New(alloc(), obj, index); michael@0: current->add(charCode); michael@0: michael@0: MFromCharCode *result = MFromCharCode::New(alloc(), charCode); michael@0: current->add(result); michael@0: current->push(result); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryArguments(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (inliningDepth_ > 0) michael@0: return true; michael@0: michael@0: if (obj->type() != MIRType_MagicOptimizedArguments) michael@0: return true; michael@0: michael@0: // Emit GetFrameArgument. michael@0: michael@0: JS_ASSERT(!info().argsObjAliasesFormals()); michael@0: michael@0: // Type Inference has guaranteed this is an optimized arguments object. 
michael@0: obj->setImplicitlyUsedUnchecked(); michael@0: michael@0: // To ensure that we are not looking above the number of actual arguments. michael@0: MArgumentsLength *length = MArgumentsLength::New(alloc()); michael@0: current->add(length); michael@0: michael@0: // Ensure index is an integer. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), index); michael@0: current->add(idInt32); michael@0: index = idInt32; michael@0: michael@0: // Bailouts if we read more than the number of actual arguments. michael@0: index = addBoundsCheck(index, length); michael@0: michael@0: // Load the argument from the actual arguments. michael@0: MGetFrameArgument *load = MGetFrameArgument::New(alloc(), index, analysis_.hasSetArg()); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: if (!pushTypeBarrier(load, types, true)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryArgumentsInlined(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (inliningDepth_ == 0) michael@0: return true; michael@0: michael@0: if (obj->type() != MIRType_MagicOptimizedArguments) michael@0: return true; michael@0: michael@0: // Emit inlined arguments. 
michael@0: obj->setImplicitlyUsedUnchecked(); michael@0: michael@0: JS_ASSERT(!info().argsObjAliasesFormals()); michael@0: michael@0: // When the id is constant, we can just return the corresponding inlined argument michael@0: if (index->isConstant() && index->toConstant()->value().isInt32()) { michael@0: JS_ASSERT(inliningDepth_ > 0); michael@0: michael@0: int32_t id = index->toConstant()->value().toInt32(); michael@0: index->setImplicitlyUsedUnchecked(); michael@0: michael@0: if (id < (int32_t)inlineCallInfo_->argc() && id >= 0) michael@0: current->push(inlineCallInfo_->getArg(id)); michael@0: else michael@0: pushConstant(UndefinedValue()); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: // inlined not constant not supported, yet. michael@0: return abort("NYI inlined not constant get argument element"); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getElemTryCache(bool *emitted, MDefinition *obj, MDefinition *index) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: // Make sure we have at least an object. michael@0: if (!obj->mightBeType(MIRType_Object)) michael@0: return true; michael@0: michael@0: // Don't cache for strings. michael@0: if (obj->mightBeType(MIRType_String)) michael@0: return true; michael@0: michael@0: // Index should be integer or string michael@0: if (!index->mightBeType(MIRType_Int32) && !index->mightBeType(MIRType_String)) michael@0: return true; michael@0: michael@0: // Turn off cacheing if the element is int32 and we've seen non-native objects as the target michael@0: // of this getelem. michael@0: bool nonNativeGetElement = inspector->hasSeenNonNativeGetElement(pc); michael@0: if (index->mightBeType(MIRType_Int32) && nonNativeGetElement) michael@0: return true; michael@0: michael@0: // Emit GetElementCache. 
michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj, nullptr, types); michael@0: michael@0: // Always add a barrier if the index might be a string, so that the cache michael@0: // can attach stubs for particular properties. michael@0: if (index->mightBeType(MIRType_String)) michael@0: barrier = true; michael@0: michael@0: // See note about always needing a barrier in jsop_getprop. michael@0: if (needsToMonitorMissingProperties(types)) michael@0: barrier = true; michael@0: michael@0: MInstruction *ins = MGetElementCache::New(alloc(), obj, index, barrier); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: // Spice up type information. michael@0: if (index->type() == MIRType_Int32 && !barrier) { michael@0: bool needHoleCheck = !ElementAccessIsPacked(constraints(), obj); michael@0: MIRType knownType = GetElemKnownType(needHoleCheck, types); michael@0: michael@0: if (knownType != MIRType_Value && knownType != MIRType_Double) michael@0: ins->setResultType(knownType); michael@0: } michael@0: michael@0: if (!pushTypeBarrier(ins, types, barrier)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getelem_dense(MDefinition *obj, MDefinition *index) michael@0: { michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: michael@0: if (JSOp(*pc) == JSOP_CALLELEM && !index->mightBeType(MIRType_String)) { michael@0: // Indexed call on an element of an array. Populate the observed types michael@0: // with any objects that could be in the array, to avoid extraneous michael@0: // type barriers. 
michael@0: AddObjectsForPropertyRead(obj, nullptr, types); michael@0: } michael@0: michael@0: bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj, nullptr, types); michael@0: bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj); michael@0: michael@0: // Reads which are on holes in the object do not have to bail out if michael@0: // undefined values have been observed at this access site and the access michael@0: // cannot hit another indexed property on the object or its prototypes. michael@0: bool readOutOfBounds = michael@0: types->hasType(types::Type::UndefinedType()) && michael@0: !ElementAccessHasExtraIndexedProperty(constraints(), obj); michael@0: michael@0: MIRType knownType = MIRType_Value; michael@0: if (!barrier) michael@0: knownType = GetElemKnownType(needsHoleCheck, types); michael@0: michael@0: // Ensure index is an integer. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), index); michael@0: current->add(idInt32); michael@0: index = idInt32; michael@0: michael@0: // Get the elements vector. michael@0: MInstruction *elements = MElements::New(alloc(), obj); michael@0: current->add(elements); michael@0: michael@0: // Note: to help GVN, use the original MElements instruction and not michael@0: // MConvertElementsToDoubles as operand. This is fine because converting michael@0: // elements to double does not change the initialized length. michael@0: MInitializedLength *initLength = MInitializedLength::New(alloc(), elements); michael@0: current->add(initLength); michael@0: michael@0: // If we can load the element as a definite double, make sure to check that michael@0: // the array has been converted to homogenous doubles first. michael@0: // michael@0: // NB: We disable this optimization in parallel execution mode michael@0: // because it is inherently not threadsafe (how do you convert the michael@0: // array atomically when there might be concurrent readers)? 
michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: ExecutionMode executionMode = info().executionMode(); michael@0: bool loadDouble = michael@0: executionMode == SequentialExecution && michael@0: !barrier && michael@0: loopDepth_ && michael@0: !readOutOfBounds && michael@0: !needsHoleCheck && michael@0: knownType == MIRType_Double && michael@0: objTypes && michael@0: objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles; michael@0: if (loadDouble) michael@0: elements = addConvertElementsToDoubles(elements); michael@0: michael@0: MInstruction *load; michael@0: michael@0: if (!readOutOfBounds) { michael@0: // This load should not return undefined, so likely we're reading michael@0: // in-bounds elements, and the array is packed or its holes are not michael@0: // read. This is the best case: we can separate the bounds check for michael@0: // hoisting. michael@0: index = addBoundsCheck(index, initLength); michael@0: michael@0: load = MLoadElement::New(alloc(), elements, index, needsHoleCheck, loadDouble); michael@0: current->add(load); michael@0: } else { michael@0: // This load may return undefined, so assume that we *can* read holes, michael@0: // or that we can read out-of-bounds accesses. In this case, the bounds michael@0: // check is part of the opcode. michael@0: load = MLoadElementHole::New(alloc(), elements, index, initLength, needsHoleCheck); michael@0: current->add(load); michael@0: michael@0: // If maybeUndefined was true, the typeset must have undefined, and michael@0: // then either additional types or a barrier. This means we should michael@0: // never have a typed version of LoadElementHole. michael@0: JS_ASSERT(knownType == MIRType_Value); michael@0: } michael@0: michael@0: // If the array is being converted to doubles, but we've observed michael@0: // just int, substitute a type set of int+double into the observed michael@0: // type set. 
The reason for this is that, in the michael@0: // interpreter+baseline, such arrays may consist of mixed michael@0: // ints/doubles, but when we enter ion code, we will be coercing michael@0: // all inputs to doubles. Therefore, the type barrier checking for michael@0: // just int is highly likely (*almost* guaranteed) to fail sooner michael@0: // or later. Essentially, by eagerly coercing to double, ion is michael@0: // making the observed types outdated. To compensate for this, we michael@0: // substitute a broader observed type set consisting of both ints michael@0: // and doubles. There is perhaps a tradeoff here, so we limit this michael@0: // optimization to parallel code, where it is needed to prevent michael@0: // perpetual bailouts in some extreme cases. (Bug 977853) michael@0: // michael@0: // NB: we have not added a MConvertElementsToDoubles MIR, so we michael@0: // cannot *assume* the result is a double. michael@0: if (executionMode == ParallelExecution && michael@0: barrier && michael@0: types->getKnownMIRType() == MIRType_Int32 && michael@0: objTypes && michael@0: objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles) michael@0: { michael@0: // Note: double implies int32 as well for typesets michael@0: types = alloc_->lifoAlloc()->new_(types::Type::DoubleType()); michael@0: if (!types) michael@0: return false; michael@0: michael@0: barrier = false; // Don't need a barrier anymore michael@0: } michael@0: michael@0: if (knownType != MIRType_Value) michael@0: load->setResultType(knownType); michael@0: michael@0: current->push(load); michael@0: return pushTypeBarrier(load, types, barrier); michael@0: } michael@0: michael@0: void michael@0: IonBuilder::addTypedArrayLengthAndData(MDefinition *obj, michael@0: BoundsChecking checking, michael@0: MDefinition **index, michael@0: MInstruction **length, MInstruction **elements) michael@0: { michael@0: MOZ_ASSERT((index != nullptr) == (elements != nullptr)); michael@0: 
michael@0: if (obj->isConstant() && obj->toConstant()->value().isObject()) { michael@0: TypedArrayObject *tarr = &obj->toConstant()->value().toObject().as(); michael@0: void *data = tarr->viewData(); michael@0: // Bug 979449 - Optimistically embed the elements and use TI to michael@0: // invalidate if we move them. michael@0: if (!gc::IsInsideNursery(tarr->runtimeFromMainThread(), data)) { michael@0: // The 'data' pointer can change in rare circumstances michael@0: // (ArrayBufferObject::changeContents). michael@0: types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); michael@0: if (!tarrType->unknownProperties()) { michael@0: tarrType->watchStateChangeForTypedArrayData(constraints()); michael@0: michael@0: obj->setImplicitlyUsedUnchecked(); michael@0: michael@0: int32_t len = SafeCast(tarr->length()); michael@0: *length = MConstant::New(alloc(), Int32Value(len)); michael@0: current->add(*length); michael@0: michael@0: if (index) { michael@0: if (checking == DoBoundsCheck) michael@0: *index = addBoundsCheck(*index, *length); michael@0: michael@0: *elements = MConstantElements::New(alloc(), data); michael@0: current->add(*elements); michael@0: } michael@0: return; michael@0: } michael@0: } michael@0: } michael@0: michael@0: *length = MTypedArrayLength::New(alloc(), obj); michael@0: current->add(*length); michael@0: michael@0: if (index) { michael@0: if (checking == DoBoundsCheck) michael@0: *index = addBoundsCheck(*index, *length); michael@0: michael@0: *elements = MTypedArrayElements::New(alloc(), obj); michael@0: current->add(*elements); michael@0: } michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::convertShiftToMaskForStaticTypedArray(MDefinition *id, michael@0: ArrayBufferView::ViewType viewType) michael@0: { michael@0: // No shifting is necessary if the typed array has single byte elements. 
michael@0: if (TypedArrayShift(viewType) == 0) michael@0: return id; michael@0: michael@0: // If the index is an already shifted constant, undo the shift to get the michael@0: // absolute offset being accessed. michael@0: if (id->isConstant() && id->toConstant()->value().isInt32()) { michael@0: int32_t index = id->toConstant()->value().toInt32(); michael@0: MConstant *offset = MConstant::New(alloc(), Int32Value(index << TypedArrayShift(viewType))); michael@0: current->add(offset); michael@0: return offset; michael@0: } michael@0: michael@0: if (!id->isRsh() || id->isEffectful()) michael@0: return nullptr; michael@0: if (!id->getOperand(1)->isConstant()) michael@0: return nullptr; michael@0: const Value &value = id->getOperand(1)->toConstant()->value(); michael@0: if (!value.isInt32() || uint32_t(value.toInt32()) != TypedArrayShift(viewType)) michael@0: return nullptr; michael@0: michael@0: // Instead of shifting, mask off the low bits of the index so that michael@0: // a non-scaled access on the typed array can be performed. 
michael@0: MConstant *mask = MConstant::New(alloc(), Int32Value(~((1 << value.toInt32()) - 1))); michael@0: MBitAnd *ptr = MBitAnd::New(alloc(), id->getOperand(0), mask); michael@0: michael@0: ptr->infer(nullptr, nullptr); michael@0: JS_ASSERT(!ptr->isEffectful()); michael@0: michael@0: current->add(mask); michael@0: current->add(ptr); michael@0: michael@0: return ptr; michael@0: } michael@0: michael@0: static MIRType michael@0: MIRTypeForTypedArrayRead(ScalarTypeDescr::Type arrayType, michael@0: bool observedDouble) michael@0: { michael@0: switch (arrayType) { michael@0: case ScalarTypeDescr::TYPE_INT8: michael@0: case ScalarTypeDescr::TYPE_UINT8: michael@0: case ScalarTypeDescr::TYPE_UINT8_CLAMPED: michael@0: case ScalarTypeDescr::TYPE_INT16: michael@0: case ScalarTypeDescr::TYPE_UINT16: michael@0: case ScalarTypeDescr::TYPE_INT32: michael@0: return MIRType_Int32; michael@0: case ScalarTypeDescr::TYPE_UINT32: michael@0: return observedDouble ? MIRType_Double : MIRType_Int32; michael@0: case ScalarTypeDescr::TYPE_FLOAT32: michael@0: return (LIRGenerator::allowFloat32Optimizations()) ? MIRType_Float32 : MIRType_Double; michael@0: case ScalarTypeDescr::TYPE_FLOAT64: michael@0: return MIRType_Double; michael@0: } michael@0: MOZ_ASSUME_UNREACHABLE("Unknown typed array type"); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getelem_typed(MDefinition *obj, MDefinition *index, michael@0: ScalarTypeDescr::Type arrayType) michael@0: { michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: michael@0: bool maybeUndefined = types->hasType(types::Type::UndefinedType()); michael@0: michael@0: // Reading from an Uint32Array will result in a double for values michael@0: // that don't fit in an int32. We have to bailout if this happens michael@0: // and the instruction is not known to return a double. michael@0: bool allowDouble = types->hasType(types::Type::DoubleType()); michael@0: michael@0: // Ensure id is an integer. 
michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), index); michael@0: current->add(idInt32); michael@0: index = idInt32; michael@0: michael@0: if (!maybeUndefined) { michael@0: // Assume the index is in range, so that we can hoist the length, michael@0: // elements vector and bounds check. michael@0: michael@0: // If we are reading in-bounds elements, we can use knowledge about michael@0: // the array type to determine the result type, even if the opcode has michael@0: // never executed. The known pushed type is only used to distinguish michael@0: // uint32 reads that may produce either doubles or integers. michael@0: MIRType knownType = MIRTypeForTypedArrayRead(arrayType, allowDouble); michael@0: michael@0: // Get length, bounds-check, then get elements, and add all instructions. michael@0: MInstruction *length; michael@0: MInstruction *elements; michael@0: addTypedArrayLengthAndData(obj, DoBoundsCheck, &index, &length, &elements); michael@0: michael@0: // Load the element. michael@0: MLoadTypedArrayElement *load = MLoadTypedArrayElement::New(alloc(), elements, index, arrayType); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: // Note: we can ignore the type barrier here, we know the type must michael@0: // be valid and unbarriered. michael@0: load->setResultType(knownType); michael@0: return true; michael@0: } else { michael@0: // We need a type barrier if the array's element type has never been michael@0: // observed (we've only read out-of-bounds values). Note that for michael@0: // Uint32Array, we only check for int32: if allowDouble is false we michael@0: // will bailout when we read a double. 
michael@0: bool needsBarrier = true; michael@0: switch (arrayType) { michael@0: case ScalarTypeDescr::TYPE_INT8: michael@0: case ScalarTypeDescr::TYPE_UINT8: michael@0: case ScalarTypeDescr::TYPE_UINT8_CLAMPED: michael@0: case ScalarTypeDescr::TYPE_INT16: michael@0: case ScalarTypeDescr::TYPE_UINT16: michael@0: case ScalarTypeDescr::TYPE_INT32: michael@0: case ScalarTypeDescr::TYPE_UINT32: michael@0: if (types->hasType(types::Type::Int32Type())) michael@0: needsBarrier = false; michael@0: break; michael@0: case ScalarTypeDescr::TYPE_FLOAT32: michael@0: case ScalarTypeDescr::TYPE_FLOAT64: michael@0: if (allowDouble) michael@0: needsBarrier = false; michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unknown typed array type"); michael@0: } michael@0: michael@0: // Assume we will read out-of-bound values. In this case the michael@0: // bounds check will be part of the instruction, and the instruction michael@0: // will always return a Value. michael@0: MLoadTypedArrayElementHole *load = michael@0: MLoadTypedArrayElementHole::New(alloc(), obj, index, arrayType, allowDouble); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: return pushTypeBarrier(load, types, needsBarrier); michael@0: } michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setelem() michael@0: { michael@0: bool emitted = false; michael@0: michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *index = current->pop(); michael@0: MDefinition *object = current->pop(); michael@0: michael@0: if (!setElemTryTypedObject(&emitted, object, index, value) || emitted) michael@0: return emitted; michael@0: michael@0: if (!setElemTryTypedStatic(&emitted, object, index, value) || emitted) michael@0: return emitted; michael@0: michael@0: if (!setElemTryTypedArray(&emitted, object, index, value) || emitted) michael@0: return emitted; michael@0: michael@0: if (!setElemTryDense(&emitted, object, index, value) || emitted) michael@0: 
return emitted; michael@0: michael@0: if (!setElemTryArguments(&emitted, object, index, value) || emitted) michael@0: return emitted; michael@0: michael@0: if (script()->argumentsHasVarBinding() && object->mightBeType(MIRType_MagicOptimizedArguments)) michael@0: return abort("Type is not definitely lazy arguments."); michael@0: michael@0: if (!setElemTryCache(&emitted, object, index, value) || emitted) michael@0: return emitted; michael@0: michael@0: // Emit call. michael@0: MInstruction *ins = MCallSetElement::New(alloc(), object, index, value); michael@0: current->add(ins); michael@0: current->push(value); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryTypedObject(bool *emitted, MDefinition *obj, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: TypeDescrSet objTypeDescrs; michael@0: if (!lookupTypeDescrSet(obj, &objTypeDescrs)) michael@0: return false; michael@0: michael@0: if (!objTypeDescrs.allOfArrayKind()) michael@0: return true; michael@0: michael@0: TypeDescrSet elemTypeDescrs; michael@0: if (!objTypeDescrs.arrayElementType(*this, &elemTypeDescrs)) michael@0: return false; michael@0: if (elemTypeDescrs.empty()) michael@0: return true; michael@0: michael@0: JS_ASSERT(TypeDescr::isSized(elemTypeDescrs.kind())); michael@0: michael@0: int32_t elemSize; michael@0: if (!elemTypeDescrs.allHaveSameSize(&elemSize)) michael@0: return true; michael@0: michael@0: switch (elemTypeDescrs.kind()) { michael@0: case TypeDescr::X4: michael@0: // FIXME (bug 894105): store a MIRType_float32x4 etc michael@0: return true; michael@0: michael@0: case TypeDescr::Reference: michael@0: case TypeDescr::Struct: michael@0: case TypeDescr::SizedArray: michael@0: case TypeDescr::UnsizedArray: michael@0: // For now, only optimize storing scalars. 
michael@0: return true; michael@0: michael@0: case TypeDescr::Scalar: michael@0: return setElemTryScalarElemOfTypedObject(emitted, michael@0: obj, michael@0: index, michael@0: objTypeDescrs, michael@0: value, michael@0: elemTypeDescrs, michael@0: elemSize); michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("Bad kind"); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryScalarElemOfTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: MDefinition *index, michael@0: TypeDescrSet objTypeDescrs, michael@0: MDefinition *value, michael@0: TypeDescrSet elemTypeDescrs, michael@0: int32_t elemSize) michael@0: { michael@0: // Must always be loading the same scalar type michael@0: ScalarTypeDescr::Type elemType; michael@0: if (!elemTypeDescrs.scalarType(&elemType)) michael@0: return true; michael@0: JS_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType)); michael@0: michael@0: bool canBeNeutered; michael@0: MDefinition *indexAsByteOffset; michael@0: if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objTypeDescrs, michael@0: &indexAsByteOffset, &canBeNeutered)) michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: // Store the element michael@0: if (!storeScalarTypedObjectValue(obj, indexAsByteOffset, elemType, canBeNeutered, false, value)) michael@0: return false; michael@0: michael@0: current->push(value); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryTypedStatic(bool *emitted, MDefinition *object, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: ScalarTypeDescr::Type arrayType; michael@0: if (!ElementAccessIsTypedArray(object, index, &arrayType)) michael@0: return true; michael@0: michael@0: if (!LIRGenerator::allowStaticTypedArrayAccesses()) michael@0: return true; michael@0: michael@0: if 
(ElementAccessHasExtraIndexedProperty(constraints(), object)) michael@0: return true; michael@0: michael@0: if (!object->resultTypeSet()) michael@0: return true; michael@0: JSObject *tarrObj = object->resultTypeSet()->getSingleton(); michael@0: if (!tarrObj) michael@0: return true; michael@0: michael@0: TypedArrayObject *tarr = &tarrObj->as(); michael@0: michael@0: types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); michael@0: if (tarrType->unknownProperties()) michael@0: return true; michael@0: michael@0: ArrayBufferView::ViewType viewType = (ArrayBufferView::ViewType) tarr->type(); michael@0: MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType); michael@0: if (!ptr) michael@0: return true; michael@0: michael@0: // Emit StoreTypedArrayElementStatic. michael@0: tarrType->watchStateChangeForTypedArrayData(constraints()); michael@0: michael@0: object->setImplicitlyUsedUnchecked(); michael@0: index->setImplicitlyUsedUnchecked(); michael@0: michael@0: // Clamp value to [0, 255] for Uint8ClampedArray. 
michael@0: MDefinition *toWrite = value; michael@0: if (viewType == ArrayBufferView::TYPE_UINT8_CLAMPED) { michael@0: toWrite = MClampToUint8::New(alloc(), value); michael@0: current->add(toWrite->toInstruction()); michael@0: } michael@0: michael@0: MInstruction *store = MStoreTypedArrayElementStatic::New(alloc(), tarr, ptr, toWrite); michael@0: current->add(store); michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(store)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryTypedArray(bool *emitted, MDefinition *object, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: ScalarTypeDescr::Type arrayType; michael@0: if (!ElementAccessIsTypedArray(object, index, &arrayType)) michael@0: return true; michael@0: michael@0: // Emit typed setelem variant. michael@0: if (!jsop_setelem_typed(arrayType, SetElem_Normal, object, index, value)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryDense(bool *emitted, MDefinition *object, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (!ElementAccessIsDenseNative(object, index)) michael@0: return true; michael@0: if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, michael@0: &object, nullptr, &value, /* canModify = */ true)) michael@0: { michael@0: return true; michael@0: } michael@0: if (!object->resultTypeSet()) michael@0: return true; michael@0: michael@0: types::TemporaryTypeSet::DoubleConversion conversion = michael@0: object->resultTypeSet()->convertDoubleElements(constraints()); michael@0: michael@0: // If AmbiguousDoubleConversion, only handle int32 values for now. 
michael@0: if (conversion == types::TemporaryTypeSet::AmbiguousDoubleConversion && michael@0: value->type() != MIRType_Int32) michael@0: { michael@0: return true; michael@0: } michael@0: michael@0: // Don't generate a fast path if there have been bounds check failures michael@0: // and this access might be on a sparse property. michael@0: if (ElementAccessHasExtraIndexedProperty(constraints(), object) && failedBoundsCheck_) michael@0: return true; michael@0: michael@0: // Emit dense setelem variant. michael@0: if (!jsop_setelem_dense(conversion, SetElem_Normal, object, index, value)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryArguments(bool *emitted, MDefinition *object, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (object->type() != MIRType_MagicOptimizedArguments) michael@0: return true; michael@0: michael@0: // Arguments are not supported yet. 
michael@0: return abort("NYI arguments[]="); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setElemTryCache(bool *emitted, MDefinition *object, michael@0: MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (!object->mightBeType(MIRType_Object)) michael@0: return true; michael@0: michael@0: if (!index->mightBeType(MIRType_Int32) && !index->mightBeType(MIRType_String)) michael@0: return true; michael@0: michael@0: // TODO: Bug 876650: remove this check: michael@0: // Temporary disable the cache if non dense native, michael@0: // until the cache supports more ics michael@0: SetElemICInspector icInspect(inspector->setElemICInspector(pc)); michael@0: if (!icInspect.sawDenseWrite() && !icInspect.sawTypedArrayWrite()) michael@0: return true; michael@0: michael@0: if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, michael@0: &object, nullptr, &value, /* canModify = */ true)) michael@0: { michael@0: return true; michael@0: } michael@0: michael@0: // We can avoid worrying about holes in the IC if we know a priori we are safe michael@0: // from them. If TI can guard that there are no indexed properties on the prototype michael@0: // chain, we know that we anen't missing any setters by overwriting the hole with michael@0: // another value. michael@0: bool guardHoles = ElementAccessHasExtraIndexedProperty(constraints(), object); michael@0: michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), object, value)); michael@0: michael@0: // Emit SetElementCache. 
michael@0: MInstruction *ins = MSetElementCache::New(alloc(), object, index, value, script()->strict(), guardHoles); michael@0: current->add(ins); michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setelem_dense(types::TemporaryTypeSet::DoubleConversion conversion, michael@0: SetElemSafety safety, michael@0: MDefinition *obj, MDefinition *id, MDefinition *value) michael@0: { michael@0: MIRType elementType = DenseNativeElementType(constraints(), obj); michael@0: bool packed = ElementAccessIsPacked(constraints(), obj); michael@0: michael@0: // Writes which are on holes in the object do not have to bail out if they michael@0: // cannot hit another indexed property on the object or its prototypes. michael@0: bool writeOutOfBounds = !ElementAccessHasExtraIndexedProperty(constraints(), obj); michael@0: michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, value)); michael@0: michael@0: // Ensure id is an integer. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), id); michael@0: current->add(idInt32); michael@0: id = idInt32; michael@0: michael@0: // Get the elements vector. michael@0: MElements *elements = MElements::New(alloc(), obj); michael@0: current->add(elements); michael@0: michael@0: // Ensure the value is a double, if double conversion might be needed. 
michael@0: MDefinition *newValue = value; michael@0: switch (conversion) { michael@0: case types::TemporaryTypeSet::AlwaysConvertToDoubles: michael@0: case types::TemporaryTypeSet::MaybeConvertToDoubles: { michael@0: MInstruction *valueDouble = MToDouble::New(alloc(), value); michael@0: current->add(valueDouble); michael@0: newValue = valueDouble; michael@0: break; michael@0: } michael@0: michael@0: case types::TemporaryTypeSet::AmbiguousDoubleConversion: { michael@0: JS_ASSERT(value->type() == MIRType_Int32); michael@0: MInstruction *maybeDouble = MMaybeToDoubleElement::New(alloc(), elements, value); michael@0: current->add(maybeDouble); michael@0: newValue = maybeDouble; michael@0: break; michael@0: } michael@0: michael@0: case types::TemporaryTypeSet::DontConvertToDoubles: michael@0: break; michael@0: michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unknown double conversion"); michael@0: } michael@0: michael@0: bool writeHole = false; michael@0: if (safety == SetElem_Normal) { michael@0: SetElemICInspector icInspect(inspector->setElemICInspector(pc)); michael@0: writeHole = icInspect.sawOOBDenseWrite(); michael@0: } michael@0: michael@0: // Use MStoreElementHole if this SETELEM has written to out-of-bounds michael@0: // indexes in the past. Otherwise, use MStoreElement so that we can hoist michael@0: // the initialized length and bounds check. 
michael@0: MStoreElementCommon *store; michael@0: if (writeHole && writeOutOfBounds) { michael@0: JS_ASSERT(safety == SetElem_Normal); michael@0: michael@0: MStoreElementHole *ins = MStoreElementHole::New(alloc(), obj, elements, id, newValue); michael@0: store = ins; michael@0: michael@0: current->add(ins); michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: } else { michael@0: MInitializedLength *initLength = MInitializedLength::New(alloc(), elements); michael@0: current->add(initLength); michael@0: michael@0: bool needsHoleCheck; michael@0: if (safety == SetElem_Normal) { michael@0: id = addBoundsCheck(id, initLength); michael@0: needsHoleCheck = !packed && !writeOutOfBounds; michael@0: } else { michael@0: needsHoleCheck = false; michael@0: } michael@0: michael@0: MStoreElement *ins = MStoreElement::New(alloc(), elements, id, newValue, needsHoleCheck); michael@0: store = ins; michael@0: michael@0: if (safety == SetElem_Unsafe) michael@0: ins->setRacy(); michael@0: michael@0: current->add(ins); michael@0: michael@0: if (safety == SetElem_Normal) michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: } michael@0: michael@0: // Determine whether a write barrier is required. 
michael@0: if (obj->resultTypeSet()->propertyNeedsBarrier(constraints(), JSID_VOID)) michael@0: store->setNeedsBarrier(); michael@0: michael@0: if (elementType != MIRType_None && packed) michael@0: store->setElementType(elementType); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: michael@0: bool michael@0: IonBuilder::jsop_setelem_typed(ScalarTypeDescr::Type arrayType, michael@0: SetElemSafety safety, michael@0: MDefinition *obj, MDefinition *id, MDefinition *value) michael@0: { michael@0: bool expectOOB; michael@0: if (safety == SetElem_Normal) { michael@0: SetElemICInspector icInspect(inspector->setElemICInspector(pc)); michael@0: expectOOB = icInspect.sawOOBTypedArrayWrite(); michael@0: } else { michael@0: expectOOB = false; michael@0: } michael@0: michael@0: if (expectOOB) michael@0: spew("Emitting OOB TypedArray SetElem"); michael@0: michael@0: // Ensure id is an integer. michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), id); michael@0: current->add(idInt32); michael@0: id = idInt32; michael@0: michael@0: // Get length, bounds-check, then get elements, and add all instructions. michael@0: MInstruction *length; michael@0: MInstruction *elements; michael@0: BoundsChecking checking = (!expectOOB && safety == SetElem_Normal) michael@0: ? DoBoundsCheck michael@0: : SkipBoundsCheck; michael@0: addTypedArrayLengthAndData(obj, checking, &id, &length, &elements); michael@0: michael@0: // Clamp value to [0, 255] for Uint8ClampedArray. michael@0: MDefinition *toWrite = value; michael@0: if (arrayType == ScalarTypeDescr::TYPE_UINT8_CLAMPED) { michael@0: toWrite = MClampToUint8::New(alloc(), value); michael@0: current->add(toWrite->toInstruction()); michael@0: } michael@0: michael@0: // Store the value. 
michael@0: MInstruction *ins; michael@0: if (expectOOB) { michael@0: ins = MStoreTypedArrayElementHole::New(alloc(), elements, length, id, toWrite, arrayType); michael@0: } else { michael@0: MStoreTypedArrayElement *store = michael@0: MStoreTypedArrayElement::New(alloc(), elements, id, toWrite, arrayType); michael@0: if (safety == SetElem_Unsafe) michael@0: store->setRacy(); michael@0: ins = store; michael@0: } michael@0: michael@0: current->add(ins); michael@0: michael@0: if (safety == SetElem_Normal) michael@0: current->push(value); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setelem_typed_object(ScalarTypeDescr::Type arrayType, michael@0: SetElemSafety safety, michael@0: bool racy, michael@0: MDefinition *object, MDefinition *index, MDefinition *value) michael@0: { michael@0: JS_ASSERT(safety == SetElem_Unsafe); // Can be fixed, but there's been no reason to as of yet michael@0: michael@0: MInstruction *int_index = MToInt32::New(alloc(), index); michael@0: current->add(int_index); michael@0: michael@0: size_t elemSize = ScalarTypeDescr::alignment(arrayType); michael@0: MMul *byteOffset = MMul::New(alloc(), int_index, constantInt(elemSize), michael@0: MIRType_Int32, MMul::Integer); michael@0: current->add(byteOffset); michael@0: michael@0: if (!storeScalarTypedObjectValue(object, byteOffset, arrayType, false, racy, value)) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_length() michael@0: { michael@0: if (jsop_length_fastPath()) michael@0: return true; michael@0: michael@0: PropertyName *name = info().getAtom(pc)->asPropertyName(); michael@0: return jsop_getprop(name); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_length_fastPath() michael@0: { michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: michael@0: if (types->getKnownMIRType() != MIRType_Int32) michael@0: return 
false; michael@0: michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: if (obj->mightBeType(MIRType_String)) { michael@0: if (obj->mightBeType(MIRType_Object)) michael@0: return false; michael@0: current->pop(); michael@0: MStringLength *ins = MStringLength::New(alloc(), obj); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: return true; michael@0: } michael@0: michael@0: if (obj->mightBeType(MIRType_Object)) { michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: michael@0: if (objTypes && michael@0: objTypes->getKnownClass() == &ArrayObject::class_ && michael@0: !objTypes->hasObjectFlags(constraints(), types::OBJECT_FLAG_LENGTH_OVERFLOW)) michael@0: { michael@0: current->pop(); michael@0: MElements *elements = MElements::New(alloc(), obj); michael@0: current->add(elements); michael@0: michael@0: // Read length. michael@0: MArrayLength *length = MArrayLength::New(alloc(), elements); michael@0: current->add(length); michael@0: current->push(length); michael@0: return true; michael@0: } michael@0: michael@0: if (objTypes && objTypes->getTypedArrayType() != ScalarTypeDescr::TYPE_MAX) { michael@0: current->pop(); michael@0: MInstruction *length = addTypedArrayLength(obj); michael@0: current->push(length); michael@0: return true; michael@0: } michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_arguments() michael@0: { michael@0: if (info().needsArgsObj()) { michael@0: current->push(current->argumentsObject()); michael@0: return true; michael@0: } michael@0: JS_ASSERT(lazyArguments_); michael@0: current->push(lazyArguments_); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_arguments_length() michael@0: { michael@0: // Type Inference has guaranteed this is an optimized arguments object. 
michael@0: MDefinition *args = current->pop(); michael@0: args->setImplicitlyUsedUnchecked(); michael@0: michael@0: // We don't know anything from the callee michael@0: if (inliningDepth_ == 0) { michael@0: MInstruction *ins = MArgumentsLength::New(alloc()); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: return true; michael@0: } michael@0: michael@0: // We are inlining and know the number of arguments the callee pushed michael@0: return pushConstant(Int32Value(inlineCallInfo_->argv().length())); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_rest() michael@0: { michael@0: JSObject *templateObject = inspector->getTemplateObject(pc); michael@0: JS_ASSERT(templateObject->is()); michael@0: michael@0: if (inliningDepth_ == 0) { michael@0: // We don't know anything about the callee. michael@0: MArgumentsLength *numActuals = MArgumentsLength::New(alloc()); michael@0: current->add(numActuals); michael@0: michael@0: // Pass in the number of actual arguments, the number of formals (not michael@0: // including the rest parameter slot itself), and the template object. michael@0: MRest *rest = MRest::New(alloc(), constraints(), numActuals, info().nargs() - 1, michael@0: templateObject); michael@0: current->add(rest); michael@0: current->push(rest); michael@0: return true; michael@0: } michael@0: michael@0: // We know the exact number of arguments the callee pushed. michael@0: unsigned numActuals = inlineCallInfo_->argv().length(); michael@0: unsigned numFormals = info().nargs() - 1; michael@0: unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0; michael@0: michael@0: MNewArray *array = MNewArray::New(alloc(), constraints(), numRest, templateObject, michael@0: templateObject->type()->initialHeap(constraints()), michael@0: MNewArray::NewArray_Allocating); michael@0: current->add(array); michael@0: michael@0: if (numRest == 0) { michael@0: // No more updating to do. 
(Note that in this one case the length from michael@0: // the template object is already correct.) michael@0: current->push(array); michael@0: return true; michael@0: } michael@0: michael@0: MElements *elements = MElements::New(alloc(), array); michael@0: current->add(elements); michael@0: michael@0: // Unroll the argument copy loop. We don't need to do any bounds or hole michael@0: // checking here. michael@0: MConstant *index = nullptr; michael@0: for (unsigned i = numFormals; i < numActuals; i++) { michael@0: index = MConstant::New(alloc(), Int32Value(i - numFormals)); michael@0: current->add(index); michael@0: michael@0: MDefinition *arg = inlineCallInfo_->argv()[i]; michael@0: MStoreElement *store = MStoreElement::New(alloc(), elements, index, arg, michael@0: /* needsHoleCheck = */ false); michael@0: current->add(store); michael@0: michael@0: if (NeedsPostBarrier(info(), arg)) michael@0: current->add(MPostWriteBarrier::New(alloc(), array, arg)); michael@0: } michael@0: michael@0: // The array's length is incorrectly 0 now, from the template object michael@0: // created by BaselineCompiler::emit_JSOP_REST() before the actual argument michael@0: // count was known. Set the correct length now that we know that count. michael@0: MSetArrayLength *length = MSetArrayLength::New(alloc(), elements, index); michael@0: current->add(length); michael@0: michael@0: // Update the initialized length for all the (necessarily non-hole) michael@0: // elements added. 
michael@0: MSetInitializedLength *initLength = MSetInitializedLength::New(alloc(), elements, index); michael@0: current->add(initLength); michael@0: michael@0: current->push(array); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getDefiniteSlot(types::TemporaryTypeSet *types, PropertyName *name, michael@0: types::HeapTypeSetKey *property) michael@0: { michael@0: if (!types || types->unknownObject() || types->getObjectCount() != 1) michael@0: return false; michael@0: michael@0: types::TypeObjectKey *type = types->getObject(0); michael@0: if (type->unknownProperties() || type->singleton()) michael@0: return false; michael@0: michael@0: jsid id = NameToId(name); michael@0: michael@0: *property = type->property(id); michael@0: return property->maybeTypes() && michael@0: property->maybeTypes()->definiteProperty() && michael@0: !property->nonData(constraints()); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_runonce() michael@0: { michael@0: MRunOncePrologue *ins = MRunOncePrologue::New(alloc()); michael@0: current->add(ins); michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_not() michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: michael@0: MNot *ins = MNot::New(alloc(), value); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: ins->infer(); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::objectsHaveCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name, michael@0: bool isGetter, JSObject *foundProto) michael@0: { michael@0: // With foundProto a prototype with a getter or setter for name, return michael@0: // whether looking up name on any object in |types| will go through michael@0: // foundProto, i.e. all the objects have foundProto on their prototype michael@0: // chain and do not have a property for name before reaching foundProto. 
michael@0: michael@0: // No sense looking if we don't know what's going on. michael@0: if (!types || types->unknownObject()) michael@0: return false; michael@0: michael@0: for (unsigned i = 0; i < types->getObjectCount(); i++) { michael@0: if (types->getSingleObject(i) == foundProto) michael@0: continue; michael@0: michael@0: types::TypeObjectKey *type = types->getObject(i); michael@0: if (!type) michael@0: continue; michael@0: michael@0: while (type) { michael@0: if (type->unknownProperties()) michael@0: return false; michael@0: michael@0: const Class *clasp = type->clasp(); michael@0: if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) michael@0: return false; michael@0: michael@0: // Look for a getter/setter on the class itself which may need michael@0: // to be called. Ignore the getGeneric hook for typed arrays, it michael@0: // only handles integers and forwards names to the prototype. michael@0: if (isGetter && clasp->ops.getGeneric && !IsTypedArrayClass(clasp)) michael@0: return false; michael@0: if (!isGetter && clasp->ops.setGeneric) michael@0: return false; michael@0: michael@0: // Test for isOwnProperty() without freezing. If we end up michael@0: // optimizing, freezePropertiesForCommonPropFunc will freeze the michael@0: // property type sets later on. 
michael@0: types::HeapTypeSetKey property = type->property(NameToId(name)); michael@0: if (types::TypeSet *types = property.maybeTypes()) { michael@0: if (!types->empty() || types->nonDataProperty()) michael@0: return false; michael@0: } michael@0: if (JSObject *obj = type->singleton()) { michael@0: if (types::CanHaveEmptyPropertyTypesForOwnProperty(obj)) michael@0: return false; michael@0: } michael@0: michael@0: if (!type->hasTenuredProto()) michael@0: return false; michael@0: JSObject *proto = type->proto().toObjectOrNull(); michael@0: if (proto == foundProto) michael@0: break; michael@0: if (!proto) { michael@0: // The foundProto being searched for did not show up on the michael@0: // object's prototype chain. michael@0: return false; michael@0: } michael@0: type = types::TypeObjectKey::get(type->proto().toObjectOrNull()); michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: IonBuilder::freezePropertiesForCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name, michael@0: JSObject *foundProto) michael@0: { michael@0: for (unsigned i = 0; i < types->getObjectCount(); i++) { michael@0: // If we found a Singleton object's own-property, there's nothing to michael@0: // freeze. michael@0: if (types->getSingleObject(i) == foundProto) michael@0: continue; michael@0: michael@0: types::TypeObjectKey *type = types->getObject(i); michael@0: if (!type) michael@0: continue; michael@0: michael@0: while (true) { michael@0: types::HeapTypeSetKey property = type->property(NameToId(name)); michael@0: JS_ALWAYS_TRUE(!property.isOwnProperty(constraints())); michael@0: michael@0: // Don't mark the proto. It will be held down by the shape michael@0: // guard. This allows us to use properties found on prototypes michael@0: // with properties unknown to TI. 
michael@0: if (type->proto() == foundProto) michael@0: break; michael@0: type = types::TypeObjectKey::get(type->proto().toObjectOrNull()); michael@0: } michael@0: } michael@0: } michael@0: michael@0: inline MDefinition * michael@0: IonBuilder::testCommonGetterSetter(types::TemporaryTypeSet *types, PropertyName *name, michael@0: bool isGetter, JSObject *foundProto, Shape *lastProperty) michael@0: { michael@0: // Check if all objects being accessed will lookup the name through foundProto. michael@0: if (!objectsHaveCommonPrototype(types, name, isGetter, foundProto)) michael@0: return nullptr; michael@0: michael@0: // We can optimize the getter/setter, so freeze all involved properties to michael@0: // ensure there isn't a lower shadowing getter or setter installed in the michael@0: // future. michael@0: freezePropertiesForCommonPrototype(types, name, foundProto); michael@0: michael@0: // Add a shape guard on the prototype we found the property on. The rest of michael@0: // the prototype chain is guarded by TI freezes. Note that a shape guard is michael@0: // good enough here, even in the proxy case, because we have ensured there michael@0: // are no lookup hooks for this property. michael@0: MInstruction *wrapper = constant(ObjectValue(*foundProto)); michael@0: return addShapeGuard(wrapper, lastProperty, Bailout_ShapeGuard); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::annotateGetPropertyCache(MDefinition *obj, MGetPropertyCache *getPropCache, michael@0: types::TemporaryTypeSet *objTypes, michael@0: types::TemporaryTypeSet *pushedTypes) michael@0: { michael@0: PropertyName *name = getPropCache->name(); michael@0: michael@0: // Ensure every pushed value is a singleton. 
michael@0: if (pushedTypes->unknownObject() || pushedTypes->baseFlags() != 0) michael@0: return true; michael@0: michael@0: for (unsigned i = 0; i < pushedTypes->getObjectCount(); i++) { michael@0: if (pushedTypes->getTypeObject(i) != nullptr) michael@0: return true; michael@0: } michael@0: michael@0: // Object's typeset should be a proper object michael@0: if (!objTypes || objTypes->baseFlags() || objTypes->unknownObject()) michael@0: return true; michael@0: michael@0: unsigned int objCount = objTypes->getObjectCount(); michael@0: if (objCount == 0) michael@0: return true; michael@0: michael@0: InlinePropertyTable *inlinePropTable = getPropCache->initInlinePropertyTable(alloc(), pc); michael@0: if (!inlinePropTable) michael@0: return false; michael@0: michael@0: // Ensure that the relevant property typeset for each type object is michael@0: // is a single-object typeset containing a JSFunction michael@0: for (unsigned int i = 0; i < objCount; i++) { michael@0: types::TypeObject *baseTypeObj = objTypes->getTypeObject(i); michael@0: if (!baseTypeObj) michael@0: continue; michael@0: types::TypeObjectKey *typeObj = types::TypeObjectKey::get(baseTypeObj); michael@0: if (typeObj->unknownProperties() || !typeObj->hasTenuredProto() || !typeObj->proto().isObject()) michael@0: continue; michael@0: michael@0: const Class *clasp = typeObj->clasp(); michael@0: if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) michael@0: continue; michael@0: michael@0: types::HeapTypeSetKey ownTypes = typeObj->property(NameToId(name)); michael@0: if (ownTypes.isOwnProperty(constraints())) michael@0: continue; michael@0: michael@0: JSObject *singleton = testSingletonProperty(typeObj->proto().toObject(), name); michael@0: if (!singleton || !singleton->is()) michael@0: continue; michael@0: michael@0: // Don't add cases corresponding to non-observed pushes michael@0: if (!pushedTypes->hasType(types::Type::ObjectType(singleton))) michael@0: continue; 
michael@0: michael@0: if (!inlinePropTable->addEntry(alloc(), baseTypeObj, &singleton->as())) michael@0: return false; michael@0: } michael@0: michael@0: if (inlinePropTable->numEntries() == 0) { michael@0: getPropCache->clearInlinePropertyTable(); michael@0: return true; michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: if (inlinePropTable->numEntries() > 0) michael@0: IonSpew(IonSpew_Inlining, "Annotated GetPropertyCache with %d/%d inline cases", michael@0: (int) inlinePropTable->numEntries(), (int) objCount); michael@0: #endif michael@0: michael@0: // If we successfully annotated the GetPropertyCache and there are inline cases, michael@0: // then keep a resume point of the state right before this instruction for use michael@0: // later when we have to bail out to this point in the fallback case of a michael@0: // PolyInlineDispatch. michael@0: if (inlinePropTable->numEntries() > 0) { michael@0: // Push the object back onto the stack temporarily to capture the resume point. michael@0: current->push(obj); michael@0: MResumePoint *resumePoint = MResumePoint::New(alloc(), current, pc, callerResumePoint_, michael@0: MResumePoint::ResumeAt); michael@0: if (!resumePoint) michael@0: return false; michael@0: inlinePropTable->setPriorResumePoint(resumePoint); michael@0: current->pop(); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: // Returns true if an idempotent cache has ever invalidated this script michael@0: // or an outer script. 
michael@0: bool michael@0: IonBuilder::invalidatedIdempotentCache() michael@0: { michael@0: IonBuilder *builder = this; michael@0: do { michael@0: if (builder->script()->invalidatedIdempotentCache()) michael@0: return true; michael@0: builder = builder->callerBuilder_; michael@0: } while (builder); michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::loadSlot(MDefinition *obj, size_t slot, size_t nfixed, MIRType rvalType, michael@0: bool barrier, types::TemporaryTypeSet *types) michael@0: { michael@0: if (slot < nfixed) { michael@0: MLoadFixedSlot *load = MLoadFixedSlot::New(alloc(), obj, slot); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: load->setResultType(rvalType); michael@0: return pushTypeBarrier(load, types, barrier); michael@0: } michael@0: michael@0: MSlots *slots = MSlots::New(alloc(), obj); michael@0: current->add(slots); michael@0: michael@0: MLoadSlot *load = MLoadSlot::New(alloc(), slots, slot - nfixed); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: load->setResultType(rvalType); michael@0: return pushTypeBarrier(load, types, barrier); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::loadSlot(MDefinition *obj, Shape *shape, MIRType rvalType, michael@0: bool barrier, types::TemporaryTypeSet *types) michael@0: { michael@0: return loadSlot(obj, shape->slot(), shape->numFixedSlots(), rvalType, barrier, types); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::storeSlot(MDefinition *obj, size_t slot, size_t nfixed, michael@0: MDefinition *value, bool needsBarrier, michael@0: MIRType slotType /* = MIRType_None */) michael@0: { michael@0: if (slot < nfixed) { michael@0: MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, slot, value); michael@0: current->add(store); michael@0: current->push(value); michael@0: if (needsBarrier) michael@0: store->setNeedsBarrier(); michael@0: return resumeAfter(store); 
michael@0: } michael@0: michael@0: MSlots *slots = MSlots::New(alloc(), obj); michael@0: current->add(slots); michael@0: michael@0: MStoreSlot *store = MStoreSlot::New(alloc(), slots, slot - nfixed, value); michael@0: current->add(store); michael@0: current->push(value); michael@0: if (needsBarrier) michael@0: store->setNeedsBarrier(); michael@0: if (slotType != MIRType_None) michael@0: store->setSlotType(slotType); michael@0: return resumeAfter(store); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::storeSlot(MDefinition *obj, Shape *shape, MDefinition *value, bool needsBarrier, michael@0: MIRType slotType /* = MIRType_None */) michael@0: { michael@0: JS_ASSERT(shape->writable()); michael@0: return storeSlot(obj, shape->slot(), shape->numFixedSlots(), value, needsBarrier, slotType); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getprop(PropertyName *name) michael@0: { michael@0: bool emitted = false; michael@0: michael@0: // Try to optimize arguments.length. michael@0: if (!getPropTryArgumentsLength(&emitted) || emitted) michael@0: return emitted; michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), michael@0: current->peek(-1), name, types); michael@0: michael@0: // Always use a call if we are performing analysis and michael@0: // not actually emitting code, to simplify later analysis. Also skip deeper michael@0: // analysis if there are no known types for this operation, as it will michael@0: // always invalidate when executing. 
michael@0: if (info().executionModeIsAnalysis() || types->empty()) { michael@0: MDefinition *obj = current->peek(-1); michael@0: MCallGetProperty *call = MCallGetProperty::New(alloc(), obj, name, *pc == JSOP_CALLPROP); michael@0: current->add(call); michael@0: michael@0: // During the definite properties analysis we can still try to bake in michael@0: // constants read off the prototype chain, to allow inlining later on. michael@0: // In this case we still need the getprop call so that the later michael@0: // analysis knows when the |this| value has been read from. michael@0: if (info().executionModeIsAnalysis()) { michael@0: if (!getPropTryConstant(&emitted, name, types) || emitted) michael@0: return emitted; michael@0: } michael@0: michael@0: current->pop(); michael@0: current->push(call); michael@0: return resumeAfter(call) && pushTypeBarrier(call, types, true); michael@0: } michael@0: michael@0: // Try to hardcode known constants. michael@0: if (!getPropTryConstant(&emitted, name, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit loads from known binary data blocks michael@0: if (!getPropTryTypedObject(&emitted, name, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit loads from definite slots. michael@0: if (!getPropTryDefiniteSlot(&emitted, name, barrier, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to inline a common property getter, or make a call. michael@0: if (!getPropTryCommonGetter(&emitted, name, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit a monomorphic/polymorphic access based on baseline caches. michael@0: if (!getPropTryInlineAccess(&emitted, name, barrier, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit a polymorphic cache. michael@0: if (!getPropTryCache(&emitted, name, barrier, types) || emitted) michael@0: return emitted; michael@0: michael@0: // Emit a call. 
michael@0: MDefinition *obj = current->pop(); michael@0: MCallGetProperty *call = MCallGetProperty::New(alloc(), obj, name, *pc == JSOP_CALLPROP); michael@0: current->add(call); michael@0: current->push(call); michael@0: if (!resumeAfter(call)) michael@0: return false; michael@0: michael@0: return pushTypeBarrier(call, types, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryArgumentsLength(bool *emitted) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: if (current->peek(-1)->type() != MIRType_MagicOptimizedArguments) { michael@0: if (script()->argumentsHasVarBinding() && michael@0: current->peek(-1)->mightBeType(MIRType_MagicOptimizedArguments)) michael@0: { michael@0: return abort("Type is not definitely lazy arguments."); michael@0: } michael@0: return true; michael@0: } michael@0: if (JSOp(*pc) != JSOP_LENGTH) michael@0: return true; michael@0: michael@0: *emitted = true; michael@0: return jsop_arguments_length(); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryConstant(bool *emitted, PropertyName *name, michael@0: types::TemporaryTypeSet *types) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: JSObject *singleton = types ? types->getSingleton() : nullptr; michael@0: if (!singleton) michael@0: return true; michael@0: michael@0: bool testObject, testString; michael@0: if (!testSingletonPropertyTypes(current->peek(-1), singleton, name, &testObject, &testString)) michael@0: return true; michael@0: michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: // Property access is a known constant -- safe to emit. 
michael@0: JS_ASSERT(!testString || !testObject); michael@0: if (testObject) michael@0: current->add(MGuardObject::New(alloc(), obj)); michael@0: else if (testString) michael@0: current->add(MGuardString::New(alloc(), obj)); michael@0: else michael@0: obj->setImplicitlyUsedUnchecked(); michael@0: michael@0: pushConstant(ObjectValue(*singleton)); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryTypedObject(bool *emitted, PropertyName *name, michael@0: types::TemporaryTypeSet *resultTypes) michael@0: { michael@0: TypeDescrSet fieldDescrs; michael@0: int32_t fieldOffset; michael@0: size_t fieldIndex; michael@0: if (!lookupTypedObjectField(current->peek(-1), name, &fieldOffset, michael@0: &fieldDescrs, &fieldIndex)) michael@0: return false; michael@0: if (fieldDescrs.empty()) michael@0: return true; michael@0: michael@0: switch (fieldDescrs.kind()) { michael@0: case TypeDescr::Reference: michael@0: return true; michael@0: michael@0: case TypeDescr::X4: michael@0: // FIXME (bug 894104): load into a MIRType_float32x4 etc michael@0: return true; michael@0: michael@0: case TypeDescr::Struct: michael@0: case TypeDescr::SizedArray: michael@0: return getPropTryComplexPropOfTypedObject(emitted, michael@0: fieldOffset, michael@0: fieldDescrs, michael@0: fieldIndex, michael@0: resultTypes); michael@0: michael@0: case TypeDescr::Scalar: michael@0: return getPropTryScalarPropOfTypedObject(emitted, michael@0: fieldOffset, michael@0: fieldDescrs, michael@0: resultTypes); michael@0: michael@0: case TypeDescr::UnsizedArray: michael@0: MOZ_ASSUME_UNREACHABLE("Field of unsized array type"); michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("Bad kind"); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryScalarPropOfTypedObject(bool *emitted, michael@0: int32_t fieldOffset, michael@0: TypeDescrSet fieldDescrs, michael@0: types::TemporaryTypeSet *resultTypes) michael@0: { 
michael@0: // Must always be loading the same scalar type michael@0: ScalarTypeDescr::Type fieldType; michael@0: if (!fieldDescrs.scalarType(&fieldType)) michael@0: return true; michael@0: michael@0: // OK, perform the optimization michael@0: michael@0: MDefinition *typedObj = current->pop(); michael@0: michael@0: return pushScalarLoadFromTypedObject(emitted, typedObj, constantInt(fieldOffset), michael@0: fieldType, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryComplexPropOfTypedObject(bool *emitted, michael@0: int32_t fieldOffset, michael@0: TypeDescrSet fieldDescrs, michael@0: size_t fieldIndex, michael@0: types::TemporaryTypeSet *resultTypes) michael@0: { michael@0: // Must know the field index so that we can load the new type michael@0: // object for the derived value michael@0: if (fieldIndex == SIZE_MAX) michael@0: return true; michael@0: michael@0: // OK, perform the optimization michael@0: michael@0: MDefinition *typedObj = current->pop(); michael@0: michael@0: // Identify the type object for the field. 
michael@0: MDefinition *type = loadTypedObjectType(typedObj); michael@0: MDefinition *fieldTypeObj = typeObjectForFieldFromStructType(type, fieldIndex); michael@0: michael@0: return pushDerivedTypedObject(emitted, typedObj, constantInt(fieldOffset), michael@0: fieldDescrs, fieldTypeObj, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryDefiniteSlot(bool *emitted, PropertyName *name, michael@0: bool barrier, types::TemporaryTypeSet *types) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: types::HeapTypeSetKey property; michael@0: if (!getDefiniteSlot(current->peek(-1)->resultTypeSet(), name, &property)) michael@0: return true; michael@0: michael@0: MDefinition *obj = current->pop(); michael@0: MDefinition *useObj = obj; michael@0: if (obj->type() != MIRType_Object) { michael@0: MGuardObject *guard = MGuardObject::New(alloc(), obj); michael@0: current->add(guard); michael@0: useObj = guard; michael@0: } michael@0: michael@0: MLoadFixedSlot *fixed = MLoadFixedSlot::New(alloc(), useObj, property.maybeTypes()->definiteSlot()); michael@0: if (!barrier) michael@0: fixed->setResultType(types->getKnownMIRType()); michael@0: michael@0: current->add(fixed); michael@0: current->push(fixed); michael@0: michael@0: if (!pushTypeBarrier(fixed, types, barrier)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryCommonGetter(bool *emitted, PropertyName *name, michael@0: types::TemporaryTypeSet *types) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: Shape *lastProperty = nullptr; michael@0: JSFunction *commonGetter = nullptr; michael@0: JSObject *foundProto = inspector->commonGetPropFunction(pc, &lastProperty, &commonGetter); michael@0: if (!foundProto) michael@0: return true; michael@0: michael@0: types::TemporaryTypeSet *objTypes = current->peek(-1)->resultTypeSet(); michael@0: MDefinition *guard = 
testCommonGetterSetter(objTypes, name, /* isGetter = */ true, michael@0: foundProto, lastProperty); michael@0: if (!guard) michael@0: return true; michael@0: michael@0: bool isDOM = objTypes->isDOMClass(); michael@0: michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: if (isDOM && testShouldDOMCall(objTypes, commonGetter, JSJitInfo::Getter)) { michael@0: const JSJitInfo *jitinfo = commonGetter->jitInfo(); michael@0: MInstruction *get; michael@0: if (jitinfo->isInSlot) { michael@0: // We can't use MLoadFixedSlot here because it might not have the michael@0: // right aliasing behavior; we want to alias DOM setters. michael@0: get = MGetDOMMember::New(alloc(), jitinfo, obj, guard); michael@0: } else { michael@0: get = MGetDOMProperty::New(alloc(), jitinfo, obj, guard); michael@0: } michael@0: current->add(get); michael@0: current->push(get); michael@0: michael@0: if (get->isEffectful() && !resumeAfter(get)) michael@0: return false; michael@0: michael@0: if (!pushDOMTypeBarrier(get, types, commonGetter)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: // Don't call the getter with a primitive value. michael@0: if (objTypes->getKnownMIRType() != MIRType_Object) { michael@0: MGuardObject *guardObj = MGuardObject::New(alloc(), obj); michael@0: current->add(guardObj); michael@0: obj = guardObj; michael@0: } michael@0: michael@0: // Spoof stack to expected state for call. michael@0: michael@0: // Make sure there's enough room michael@0: if (!current->ensureHasSlots(2)) michael@0: return false; michael@0: pushConstant(ObjectValue(*commonGetter)); michael@0: michael@0: current->push(obj); michael@0: michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, 0)) michael@0: return false; michael@0: michael@0: // Inline if we can, otherwise, forget it and just generate a call. 
michael@0: bool inlineable = false; michael@0: if (commonGetter->isInterpreted()) { michael@0: InliningDecision decision = makeInliningDecision(commonGetter, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return false; michael@0: case InliningDecision_DontInline: michael@0: break; michael@0: case InliningDecision_Inline: michael@0: inlineable = true; michael@0: break; michael@0: } michael@0: } michael@0: michael@0: if (inlineable) { michael@0: if (!inlineScriptedCall(callInfo, commonGetter)) michael@0: return false; michael@0: } else { michael@0: if (!makeCall(commonGetter, callInfo, false)) michael@0: return false; michael@0: } michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: static bool michael@0: CanInlinePropertyOpShapes(const BaselineInspector::ShapeVector &shapes) michael@0: { michael@0: for (size_t i = 0; i < shapes.length(); i++) { michael@0: // We inline the property access as long as the shape is not in michael@0: // dictionary made. We cannot be sure that the shape is still a michael@0: // lastProperty, and calling Shape::search() on dictionary mode michael@0: // shapes that aren't lastProperty is invalid. 
michael@0: if (shapes[i]->inDictionary()) michael@0: return false; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryInlineAccess(bool *emitted, PropertyName *name, michael@0: bool barrier, types::TemporaryTypeSet *types) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: if (current->peek(-1)->type() != MIRType_Object) michael@0: return true; michael@0: michael@0: BaselineInspector::ShapeVector shapes(alloc()); michael@0: if (!inspector->maybeShapesForPropertyOp(pc, shapes)) michael@0: return false; michael@0: michael@0: if (shapes.empty() || !CanInlinePropertyOpShapes(shapes)) michael@0: return true; michael@0: michael@0: MIRType rvalType = types->getKnownMIRType(); michael@0: if (barrier || IsNullOrUndefined(rvalType)) michael@0: rvalType = MIRType_Value; michael@0: michael@0: MDefinition *obj = current->pop(); michael@0: if (shapes.length() == 1) { michael@0: // In the monomorphic case, use separate ShapeGuard and LoadSlot michael@0: // instructions. 
michael@0: spew("Inlining monomorphic GETPROP"); michael@0: michael@0: Shape *objShape = shapes[0]; michael@0: obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard); michael@0: michael@0: Shape *shape = objShape->searchLinear(NameToId(name)); michael@0: JS_ASSERT(shape); michael@0: michael@0: if (!loadSlot(obj, shape, rvalType, barrier, types)) michael@0: return false; michael@0: } else { michael@0: JS_ASSERT(shapes.length() > 1); michael@0: spew("Inlining polymorphic GETPROP"); michael@0: michael@0: MGetPropertyPolymorphic *load = MGetPropertyPolymorphic::New(alloc(), obj, name); michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: for (size_t i = 0; i < shapes.length(); i++) { michael@0: Shape *objShape = shapes[i]; michael@0: Shape *shape = objShape->searchLinear(NameToId(name)); michael@0: JS_ASSERT(shape); michael@0: if (!load->addShape(objShape, shape)) michael@0: return false; michael@0: } michael@0: michael@0: if (failedShapeGuard_) michael@0: load->setNotMovable(); michael@0: michael@0: load->setResultType(rvalType); michael@0: if (!pushTypeBarrier(load, types, barrier)) michael@0: return false; michael@0: } michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::getPropTryCache(bool *emitted, PropertyName *name, michael@0: bool barrier, types::TemporaryTypeSet *types) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: MDefinition *obj = current->peek(-1); michael@0: michael@0: // The input value must either be an object, or we should have strong suspicions michael@0: // that it can be safely unboxed to an object. 
michael@0: if (obj->type() != MIRType_Object) { michael@0: types::TemporaryTypeSet *types = obj->resultTypeSet(); michael@0: if (!types || !types->objectOrSentinel()) michael@0: return true; michael@0: } michael@0: michael@0: // Since getters have no guaranteed return values, we must barrier in order to be michael@0: // able to attach stubs for them. michael@0: if (inspector->hasSeenAccessedGetter(pc)) michael@0: barrier = true; michael@0: michael@0: if (needsToMonitorMissingProperties(types)) michael@0: barrier = true; michael@0: michael@0: // Caches can read values from prototypes, so update the barrier to michael@0: // reflect such possible values. michael@0: if (!barrier) michael@0: barrier = PropertyReadOnPrototypeNeedsTypeBarrier(constraints(), obj, name, types); michael@0: michael@0: current->pop(); michael@0: MGetPropertyCache *load = MGetPropertyCache::New(alloc(), obj, name, barrier); michael@0: michael@0: // Try to mark the cache as idempotent. michael@0: // michael@0: // In parallel execution, idempotency of caches is ignored, since we michael@0: // repeat the entire ForkJoin workload if we bail out. Note that it's michael@0: // overly restrictive to mark everything as idempotent, because we can michael@0: // treat non-idempotent caches in parallel as repeatable. 
michael@0: if (obj->type() == MIRType_Object && !invalidatedIdempotentCache() && michael@0: info().executionMode() != ParallelExecution) michael@0: { michael@0: if (PropertyReadIsIdempotent(constraints(), obj, name)) michael@0: load->setIdempotent(); michael@0: } michael@0: michael@0: if (JSOp(*pc) == JSOP_CALLPROP) { michael@0: if (!annotateGetPropertyCache(obj, load, obj->resultTypeSet(), types)) michael@0: return false; michael@0: } michael@0: michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: if (load->isEffectful() && !resumeAfter(load)) michael@0: return false; michael@0: michael@0: MIRType rvalType = types->getKnownMIRType(); michael@0: if (barrier || IsNullOrUndefined(rvalType)) michael@0: rvalType = MIRType_Value; michael@0: load->setResultType(rvalType); michael@0: michael@0: if (!pushTypeBarrier(load, types, barrier)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::needsToMonitorMissingProperties(types::TemporaryTypeSet *types) michael@0: { michael@0: // GetPropertyParIC and GetElementParIC cannot safely call michael@0: // TypeScript::Monitor to ensure that the observed type set contains michael@0: // undefined. To account for possible missing properties, which property michael@0: // types do not track, we must always insert a type barrier. michael@0: return (info().executionMode() == ParallelExecution && michael@0: !types->hasType(types::Type::UndefinedType())); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setprop(PropertyName *name) michael@0: { michael@0: MDefinition *value = current->pop(); michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: bool emitted = false; michael@0: michael@0: // Always use a call if we are doing the definite properties analysis and michael@0: // not actually emitting code, to simplify later analysis. 
michael@0: if (info().executionModeIsAnalysis()) { michael@0: MInstruction *ins = MCallSetProperty::New(alloc(), obj, value, name, script()->strict()); michael@0: current->add(ins); michael@0: current->push(value); michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: // Add post barrier if needed. michael@0: if (NeedsPostBarrier(info(), value)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, value)); michael@0: michael@0: // Try to inline a common property setter, or make a call. michael@0: if (!setPropTryCommonSetter(&emitted, obj, name, value) || emitted) michael@0: return emitted; michael@0: michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: bool barrier = PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, &obj, name, &value, michael@0: /* canModify = */ true); michael@0: michael@0: // Try to emit stores to known binary data blocks michael@0: if (!setPropTryTypedObject(&emitted, obj, name, value) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit store from definite slots. michael@0: if (!setPropTryDefiniteSlot(&emitted, obj, name, value, barrier, objTypes) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit a monomorphic/polymorphic store based on baseline caches. michael@0: if (!setPropTryInlineAccess(&emitted, obj, name, value, barrier, objTypes) || emitted) michael@0: return emitted; michael@0: michael@0: // Try to emit a polymorphic cache. michael@0: if (!setPropTryCache(&emitted, obj, name, value, barrier, objTypes) || emitted) michael@0: return emitted; michael@0: michael@0: // Emit call. 
michael@0: MInstruction *ins = MCallSetProperty::New(alloc(), obj, value, name, script()->strict()); michael@0: current->add(ins); michael@0: current->push(value); michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryCommonSetter(bool *emitted, MDefinition *obj, michael@0: PropertyName *name, MDefinition *value) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: Shape *lastProperty = nullptr; michael@0: JSFunction *commonSetter = nullptr; michael@0: JSObject *foundProto = inspector->commonSetPropFunction(pc, &lastProperty, &commonSetter); michael@0: if (!foundProto) michael@0: return true; michael@0: michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: MDefinition *guard = testCommonGetterSetter(objTypes, name, /* isGetter = */ false, michael@0: foundProto, lastProperty); michael@0: if (!guard) michael@0: return true; michael@0: michael@0: bool isDOM = objTypes->isDOMClass(); michael@0: michael@0: // Emit common setter. michael@0: michael@0: // Setters can be called even if the property write needs a type michael@0: // barrier, as calling the setter does not actually write any data michael@0: // properties. michael@0: michael@0: // Try emitting dom call. michael@0: if (!setPropTryCommonDOMSetter(emitted, obj, value, commonSetter, isDOM)) michael@0: return false; michael@0: michael@0: if (*emitted) michael@0: return true; michael@0: michael@0: // Don't call the setter with a primitive value. michael@0: if (objTypes->getKnownMIRType() != MIRType_Object) { michael@0: MGuardObject *guardObj = MGuardObject::New(alloc(), obj); michael@0: current->add(guardObj); michael@0: obj = guardObj; michael@0: } michael@0: michael@0: // Dummy up the stack, as in getprop. We are pushing an extra value, so michael@0: // ensure there is enough space. 
michael@0: if (!current->ensureHasSlots(3)) michael@0: return false; michael@0: michael@0: pushConstant(ObjectValue(*commonSetter)); michael@0: michael@0: current->push(obj); michael@0: current->push(value); michael@0: michael@0: // Call the setter. Note that we have to push the original value, not michael@0: // the setter's return value. michael@0: CallInfo callInfo(alloc(), false); michael@0: if (!callInfo.init(current, 1)) michael@0: return false; michael@0: michael@0: // Ensure that we know we are calling a setter in case we inline it. michael@0: callInfo.markAsSetter(); michael@0: michael@0: // Inline the setter if we can. michael@0: if (commonSetter->isInterpreted()) { michael@0: InliningDecision decision = makeInliningDecision(commonSetter, callInfo); michael@0: switch (decision) { michael@0: case InliningDecision_Error: michael@0: return false; michael@0: case InliningDecision_DontInline: michael@0: break; michael@0: case InliningDecision_Inline: michael@0: if (!inlineScriptedCall(callInfo, commonSetter)) michael@0: return false; michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: } michael@0: michael@0: MCall *call = makeCallHelper(commonSetter, callInfo, false); michael@0: if (!call) michael@0: return false; michael@0: michael@0: current->push(value); michael@0: if (!resumeAfter(call)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryCommonDOMSetter(bool *emitted, MDefinition *obj, michael@0: MDefinition *value, JSFunction *setter, michael@0: bool isDOM) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (!isDOM) michael@0: return true; michael@0: michael@0: types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); michael@0: if (!testShouldDOMCall(objTypes, setter, JSJitInfo::Setter)) michael@0: return true; michael@0: michael@0: // Emit SetDOMProperty. 
michael@0: JS_ASSERT(setter->jitInfo()->type() == JSJitInfo::Setter); michael@0: MSetDOMProperty *set = MSetDOMProperty::New(alloc(), setter->jitInfo()->setter, obj, value); michael@0: michael@0: current->add(set); michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(set)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryTypedObject(bool *emitted, MDefinition *obj, michael@0: PropertyName *name, MDefinition *value) michael@0: { michael@0: TypeDescrSet fieldDescrs; michael@0: int32_t fieldOffset; michael@0: size_t fieldIndex; michael@0: if (!lookupTypedObjectField(obj, name, &fieldOffset, &fieldDescrs, michael@0: &fieldIndex)) michael@0: return false; michael@0: if (fieldDescrs.empty()) michael@0: return true; michael@0: michael@0: switch (fieldDescrs.kind()) { michael@0: case TypeDescr::X4: michael@0: // FIXME (bug 894104): store into a MIRType_float32x4 etc michael@0: return true; michael@0: michael@0: case TypeDescr::Reference: michael@0: case TypeDescr::Struct: michael@0: case TypeDescr::SizedArray: michael@0: case TypeDescr::UnsizedArray: michael@0: // For now, only optimize storing scalars. michael@0: return true; michael@0: michael@0: case TypeDescr::Scalar: michael@0: return setPropTryScalarPropOfTypedObject(emitted, obj, fieldOffset, michael@0: value, fieldDescrs); michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("Unknown kind"); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryScalarPropOfTypedObject(bool *emitted, michael@0: MDefinition *obj, michael@0: int32_t fieldOffset, michael@0: MDefinition *value, michael@0: TypeDescrSet fieldDescrs) michael@0: { michael@0: // Must always be loading the same scalar type michael@0: ScalarTypeDescr::Type fieldType; michael@0: if (!fieldDescrs.scalarType(&fieldType)) michael@0: return true; michael@0: michael@0: // OK! Perform the optimization. 
michael@0: michael@0: if (!storeScalarTypedObjectValue(obj, constantInt(fieldOffset), fieldType, true, false, value)) michael@0: return false; michael@0: michael@0: current->push(value); michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryDefiniteSlot(bool *emitted, MDefinition *obj, michael@0: PropertyName *name, MDefinition *value, michael@0: bool barrier, types::TemporaryTypeSet *objTypes) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (barrier) michael@0: return true; michael@0: michael@0: types::HeapTypeSetKey property; michael@0: if (!getDefiniteSlot(obj->resultTypeSet(), name, &property)) michael@0: return true; michael@0: michael@0: if (property.nonWritable(constraints())) michael@0: return true; michael@0: michael@0: MStoreFixedSlot *fixed = MStoreFixedSlot::New(alloc(), obj, property.maybeTypes()->definiteSlot(), value); michael@0: current->add(fixed); michael@0: current->push(value); michael@0: michael@0: if (property.needsBarrier(constraints())) michael@0: fixed->setNeedsBarrier(); michael@0: michael@0: if (!resumeAfter(fixed)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryInlineAccess(bool *emitted, MDefinition *obj, michael@0: PropertyName *name, michael@0: MDefinition *value, bool barrier, michael@0: types::TemporaryTypeSet *objTypes) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: if (barrier) michael@0: return true; michael@0: michael@0: BaselineInspector::ShapeVector shapes(alloc()); michael@0: if (!inspector->maybeShapesForPropertyOp(pc, shapes)) michael@0: return false; michael@0: michael@0: if (shapes.empty()) michael@0: return true; michael@0: michael@0: if (!CanInlinePropertyOpShapes(shapes)) michael@0: return true; michael@0: michael@0: if (shapes.length() == 1) { michael@0: 
spew("Inlining monomorphic SETPROP"); michael@0: michael@0: // The Baseline IC was monomorphic, so we inline the property access as michael@0: // long as the shape is not in dictionary mode. We cannot be sure michael@0: // that the shape is still a lastProperty, and calling Shape::search michael@0: // on dictionary mode shapes that aren't lastProperty is invalid. michael@0: Shape *objShape = shapes[0]; michael@0: obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard); michael@0: michael@0: Shape *shape = objShape->searchLinear(NameToId(name)); michael@0: JS_ASSERT(shape); michael@0: michael@0: bool needsBarrier = objTypes->propertyNeedsBarrier(constraints(), NameToId(name)); michael@0: if (!storeSlot(obj, shape, value, needsBarrier)) michael@0: return false; michael@0: } else { michael@0: JS_ASSERT(shapes.length() > 1); michael@0: spew("Inlining polymorphic SETPROP"); michael@0: michael@0: MSetPropertyPolymorphic *ins = MSetPropertyPolymorphic::New(alloc(), obj, value); michael@0: current->add(ins); michael@0: current->push(value); michael@0: michael@0: for (size_t i = 0; i < shapes.length(); i++) { michael@0: Shape *objShape = shapes[i]; michael@0: Shape *shape = objShape->searchLinear(NameToId(name)); michael@0: JS_ASSERT(shape); michael@0: if (!ins->addShape(objShape, shape)) michael@0: return false; michael@0: } michael@0: michael@0: if (objTypes->propertyNeedsBarrier(constraints(), NameToId(name))) michael@0: ins->setNeedsBarrier(); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: } michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::setPropTryCache(bool *emitted, MDefinition *obj, michael@0: PropertyName *name, MDefinition *value, michael@0: bool barrier, types::TemporaryTypeSet *objTypes) michael@0: { michael@0: JS_ASSERT(*emitted == false); michael@0: michael@0: // Emit SetPropertyCache. 
michael@0: MSetPropertyCache *ins = MSetPropertyCache::New(alloc(), obj, value, name, script()->strict(), barrier); michael@0: michael@0: if (!objTypes || objTypes->propertyNeedsBarrier(constraints(), NameToId(name))) michael@0: ins->setNeedsBarrier(); michael@0: michael@0: current->add(ins); michael@0: current->push(value); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: *emitted = true; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_delprop(PropertyName *name) michael@0: { michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: MInstruction *ins = MDeleteProperty::New(alloc(), obj, name); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_delelem() michael@0: { michael@0: MDefinition *index = current->pop(); michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: MDeleteElement *ins = MDeleteElement::New(alloc(), obj, index); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_regexp(RegExpObject *reobj) michael@0: { michael@0: // JS semantics require regular expression literals to create different michael@0: // objects every time they execute. We only need to do this cloning if the michael@0: // script could actually observe the effect of such cloning, for instance michael@0: // by getting or setting properties on it. michael@0: // michael@0: // First, make sure the regex is one we can safely optimize. Lowering can michael@0: // then check if this regex object only flows into known natives and can michael@0: // avoid cloning in this case. 
michael@0: michael@0: bool mustClone = true; michael@0: types::TypeObjectKey *typeObj = types::TypeObjectKey::get(&script()->global()); michael@0: if (!typeObj->hasFlags(constraints(), types::OBJECT_FLAG_REGEXP_FLAGS_SET)) { michael@0: RegExpStatics *res = script()->global().getRegExpStatics(); michael@0: michael@0: DebugOnly origFlags = reobj->getFlags(); michael@0: DebugOnly staticsFlags = res->getFlags(); michael@0: JS_ASSERT((origFlags & staticsFlags) == staticsFlags); michael@0: michael@0: if (!reobj->global() && !reobj->sticky()) michael@0: mustClone = false; michael@0: } michael@0: michael@0: MRegExp *regexp = MRegExp::New(alloc(), constraints(), reobj, mustClone); michael@0: current->add(regexp); michael@0: current->push(regexp); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_object(JSObject *obj) michael@0: { michael@0: if (options.cloneSingletons()) { michael@0: MCloneLiteral *clone = MCloneLiteral::New(alloc(), constant(ObjectValue(*obj))); michael@0: current->add(clone); michael@0: current->push(clone); michael@0: return resumeAfter(clone); michael@0: } michael@0: michael@0: compartment->setSingletonsAsValues(); michael@0: pushConstant(ObjectValue(*obj)); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_lambda(JSFunction *fun) michael@0: { michael@0: MOZ_ASSERT(analysis().usesScopeChain()); michael@0: MOZ_ASSERT(!fun->isArrow()); michael@0: michael@0: if (fun->isNative() && IsAsmJSModuleNative(fun->native())) michael@0: return abort("asm.js module function"); michael@0: michael@0: MLambda *ins = MLambda::New(alloc(), constraints(), current->scopeChain(), fun); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_lambda_arrow(JSFunction *fun) michael@0: { michael@0: MOZ_ASSERT(analysis().usesScopeChain()); michael@0: 
MOZ_ASSERT(fun->isArrow()); michael@0: MOZ_ASSERT(!fun->isNative()); michael@0: michael@0: MDefinition *thisDef = current->pop(); michael@0: michael@0: MLambdaArrow *ins = MLambdaArrow::New(alloc(), constraints(), current->scopeChain(), michael@0: thisDef, fun); michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setarg(uint32_t arg) michael@0: { michael@0: // To handle this case, we should spill the arguments to the space where michael@0: // actual arguments are stored. The tricky part is that if we add a MIR michael@0: // to wrap the spilling action, we don't want the spilling to be michael@0: // captured by the GETARG and by the resume point, only by michael@0: // MGetFrameArgument. michael@0: JS_ASSERT(analysis_.hasSetArg()); michael@0: MDefinition *val = current->peek(-1); michael@0: michael@0: // If an arguments object is in use, and it aliases formals, then all SETARGs michael@0: // must go through the arguments object. michael@0: if (info().argsObjAliasesFormals()) { michael@0: if (NeedsPostBarrier(info(), val)) michael@0: current->add(MPostWriteBarrier::New(alloc(), current->argumentsObject(), val)); michael@0: current->add(MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(), michael@0: GET_ARGNO(pc), val)); michael@0: return true; michael@0: } michael@0: michael@0: // :TODO: if hasArguments() is true, and the script has a JSOP_SETARG, then michael@0: // convert all arg accesses to go through the arguments object. (see Bug 957475) michael@0: if (info().hasArguments()) michael@0: return abort("NYI: arguments & setarg."); michael@0: michael@0: // Otherwise, if a magic arguments is in use, and it aliases formals, and there exist michael@0: // arguments[...] GETELEM expressions in the script, then SetFrameArgument must be used. michael@0: // If no arguments[...] 
GETELEM expressions are in the script, and an argsobj is not michael@0: // required, then it means that any aliased argument set can never be observed, and michael@0: // the frame does not actually need to be updated with the new arg value. michael@0: if (info().argumentsAliasesFormals()) { michael@0: // JSOP_SETARG with magic arguments within inline frames is not yet supported. michael@0: JS_ASSERT(script()->uninlineable() && !isInlineBuilder()); michael@0: michael@0: MSetFrameArgument *store = MSetFrameArgument::New(alloc(), arg, val); michael@0: modifiesFrameArguments_ = true; michael@0: current->add(store); michael@0: current->setArg(arg); michael@0: return true; michael@0: } michael@0: michael@0: // If this assignment is at the start of the function and is coercing michael@0: // the original value for the argument which was passed in, loosen michael@0: // the type information for that original argument if it is currently michael@0: // empty due to originally executing in the interpreter. 
michael@0: if (graph().numBlocks() == 1 && michael@0: (val->isBitOr() || val->isBitAnd() || val->isMul() /* for JSOP_POS */)) michael@0: { michael@0: for (size_t i = 0; i < val->numOperands(); i++) { michael@0: MDefinition *op = val->getOperand(i); michael@0: if (op->isParameter() && michael@0: op->toParameter()->index() == (int32_t)arg && michael@0: op->resultTypeSet() && michael@0: op->resultTypeSet()->empty()) michael@0: { michael@0: bool otherUses = false; michael@0: for (MUseDefIterator iter(op); iter; iter++) { michael@0: MDefinition *def = iter.def(); michael@0: if (def == val) michael@0: continue; michael@0: otherUses = true; michael@0: } michael@0: if (!otherUses) { michael@0: JS_ASSERT(op->resultTypeSet() == &argTypes[arg]); michael@0: argTypes[arg].addType(types::Type::UnknownType(), alloc_->lifoAlloc()); michael@0: if (val->isMul()) { michael@0: val->setResultType(MIRType_Double); michael@0: val->toMul()->setSpecialization(MIRType_Double); michael@0: } else { michael@0: JS_ASSERT(val->type() == MIRType_Int32); michael@0: } michael@0: val->setResultTypeSet(nullptr); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: current->setArg(arg); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_defvar(uint32_t index) michael@0: { michael@0: JS_ASSERT(JSOp(*pc) == JSOP_DEFVAR || JSOp(*pc) == JSOP_DEFCONST); michael@0: michael@0: PropertyName *name = script()->getName(index); michael@0: michael@0: // Bake in attrs. michael@0: unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT; michael@0: if (JSOp(*pc) == JSOP_DEFCONST) michael@0: attrs |= JSPROP_READONLY; michael@0: michael@0: // Pass the ScopeChain. michael@0: JS_ASSERT(analysis().usesScopeChain()); michael@0: michael@0: // Bake the name pointer into the MDefVar. 
michael@0: MDefVar *defvar = MDefVar::New(alloc(), name, attrs, current->scopeChain()); michael@0: current->add(defvar); michael@0: michael@0: return resumeAfter(defvar); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_deffun(uint32_t index) michael@0: { michael@0: JSFunction *fun = script()->getFunction(index); michael@0: if (fun->isNative() && IsAsmJSModuleNative(fun->native())) michael@0: return abort("asm.js module function"); michael@0: michael@0: JS_ASSERT(analysis().usesScopeChain()); michael@0: michael@0: MDefFun *deffun = MDefFun::New(alloc(), fun, current->scopeChain()); michael@0: current->add(deffun); michael@0: michael@0: return resumeAfter(deffun); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_this() michael@0: { michael@0: if (!info().funMaybeLazy()) michael@0: return abort("JSOP_THIS outside of a JSFunction."); michael@0: michael@0: if (info().funMaybeLazy()->isArrow()) { michael@0: // Arrow functions store their lexical |this| in an extended slot. michael@0: MLoadArrowThis *thisObj = MLoadArrowThis::New(alloc(), getCallee()); michael@0: current->add(thisObj); michael@0: current->push(thisObj); michael@0: return true; michael@0: } michael@0: michael@0: if (script()->strict() || info().funMaybeLazy()->isSelfHostedBuiltin()) { michael@0: // No need to wrap primitive |this| in strict mode or self-hosted code. michael@0: current->pushSlot(info().thisSlot()); michael@0: return true; michael@0: } michael@0: michael@0: if (thisTypes->getKnownMIRType() == MIRType_Object || michael@0: (thisTypes->empty() && baselineFrame_ && baselineFrame_->thisType.isSomeObject())) michael@0: { michael@0: // This is safe, because if the entry type of |this| is an object, it michael@0: // will necessarily be an object throughout the entire function. OSR michael@0: // can introduce a phi, but this phi will be specialized. 
michael@0: current->pushSlot(info().thisSlot()); michael@0: return true; michael@0: } michael@0: michael@0: // If we are doing an analysis, we might not yet know the type of |this|. michael@0: // Instead of bailing out just push the |this| slot, as this code won't michael@0: // actually execute and it does not matter whether |this| is primitive. michael@0: if (info().executionModeIsAnalysis()) { michael@0: current->pushSlot(info().thisSlot()); michael@0: return true; michael@0: } michael@0: michael@0: // Hard case: |this| may be a primitive we have to wrap. michael@0: MDefinition *def = current->getSlot(info().thisSlot()); michael@0: michael@0: if (def->type() == MIRType_Object) { michael@0: // If we already computed a |this| object, we can reuse it. michael@0: current->push(def); michael@0: return true; michael@0: } michael@0: michael@0: MComputeThis *thisObj = MComputeThis::New(alloc(), def); michael@0: current->add(thisObj); michael@0: current->push(thisObj); michael@0: michael@0: current->setSlot(info().thisSlot(), thisObj); michael@0: michael@0: return resumeAfter(thisObj); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_typeof() michael@0: { michael@0: MDefinition *input = current->pop(); michael@0: MTypeOf *ins = MTypeOf::New(alloc(), input, input->type()); michael@0: michael@0: ins->infer(); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_toid() michael@0: { michael@0: // No-op if the index is an integer. 
michael@0: if (current->peek(-1)->type() == MIRType_Int32) michael@0: return true; michael@0: michael@0: MDefinition *index = current->pop(); michael@0: MToId *ins = MToId::New(alloc(), current->peek(-1), index); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_iter(uint8_t flags) michael@0: { michael@0: if (flags != JSITER_ENUMERATE) michael@0: nonStringIteration_ = true; michael@0: michael@0: MDefinition *obj = current->pop(); michael@0: MInstruction *ins = MIteratorStart::New(alloc(), obj, flags); michael@0: michael@0: if (!iterators_.append(ins)) michael@0: return false; michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_iternext() michael@0: { michael@0: MDefinition *iter = current->peek(-1); michael@0: MInstruction *ins = MIteratorNext::New(alloc(), iter); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: if (!resumeAfter(ins)) michael@0: return false; michael@0: michael@0: if (!nonStringIteration_ && !inspector->hasSeenNonStringIterNext(pc)) { michael@0: ins = MUnbox::New(alloc(), ins, MIRType_String, MUnbox::Fallible, Bailout_BaselineInfo); michael@0: current->add(ins); michael@0: current->rewriteAtDepth(-1, ins); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_itermore() michael@0: { michael@0: MDefinition *iter = current->peek(-1); michael@0: MInstruction *ins = MIteratorMore::New(alloc(), iter); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_iterend() michael@0: { michael@0: MDefinition *iter = current->pop(); michael@0: MInstruction *ins 
= MIteratorEnd::New(alloc(), iter); michael@0: michael@0: current->add(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::walkScopeChain(unsigned hops) michael@0: { michael@0: MDefinition *scope = current->getSlot(info().scopeChainSlot()); michael@0: michael@0: for (unsigned i = 0; i < hops; i++) { michael@0: MInstruction *ins = MEnclosingScope::New(alloc(), scope); michael@0: current->add(ins); michael@0: scope = ins; michael@0: } michael@0: michael@0: return scope; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::hasStaticScopeObject(ScopeCoordinate sc, JSObject **pcall) michael@0: { michael@0: JSScript *outerScript = ScopeCoordinateFunctionScript(script(), pc); michael@0: if (!outerScript || !outerScript->treatAsRunOnce()) michael@0: return false; michael@0: michael@0: types::TypeObjectKey *funType = michael@0: types::TypeObjectKey::get(outerScript->functionNonDelazifying()); michael@0: if (funType->hasFlags(constraints(), types::OBJECT_FLAG_RUNONCE_INVALIDATED)) michael@0: return false; michael@0: michael@0: // The script this aliased var operation is accessing will run only once, michael@0: // so there will be only one call object and the aliased var access can be michael@0: // compiled in the same manner as a global access. We still need to find michael@0: // the call object though. michael@0: michael@0: // Look for the call object on the current script's function's scope chain. michael@0: // If the current script is inner to the outer script and the function has michael@0: // singleton type then it should show up here. 
michael@0: michael@0: MDefinition *scope = current->getSlot(info().scopeChainSlot()); michael@0: scope->setImplicitlyUsedUnchecked(); michael@0: michael@0: JSObject *environment = script()->functionNonDelazifying()->environment(); michael@0: while (environment && !environment->is()) { michael@0: if (environment->is() && michael@0: !environment->as().isForEval() && michael@0: environment->as().callee().nonLazyScript() == outerScript) michael@0: { michael@0: JS_ASSERT(environment->hasSingletonType()); michael@0: *pcall = environment; michael@0: return true; michael@0: } michael@0: environment = environment->enclosingScope(); michael@0: } michael@0: michael@0: // Look for the call object on the current frame, if we are compiling the michael@0: // outer script itself. Don't do this if we are at entry to the outer michael@0: // script, as the call object we see will not be the real one --- after michael@0: // entering the Ion code a different call object will be created. michael@0: michael@0: if (script() == outerScript && baselineFrame_ && info().osrPc()) { michael@0: JSObject *singletonScope = baselineFrame_->singletonScopeChain; michael@0: if (singletonScope && michael@0: singletonScope->is() && michael@0: singletonScope->as().callee().nonLazyScript() == outerScript) michael@0: { michael@0: JS_ASSERT(singletonScope->hasSingletonType()); michael@0: *pcall = singletonScope; michael@0: return true; michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_getaliasedvar(ScopeCoordinate sc) michael@0: { michael@0: JSObject *call = nullptr; michael@0: if (hasStaticScopeObject(sc, &call) && call) { michael@0: PropertyName *name = ScopeCoordinateName(scopeCoordinateNameCache, script(), pc); michael@0: bool succeeded; michael@0: if (!getStaticName(call, name, &succeeded)) michael@0: return false; michael@0: if (succeeded) michael@0: return true; michael@0: } michael@0: michael@0: MDefinition *obj = 
walkScopeChain(sc.hops()); michael@0: michael@0: Shape *shape = ScopeCoordinateToStaticScopeShape(script(), pc); michael@0: michael@0: MInstruction *load; michael@0: if (shape->numFixedSlots() <= sc.slot()) { michael@0: MInstruction *slots = MSlots::New(alloc(), obj); michael@0: current->add(slots); michael@0: michael@0: load = MLoadSlot::New(alloc(), slots, sc.slot() - shape->numFixedSlots()); michael@0: } else { michael@0: load = MLoadFixedSlot::New(alloc(), obj, sc.slot()); michael@0: } michael@0: michael@0: current->add(load); michael@0: current->push(load); michael@0: michael@0: types::TemporaryTypeSet *types = bytecodeTypes(pc); michael@0: return pushTypeBarrier(load, types, true); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_setaliasedvar(ScopeCoordinate sc) michael@0: { michael@0: JSObject *call = nullptr; michael@0: if (hasStaticScopeObject(sc, &call)) { michael@0: uint32_t depth = current->stackDepth() + 1; michael@0: if (depth > current->nslots()) { michael@0: if (!current->increaseSlots(depth - current->nslots())) michael@0: return false; michael@0: } michael@0: MDefinition *value = current->pop(); michael@0: PropertyName *name = ScopeCoordinateName(scopeCoordinateNameCache, script(), pc); michael@0: michael@0: if (call) { michael@0: // Push the object on the stack to match the bound object expected in michael@0: // the global and property set cases. michael@0: pushConstant(ObjectValue(*call)); michael@0: current->push(value); michael@0: return setStaticName(call, name); michael@0: } michael@0: michael@0: // The call object has type information we need to respect but we michael@0: // couldn't find it. Just do a normal property assign. 
michael@0: MDefinition *obj = walkScopeChain(sc.hops()); michael@0: current->push(obj); michael@0: current->push(value); michael@0: return jsop_setprop(name); michael@0: } michael@0: michael@0: MDefinition *rval = current->peek(-1); michael@0: MDefinition *obj = walkScopeChain(sc.hops()); michael@0: michael@0: Shape *shape = ScopeCoordinateToStaticScopeShape(script(), pc); michael@0: michael@0: if (NeedsPostBarrier(info(), rval)) michael@0: current->add(MPostWriteBarrier::New(alloc(), obj, rval)); michael@0: michael@0: MInstruction *store; michael@0: if (shape->numFixedSlots() <= sc.slot()) { michael@0: MInstruction *slots = MSlots::New(alloc(), obj); michael@0: current->add(slots); michael@0: michael@0: store = MStoreSlot::NewBarriered(alloc(), slots, sc.slot() - shape->numFixedSlots(), rval); michael@0: } else { michael@0: store = MStoreFixedSlot::NewBarriered(alloc(), obj, sc.slot(), rval); michael@0: } michael@0: michael@0: current->add(store); michael@0: return resumeAfter(store); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_in() michael@0: { michael@0: MDefinition *obj = current->peek(-1); michael@0: MDefinition *id = current->peek(-2); michael@0: michael@0: if (ElementAccessIsDenseNative(obj, id) && michael@0: !ElementAccessHasExtraIndexedProperty(constraints(), obj)) michael@0: { michael@0: return jsop_in_dense(); michael@0: } michael@0: michael@0: current->pop(); michael@0: current->pop(); michael@0: MIn *ins = MIn::New(alloc(), id, obj); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_in_dense() michael@0: { michael@0: MDefinition *obj = current->pop(); michael@0: MDefinition *id = current->pop(); michael@0: michael@0: bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj); michael@0: michael@0: // Ensure id is an integer. 
michael@0: MInstruction *idInt32 = MToInt32::New(alloc(), id); michael@0: current->add(idInt32); michael@0: id = idInt32; michael@0: michael@0: // Get the elements vector. michael@0: MElements *elements = MElements::New(alloc(), obj); michael@0: current->add(elements); michael@0: michael@0: MInitializedLength *initLength = MInitializedLength::New(alloc(), elements); michael@0: current->add(initLength); michael@0: michael@0: // Check if id < initLength and elem[id] not a hole. michael@0: MInArray *ins = MInArray::New(alloc(), elements, id, initLength, obj, needsHoleCheck); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::jsop_instanceof() michael@0: { michael@0: MDefinition *rhs = current->pop(); michael@0: MDefinition *obj = current->pop(); michael@0: michael@0: // If this is an 'x instanceof function' operation and we can determine the michael@0: // exact function and prototype object being tested for, use a typed path. michael@0: do { michael@0: types::TemporaryTypeSet *rhsTypes = rhs->resultTypeSet(); michael@0: JSObject *rhsObject = rhsTypes ? 
rhsTypes->getSingleton() : nullptr; michael@0: if (!rhsObject || !rhsObject->is() || rhsObject->isBoundFunction()) michael@0: break; michael@0: michael@0: types::TypeObjectKey *rhsType = types::TypeObjectKey::get(rhsObject); michael@0: if (rhsType->unknownProperties()) michael@0: break; michael@0: michael@0: types::HeapTypeSetKey protoProperty = michael@0: rhsType->property(NameToId(names().prototype)); michael@0: JSObject *protoObject = protoProperty.singleton(constraints()); michael@0: if (!protoObject) michael@0: break; michael@0: michael@0: rhs->setImplicitlyUsedUnchecked(); michael@0: michael@0: MInstanceOf *ins = MInstanceOf::New(alloc(), obj, protoObject); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } while (false); michael@0: michael@0: MCallInstanceOf *ins = MCallInstanceOf::New(alloc(), obj, rhs); michael@0: michael@0: current->add(ins); michael@0: current->push(ins); michael@0: michael@0: return resumeAfter(ins); michael@0: } michael@0: michael@0: MInstruction * michael@0: IonBuilder::addConvertElementsToDoubles(MDefinition *elements) michael@0: { michael@0: MInstruction *convert = MConvertElementsToDoubles::New(alloc(), elements); michael@0: current->add(convert); michael@0: return convert; michael@0: } michael@0: michael@0: MInstruction * michael@0: IonBuilder::addBoundsCheck(MDefinition *index, MDefinition *length) michael@0: { michael@0: MInstruction *check = MBoundsCheck::New(alloc(), index, length); michael@0: current->add(check); michael@0: michael@0: // If a bounds check failed in the past, don't optimize bounds checks. 
michael@0: if (failedBoundsCheck_) michael@0: check->setNotMovable(); michael@0: michael@0: return check; michael@0: } michael@0: michael@0: MInstruction * michael@0: IonBuilder::addShapeGuard(MDefinition *obj, Shape *const shape, BailoutKind bailoutKind) michael@0: { michael@0: MGuardShape *guard = MGuardShape::New(alloc(), obj, shape, bailoutKind); michael@0: current->add(guard); michael@0: michael@0: // If a shape guard failed in the past, don't optimize shape guard. michael@0: if (failedShapeGuard_) michael@0: guard->setNotMovable(); michael@0: michael@0: return guard; michael@0: } michael@0: michael@0: types::TemporaryTypeSet * michael@0: IonBuilder::bytecodeTypes(jsbytecode *pc) michael@0: { michael@0: return types::TypeScript::BytecodeTypes(script(), pc, bytecodeTypeMap, &typeArrayHint, typeArray); michael@0: } michael@0: michael@0: TypeDescrSetHash * michael@0: IonBuilder::getOrCreateDescrSetHash() michael@0: { michael@0: if (!descrSetHash_) { michael@0: TypeDescrSetHash *hash = michael@0: alloc_->lifoAlloc()->new_(alloc()); michael@0: if (!hash || !hash->init()) michael@0: return nullptr; michael@0: michael@0: descrSetHash_ = hash; michael@0: } michael@0: return descrSetHash_; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::lookupTypeDescrSet(MDefinition *typedObj, michael@0: TypeDescrSet *out) michael@0: { michael@0: *out = TypeDescrSet(); // default to unknown michael@0: michael@0: // Extract TypeDescrSet directly if we can michael@0: if (typedObj->isNewDerivedTypedObject()) { michael@0: *out = typedObj->toNewDerivedTypedObject()->set(); michael@0: return true; michael@0: } michael@0: michael@0: types::TemporaryTypeSet *types = typedObj->resultTypeSet(); michael@0: return typeSetToTypeDescrSet(types, out); michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::typeSetToTypeDescrSet(types::TemporaryTypeSet *types, michael@0: TypeDescrSet *out) michael@0: { michael@0: // Extract TypeDescrSet directly if we can michael@0: if (!types 
|| types->getKnownMIRType() != MIRType_Object) michael@0: return true; michael@0: michael@0: // And only known objects. michael@0: if (types->unknownObject()) michael@0: return true; michael@0: michael@0: TypeDescrSetBuilder set; michael@0: for (uint32_t i = 0; i < types->getObjectCount(); i++) { michael@0: types::TypeObject *type = types->getTypeObject(i); michael@0: if (!type || type->unknownProperties()) michael@0: return true; michael@0: michael@0: if (!type->hasTypedObject()) michael@0: return true; michael@0: michael@0: TypeDescr &descr = type->typedObject()->descr(); michael@0: if (!set.insert(&descr)) michael@0: return false; michael@0: } michael@0: michael@0: return set.build(*this, out); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::loadTypedObjectType(MDefinition *typedObj) michael@0: { michael@0: // Shortcircuit derived type objects, meaning the intermediate michael@0: // objects created to represent `a.b` in an expression like michael@0: // `a.b.c`. In that case, the type object can be simply pulled michael@0: // from the operands of that instruction. michael@0: if (typedObj->isNewDerivedTypedObject()) michael@0: return typedObj->toNewDerivedTypedObject()->type(); michael@0: michael@0: MInstruction *load = MLoadFixedSlot::New(alloc(), typedObj, michael@0: JS_TYPEDOBJ_SLOT_TYPE_DESCR); michael@0: current->add(load); michael@0: return load; michael@0: } michael@0: michael@0: // Given a typed object `typedObj` and an offset `offset` into that michael@0: // object's data, returns another typed object and adusted offset michael@0: // where the data can be found. Often, these returned values are the michael@0: // same as the inputs, but in cases where intermediate derived type michael@0: // objects have been created, the return values will remove michael@0: // intermediate layers (often rendering those derived type objects michael@0: // into dead code). 
michael@0: void michael@0: IonBuilder::loadTypedObjectData(MDefinition *typedObj, michael@0: MDefinition *offset, michael@0: bool canBeNeutered, michael@0: MDefinition **owner, michael@0: MDefinition **ownerOffset) michael@0: { michael@0: JS_ASSERT(typedObj->type() == MIRType_Object); michael@0: JS_ASSERT(offset->type() == MIRType_Int32); michael@0: michael@0: // Shortcircuit derived type objects, meaning the intermediate michael@0: // objects created to represent `a.b` in an expression like michael@0: // `a.b.c`. In that case, the owned and a base offset can be michael@0: // pulled from the operands of the instruction and combined with michael@0: // `offset`. michael@0: if (typedObj->isNewDerivedTypedObject()) { michael@0: MNewDerivedTypedObject *ins = typedObj->toNewDerivedTypedObject(); michael@0: michael@0: // Note: we never need to check for neutering on this path, michael@0: // because when we create the derived typed object, we check michael@0: // for neutering there, if needed. michael@0: michael@0: MAdd *offsetAdd = MAdd::NewAsmJS(alloc(), ins->offset(), offset, MIRType_Int32); michael@0: current->add(offsetAdd); michael@0: michael@0: *owner = ins->owner(); michael@0: *ownerOffset = offsetAdd; michael@0: return; michael@0: } michael@0: michael@0: if (canBeNeutered) { michael@0: MNeuterCheck *chk = MNeuterCheck::New(alloc(), typedObj); michael@0: current->add(chk); michael@0: typedObj = chk; michael@0: } michael@0: michael@0: *owner = typedObj; michael@0: *ownerOffset = offset; michael@0: } michael@0: michael@0: // Takes as input a typed object, an offset into that typed object's michael@0: // memory, and the type repr of the data found at that offset. Returns michael@0: // the elements pointer and a scaled offset. The scaled offset is michael@0: // expressed in units of `unit`; when working with typed array MIR, michael@0: // this is typically the alignment. 
michael@0: void michael@0: IonBuilder::loadTypedObjectElements(MDefinition *typedObj, michael@0: MDefinition *offset, michael@0: int32_t unit, michael@0: bool canBeNeutered, michael@0: MDefinition **ownerElements, michael@0: MDefinition **ownerScaledOffset) michael@0: { michael@0: MDefinition *owner, *ownerOffset; michael@0: loadTypedObjectData(typedObj, offset, canBeNeutered, &owner, &ownerOffset); michael@0: michael@0: // Load the element data. michael@0: MTypedObjectElements *elements = MTypedObjectElements::New(alloc(), owner); michael@0: current->add(elements); michael@0: michael@0: // Scale to a different unit for compat with typed array MIRs. michael@0: if (unit != 1) { michael@0: MDiv *scaledOffset = MDiv::NewAsmJS(alloc(), ownerOffset, constantInt(unit), MIRType_Int32, michael@0: /* unsignd = */ false); michael@0: current->add(scaledOffset); michael@0: *ownerScaledOffset = scaledOffset; michael@0: } else { michael@0: *ownerScaledOffset = ownerOffset; michael@0: } michael@0: michael@0: *ownerElements = elements; michael@0: } michael@0: michael@0: // Looks up the offset/type-repr-set of the field `id`, given the type michael@0: // set `objTypes` of the field owner. Note that even when true is michael@0: // returned, `*fieldDescrs` might be empty if no useful type/offset michael@0: // pair could be determined. michael@0: bool michael@0: IonBuilder::lookupTypedObjectField(MDefinition *typedObj, michael@0: PropertyName *name, michael@0: int32_t *fieldOffset, michael@0: TypeDescrSet *fieldDescrs, michael@0: size_t *fieldIndex) michael@0: { michael@0: TypeDescrSet objDescrs; michael@0: if (!lookupTypeDescrSet(typedObj, &objDescrs)) michael@0: return false; michael@0: michael@0: // Must be accessing a struct. michael@0: if (!objDescrs.allOfKind(TypeDescr::Struct)) michael@0: return true; michael@0: michael@0: // Determine the type/offset of the field `name`, if any. 
michael@0: int32_t offset; michael@0: if (!objDescrs.fieldNamed(*this, NameToId(name), &offset, michael@0: fieldDescrs, fieldIndex)) michael@0: return false; michael@0: if (fieldDescrs->empty()) michael@0: return true; michael@0: michael@0: JS_ASSERT(offset >= 0); michael@0: *fieldOffset = offset; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::typeObjectForElementFromArrayStructType(MDefinition *typeObj) michael@0: { michael@0: MInstruction *elemType = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_ARRAY_ELEM_TYPE); michael@0: current->add(elemType); michael@0: michael@0: MInstruction *unboxElemType = MUnbox::New(alloc(), elemType, MIRType_Object, MUnbox::Infallible); michael@0: current->add(unboxElemType); michael@0: michael@0: return unboxElemType; michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::typeObjectForFieldFromStructType(MDefinition *typeObj, michael@0: size_t fieldIndex) michael@0: { michael@0: // Load list of field type objects. michael@0: michael@0: MInstruction *fieldTypes = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_STRUCT_FIELD_TYPES); michael@0: current->add(fieldTypes); michael@0: michael@0: MInstruction *unboxFieldTypes = MUnbox::New(alloc(), fieldTypes, MIRType_Object, MUnbox::Infallible); michael@0: current->add(unboxFieldTypes); michael@0: michael@0: // Index into list with index of field. 
michael@0: michael@0: MInstruction *fieldTypesElements = MElements::New(alloc(), unboxFieldTypes); michael@0: current->add(fieldTypesElements); michael@0: michael@0: MConstant *fieldIndexDef = constantInt(fieldIndex); michael@0: michael@0: MInstruction *fieldType = MLoadElement::New(alloc(), fieldTypesElements, fieldIndexDef, false, false); michael@0: current->add(fieldType); michael@0: michael@0: MInstruction *unboxFieldType = MUnbox::New(alloc(), fieldType, MIRType_Object, MUnbox::Infallible); michael@0: current->add(unboxFieldType); michael@0: michael@0: return unboxFieldType; michael@0: } michael@0: michael@0: bool michael@0: IonBuilder::storeScalarTypedObjectValue(MDefinition *typedObj, michael@0: MDefinition *byteOffset, michael@0: ScalarTypeDescr::Type type, michael@0: bool canBeNeutered, michael@0: bool racy, michael@0: MDefinition *value) michael@0: { michael@0: // Find location within the owner object. michael@0: MDefinition *elements, *scaledOffset; michael@0: size_t alignment = ScalarTypeDescr::alignment(type); michael@0: loadTypedObjectElements(typedObj, byteOffset, alignment, canBeNeutered, michael@0: &elements, &scaledOffset); michael@0: michael@0: // Clamp value to [0, 255] when type is Uint8Clamped michael@0: MDefinition *toWrite = value; michael@0: if (type == ScalarTypeDescr::TYPE_UINT8_CLAMPED) { michael@0: toWrite = MClampToUint8::New(alloc(), value); michael@0: current->add(toWrite->toInstruction()); michael@0: } michael@0: michael@0: MStoreTypedArrayElement *store = michael@0: MStoreTypedArrayElement::New(alloc(), elements, scaledOffset, toWrite, michael@0: type); michael@0: if (racy) michael@0: store->setRacy(); michael@0: current->add(store); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: MConstant * michael@0: IonBuilder::constant(const Value &v) michael@0: { michael@0: MConstant *c = MConstant::New(alloc(), v, constraints()); michael@0: current->add(c); michael@0: return c; michael@0: } michael@0: michael@0: 
MConstant * michael@0: IonBuilder::constantInt(int32_t i) michael@0: { michael@0: return constant(Int32Value(i)); michael@0: } michael@0: michael@0: MDefinition * michael@0: IonBuilder::getCallee() michael@0: { michael@0: if (inliningDepth_ == 0) { michael@0: MInstruction *callee = MCallee::New(alloc()); michael@0: current->add(callee); michael@0: return callee; michael@0: } michael@0: michael@0: return inlineCallInfo_->fun(); michael@0: }