--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/IonBuilder.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,10343 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonBuilder.h"

#include "mozilla/DebugOnly.h"

#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "frontend/SourceNotes.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineInspector.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "vm/ArgumentsObject.h"
#include "vm/Opcodes.h"
#include "vm/RegExpStatics.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"
#include "jsopcodeinlines.h"
#include "jsscriptinlines.h"

#include "jit/CompileInfo-inl.h"
#include "jit/ExecutionMode-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::SafeCast;

class jit::BaselineFrameInspector
{
  public:
    types::Type thisType;
    JSObject *singletonScopeChain;

    Vector<types::Type, 4, IonAllocPolicy> argTypes;
    Vector<types::Type, 4, IonAllocPolicy> varTypes;

    BaselineFrameInspector(TempAllocator *temp)
      : thisType(types::Type::UndefinedType()),
        singletonScopeChain(nullptr),
        argTypes(*temp),
        varTypes(*temp)
    {}
};

BaselineFrameInspector *
jit::NewBaselineFrameInspector(TempAllocator *temp, BaselineFrame *frame, CompileInfo *info)
{
    JS_ASSERT(frame);

    BaselineFrameInspector *inspector = temp->lifoAlloc()->new_<BaselineFrameInspector>(temp);
    if (!inspector)
        return nullptr;

    // Note: copying the actual values into a temporary structure for use
    // during compilation could capture nursery pointers, so the values'
    // types are recorded instead.

    inspector->thisType = types::GetMaybeOptimizedOutValueType(frame->thisValue());

    if (frame->scopeChain()->hasSingletonType())
        inspector->singletonScopeChain = frame->scopeChain();

    JSScript *script = frame->script();

    if (script->functionNonDelazifying()) {
        if (!inspector->argTypes.reserve(frame->numFormalArgs()))
            return nullptr;
        for (size_t i = 0; i < frame->numFormalArgs(); i++) {
            if (script->formalIsAliased(i)) {
                inspector->argTypes.infallibleAppend(types::Type::UndefinedType());
            } else if (!script->argsObjAliasesFormals()) {
                types::Type type = types::GetMaybeOptimizedOutValueType(frame->unaliasedFormal(i));
                inspector->argTypes.infallibleAppend(type);
            } else if (frame->hasArgsObj()) {
                types::Type type = types::GetMaybeOptimizedOutValueType(frame->argsObj().arg(i));
                inspector->argTypes.infallibleAppend(type);
            } else {
                inspector->argTypes.infallibleAppend(types::Type::UndefinedType());
            }
        }
    }

    if (!inspector->varTypes.reserve(frame->script()->nfixed()))
        return nullptr;
    for (size_t i = 0; i < frame->script()->nfixed(); i++) {
        if (info->isSlotAliasedAtOsr(i + info->firstLocalSlot())) {
            inspector->varTypes.infallibleAppend(types::Type::UndefinedType());
        } else {
            types::Type type = types::GetMaybeOptimizedOutValueType(frame->unaliasedLocal(i));
            inspector->varTypes.infallibleAppend(type);
        }
    }

    return inspector;
}

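// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the original patch; guarded out of the
// build.] The inspector above deliberately snapshots *type* information
// rather than the frame's Values: a minor GC can move nursery-allocated
// objects, so raw Values captured for a (possibly off-thread) compilation
// could dangle. Type tags are plain data and remain valid. The names below
// are invented for illustration:
#if 0
enum class TypeTag { Undefined, Int32, Object };

struct SlotSnapshot {
    TypeTag tag;   // safe to hold across a moving GC: no heap pointer
};

SlotSnapshot SnapshotSlot(bool isObject, bool isInt32)
{
    SlotSnapshot s;
    s.tag = isObject ? TypeTag::Object
          : isInt32  ? TypeTag::Int32
                     : TypeTag::Undefined;
    return s;
}
#endif
// ---------------------------------------------------------------------------
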
IonBuilder::IonBuilder(JSContext *analysisContext, CompileCompartment *comp,
                       const JitCompileOptions &options, TempAllocator *temp,
                       MIRGraph *graph, types::CompilerConstraintList *constraints,
                       BaselineInspector *inspector, CompileInfo *info,
                       const OptimizationInfo *optimizationInfo,
                       BaselineFrameInspector *baselineFrame, size_t inliningDepth,
                       uint32_t loopDepth)
  : MIRGenerator(comp, options, temp, graph, info, optimizationInfo),
    backgroundCodegen_(nullptr),
    analysisContext(analysisContext),
    baselineFrame_(baselineFrame),
    abortReason_(AbortReason_Disable),
    descrSetHash_(nullptr),
    constraints_(constraints),
    analysis_(*temp, info->script()),
    thisTypes(nullptr),
    argTypes(nullptr),
    typeArray(nullptr),
    typeArrayHint(0),
    bytecodeTypeMap(nullptr),
    loopDepth_(loopDepth),
    callerResumePoint_(nullptr),
    callerBuilder_(nullptr),
    cfgStack_(*temp),
    loops_(*temp),
    switches_(*temp),
    labels_(*temp),
    iterators_(*temp),
    loopHeaders_(*temp),
    inspector(inspector),
    inliningDepth_(inliningDepth),
    numLoopRestarts_(0),
    failedBoundsCheck_(info->script()->failedBoundsCheck()),
    failedShapeGuard_(info->script()->failedShapeGuard()),
    nonStringIteration_(false),
    lazyArguments_(nullptr),
    inlineCallInfo_(nullptr)
{
    script_ = info->script();
    pc = info->startPC();

    JS_ASSERT(script()->hasBaselineScript() == (info->executionMode() != ArgumentsUsageAnalysis));
    JS_ASSERT(!!analysisContext == (info->executionMode() == DefinitePropertiesAnalysis));
}

void
IonBuilder::clearForBackEnd()
{
    JS_ASSERT(!analysisContext);
    baselineFrame_ = nullptr;

    // The caches below allocate data from the malloc heap. Release this
    // memory before later phases of compilation to avoid leaks, as the top
    // level IonBuilder is not explicitly destroyed. Note that builders for
    // inner scripts are constructed on the stack and will release this
    // memory on destruction.
    gsn.purge();
    scopeCoordinateNameCache.purge();
}

bool
IonBuilder::abort(const char *message, ...)
{
    // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
    va_list ap;
    va_start(ap, message);
    abortFmt(message, ap);
    va_end(ap);
    IonSpew(IonSpew_Abort, "aborted @ %s:%d", script()->filename(), PCToLineNumber(script(), pc));
#endif
    return false;
}

void
IonBuilder::spew(const char *message)
{
    // Don't call PCToLineNumber in release builds.
#ifdef DEBUG
    IonSpew(IonSpew_MIR, "%s @ %s:%d", message, script()->filename(), PCToLineNumber(script(), pc));
#endif
}

static inline int32_t
GetJumpOffset(jsbytecode *pc)
{
    JS_ASSERT(js_CodeSpec[JSOp(*pc)].type() == JOF_JUMP);
    return GET_JUMP_OFFSET(pc);
}

IonBuilder::CFGState
IonBuilder::CFGState::If(jsbytecode *join, MTest *test)
{
    CFGState state;
    state.state = IF_TRUE;
    state.stopAt = join;
    state.branch.ifFalse = test->ifFalse();
    state.branch.test = test;
    return state;
}

IonBuilder::CFGState
IonBuilder::CFGState::IfElse(jsbytecode *trueEnd, jsbytecode *falseEnd, MTest *test)
{
    MBasicBlock *ifFalse = test->ifFalse();

    CFGState state;
    // If the end of the false path is the same as the start of the
    // false path, then the "else" block is empty and we can devolve
    // this to the IF_TRUE case. We handle this here because there is
    // still an extra GOTO on the true path and we want stopAt to point
    // there, whereas the IF_TRUE case does not have the GOTO.
    state.state = (falseEnd == ifFalse->pc())
                  ? IF_TRUE_EMPTY_ELSE
                  : IF_ELSE_TRUE;
    state.stopAt = trueEnd;
    state.branch.falseEnd = falseEnd;
    state.branch.ifFalse = ifFalse;
    state.branch.test = test;
    return state;
}

IonBuilder::CFGState
IonBuilder::CFGState::AndOr(jsbytecode *join, MBasicBlock *joinStart)
{
    CFGState state;
    state.state = AND_OR;
    state.stopAt = join;
    state.branch.ifFalse = joinStart;
    state.branch.test = nullptr;
    return state;
}

IonBuilder::CFGState
IonBuilder::CFGState::TableSwitch(jsbytecode *exitpc, MTableSwitch *ins)
{
    CFGState state;
    state.state = TABLE_SWITCH;
    state.stopAt = exitpc;
    state.tableswitch.exitpc = exitpc;
    state.tableswitch.breaks = nullptr;
    state.tableswitch.ins = ins;
    state.tableswitch.currentBlock = 0;
    return state;
}

JSFunction *
IonBuilder::getSingleCallTarget(types::TemporaryTypeSet *calleeTypes)
{
    if (!calleeTypes)
        return nullptr;

    JSObject *obj = calleeTypes->getSingleton();
    if (!obj || !obj->is<JSFunction>())
        return nullptr;

    return &obj->as<JSFunction>();
}

bool
IonBuilder::getPolyCallTargets(types::TemporaryTypeSet *calleeTypes, bool constructing,
                               ObjectVector &targets, uint32_t maxTargets, bool *gotLambda)
{
    JS_ASSERT(targets.empty());
    JS_ASSERT(gotLambda);
    *gotLambda = false;

    if (!calleeTypes)
        return true;

    if (calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount = calleeTypes->getObjectCount();

    if (objCount == 0 || objCount > maxTargets)
        return true;

    if (!targets.reserve(objCount))
        return false;
    for (unsigned i = 0; i < objCount; i++) {
        JSObject *obj = calleeTypes->getSingleObject(i);
        JSFunction *fun;
        if (obj) {
            if (!obj->is<JSFunction>()) {
                targets.clear();
                return true;
            }
            fun = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            JS_ASSERT(typeObj);
            if (!typeObj->interpretedFunction) {
                targets.clear();
                return true;
            }

            fun = typeObj->interpretedFunction;
            *gotLambda = true;
        }

        // Don't optimize if we're constructing and the callee is not a
        // constructor, so that CallKnown does not have to handle this case
        // (it should always throw).
        if (constructing && !fun->isInterpretedConstructor() && !fun->isNativeConstructor()) {
            targets.clear();
            return true;
        }

        DebugOnly<bool> appendOk = targets.append(fun);
        JS_ASSERT(appendOk);
    }

    // For now, only inline "singleton" lambda calls.
    if (*gotLambda && targets.length() > 1)
        targets.clear();

    return true;
}

IonBuilder::InliningDecision
IonBuilder::DontInline(JSScript *targetScript, const char *reason)
{
    if (targetScript) {
        IonSpew(IonSpew_Inlining, "Cannot inline %s:%u: %s",
                targetScript->filename(), targetScript->lineno(), reason);
    } else {
        IonSpew(IonSpew_Inlining, "Cannot inline: %s", reason);
    }

    return InliningDecision_DontInline;
}

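// [Editorial sketch, not part of the original patch; guarded out of the
// build.] Note the calling convention getPolyCallTargets() above uses:
// returning false signals a fatal (out-of-memory) error only, while "this
// call site cannot be specialized" is reported as success with an empty
// target list. A standalone model of that contract, with invented names:
#if 0
#include <vector>

bool CollectTargets(const std::vector<int> &candidates, std::vector<int> *targets)
{
    targets->reserve(candidates.size());
    for (int c : candidates) {
        if (c < 0) {             // stand-in for "callee is not a known function"
            targets->clear();    // give up on specializing this call site...
            return true;         // ...but the compilation itself is still fine
        }
        targets->push_back(c);
    }
    return true;                 // 'false' would be reserved for OOM-style failure
}
#endif
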
IonBuilder::InliningDecision
IonBuilder::canInlineTarget(JSFunction *target, CallInfo &callInfo)
{
    if (!optimizationInfo().inlineInterpreted())
        return InliningDecision_DontInline;

    if (!target->isInterpreted())
        return DontInline(nullptr, "Non-interpreted target");

    // Allow constructing lazy scripts when performing the definite properties
    // analysis, as baseline has not been used to warm the caller up yet.
    if (target->isInterpreted() && info().executionMode() == DefinitePropertiesAnalysis) {
        RootedScript script(analysisContext, target->getOrCreateScript(analysisContext));
        if (!script)
            return InliningDecision_Error;

        if (!script->hasBaselineScript() && script->canBaselineCompile()) {
            MethodStatus status = BaselineCompile(analysisContext, script);
            if (status == Method_Error)
                return InliningDecision_Error;
            if (status != Method_Compiled)
                return InliningDecision_DontInline;
        }
    }

    if (!target->hasScript())
        return DontInline(nullptr, "Lazy script");

    JSScript *inlineScript = target->nonLazyScript();
    if (callInfo.constructing() && !target->isInterpretedConstructor())
        return DontInline(inlineScript, "Callee is not a constructor");

    ExecutionMode executionMode = info().executionMode();
    if (!CanIonCompile(inlineScript, executionMode))
        return DontInline(inlineScript, "Disabled Ion compilation");

    // Don't inline functions which don't have baseline scripts.
    if (!inlineScript->hasBaselineScript())
        return DontInline(inlineScript, "No baseline jitcode");

    if (TooManyArguments(target->nargs()))
        return DontInline(inlineScript, "Too many args");

    if (TooManyArguments(callInfo.argc()))
        return DontInline(inlineScript, "Too many args");

    // Allow inlining of recursive calls, but only one level deep.
    IonBuilder *builder = callerBuilder_;
    while (builder) {
        if (builder->script() == inlineScript)
            return DontInline(inlineScript, "Recursive call");
        builder = builder->callerBuilder_;
    }

    if (target->isHeavyweight())
        return DontInline(inlineScript, "Heavyweight function");

    if (inlineScript->uninlineable())
        return DontInline(inlineScript, "Uninlineable script");

    if (inlineScript->needsArgsObj())
        return DontInline(inlineScript, "Script that needs an arguments object");

    if (!inlineScript->compileAndGo())
        return DontInline(inlineScript, "Non-compileAndGo script");

    types::TypeObjectKey *targetType = types::TypeObjectKey::get(target);
    if (targetType->unknownProperties())
        return DontInline(inlineScript, "Target type has unknown properties");

    return InliningDecision_Inline;
}

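// [Editorial sketch, not part of the original patch; guarded out of the
// build.] The caller-chain walk above is what limits recursive inlining to
// one level: the chain lists every script the current compilation is already
// inlined *into*, so finding the target there would open a second recursive
// level. Standalone model with invented types:
#if 0
struct Builder {
    const void *script;   // the script this builder is compiling
    Builder *caller;      // builder we are being inlined into, or null
};

bool WouldExceedRecursionLevel(const Builder *b, const void *targetScript)
{
    // The current builder's own script is intentionally not checked, so a
    // function may inline one copy of itself; the copy's builder then has
    // that script on its chain and refuses to go deeper.
    for (const Builder *cur = b->caller; cur; cur = cur->caller) {
        if (cur->script == targetScript)
            return true;
    }
    return false;
}
#endif
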
void
IonBuilder::popCfgStack()
{
    if (cfgStack_.back().isLoop())
        loops_.popBack();
    if (cfgStack_.back().state == CFGState::LABEL)
        labels_.popBack();
    cfgStack_.popBack();
}

bool
IonBuilder::analyzeNewLoopTypes(MBasicBlock *entry, jsbytecode *start, jsbytecode *end)
{
    // The phi inputs at the loop head only reflect types for variables that
    // were present at the start of the loop. If the variable changes to a new
    // type within the loop body, and that type is carried around to the loop
    // head, then we need to know about the new type up front.
    //
    // Since SSA information hasn't been constructed for the loop body yet, we
    // need a separate analysis to pick out the types that might flow around
    // the loop header. This is a best-effort analysis that may either over-
    // or under-approximate the set of such types.
    //
    // Over-approximating the types may lead to inefficient generated code, and
    // under-approximating the types will cause the loop body to be analyzed
    // multiple times as the correct types are deduced (see finishLoop).

    // If we restarted processing of an outer loop then get loop header types
    // directly from the last time we have previously processed this loop. This
    // both avoids repeated work from the bytecode traverse below, and will
    // also pick up types discovered while previously building the loop body.
    for (size_t i = 0; i < loopHeaders_.length(); i++) {
        if (loopHeaders_[i].pc == start) {
            MBasicBlock *oldEntry = loopHeaders_[i].header;
            for (MPhiIterator oldPhi = oldEntry->phisBegin();
                 oldPhi != oldEntry->phisEnd();
                 oldPhi++)
            {
                MPhi *newPhi = entry->getSlot(oldPhi->slot())->toPhi();
                if (!newPhi->addBackedgeType(oldPhi->type(), oldPhi->resultTypeSet()))
                    return false;
            }
            // Update the most recent header for this loop encountered, in case
            // new types flow to the phis and the loop is processed at least
            // three times.
            loopHeaders_[i].header = entry;
            return true;
        }
    }
    if (!loopHeaders_.append(LoopHeader(start, entry)))
        return false;

    jsbytecode *last = nullptr, *earlier = nullptr;
    for (jsbytecode *pc = start; pc != end; earlier = last, last = pc, pc += GetBytecodeLength(pc)) {
        uint32_t slot;
        if (*pc == JSOP_SETLOCAL)
            slot = info().localSlot(GET_LOCALNO(pc));
        else if (*pc == JSOP_SETARG)
            slot = info().argSlotUnchecked(GET_ARGNO(pc));
        else
            continue;
        if (slot >= info().firstStackSlot())
            continue;
        if (!analysis().maybeInfo(pc))
            continue;

        MPhi *phi = entry->getSlot(slot)->toPhi();

        if (*last == JSOP_POS)
            last = earlier;

        if (js_CodeSpec[*last].format & JOF_TYPESET) {
            types::TemporaryTypeSet *typeSet = bytecodeTypes(last);
            if (!typeSet->empty()) {
                MIRType type = typeSet->getKnownMIRType();
                if (!phi->addBackedgeType(type, typeSet))
                    return false;
            }
        } else if (*last == JSOP_GETLOCAL || *last == JSOP_GETARG) {
            uint32_t slot = (*last == JSOP_GETLOCAL)
                            ? info().localSlot(GET_LOCALNO(last))
                            : info().argSlotUnchecked(GET_ARGNO(last));
            if (slot < info().firstStackSlot()) {
                MPhi *otherPhi = entry->getSlot(slot)->toPhi();
                if (otherPhi->hasBackedgeType()) {
                    if (!phi->addBackedgeType(otherPhi->type(), otherPhi->resultTypeSet()))
                        return false;
                }
            }
        } else {
            MIRType type = MIRType_None;
            switch (*last) {
              case JSOP_VOID:
              case JSOP_UNDEFINED:
                type = MIRType_Undefined;
                break;
              case JSOP_NULL:
                type = MIRType_Null;
                break;
              case JSOP_ZERO:
              case JSOP_ONE:
              case JSOP_INT8:
              case JSOP_INT32:
              case JSOP_UINT16:
              case JSOP_UINT24:
              case JSOP_BITAND:
              case JSOP_BITOR:
              case JSOP_BITXOR:
              case JSOP_BITNOT:
              case JSOP_RSH:
              case JSOP_LSH:
              case JSOP_URSH:
                type = MIRType_Int32;
                break;
              case JSOP_FALSE:
              case JSOP_TRUE:
              case JSOP_EQ:
              case JSOP_NE:
              case JSOP_LT:
              case JSOP_LE:
              case JSOP_GT:
              case JSOP_GE:
              case JSOP_NOT:
              case JSOP_STRICTEQ:
              case JSOP_STRICTNE:
              case JSOP_IN:
              case JSOP_INSTANCEOF:
                type = MIRType_Boolean;
                break;
              case JSOP_DOUBLE:
                type = MIRType_Double;
                break;
              case JSOP_STRING:
              case JSOP_TYPEOF:
              case JSOP_TYPEOFEXPR:
              case JSOP_ITERNEXT:
                type = MIRType_String;
                break;
              case JSOP_ADD:
              case JSOP_SUB:
              case JSOP_MUL:
              case JSOP_DIV:
              case JSOP_MOD:
              case JSOP_NEG:
                type = inspector->expectedResultType(last);
                break;
              default:
                break;
            }
            if (type != MIRType_None) {
                if (!phi->addBackedgeType(type, nullptr))
                    return false;
            }
        }
    }
    return true;
}

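// [Editorial sketch, not part of the original patch; guarded out of the
// build.] A concrete case the pre-scan above exists for:
//
//     var x = 0;                  // x enters the loop as Int32
//     for (...) { x = x + 0.5; }  // the backedge carries a Double
//
// Seeding the header phi for x with the Double backedge type up front avoids
// building the body under an Int32 assumption and then restarting once the
// wider type shows up (see finishLoop). A minimal numeric type join:
#if 0
enum MiniType { Int32, Double, Boxed };

MiniType JoinPhiType(MiniType entry, MiniType backedge)
{
    if (entry == backedge)
        return entry;
    if ((entry == Int32 && backedge == Double) ||
        (entry == Double && backedge == Int32))
    {
        return Double;   // numeric widening keeps the phi unboxed
    }
    return Boxed;        // anything else falls back to a boxed Value
}
#endif
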
bool
IonBuilder::pushLoop(CFGState::State initial, jsbytecode *stopAt, MBasicBlock *entry, bool osr,
                     jsbytecode *loopHead, jsbytecode *initialPc,
                     jsbytecode *bodyStart, jsbytecode *bodyEnd, jsbytecode *exitpc,
                     jsbytecode *continuepc)
{
    if (!continuepc)
        continuepc = entry->pc();

    ControlFlowInfo loop(cfgStack_.length(), continuepc);
    if (!loops_.append(loop))
        return false;

    CFGState state;
    state.state = initial;
    state.stopAt = stopAt;
    state.loop.bodyStart = bodyStart;
    state.loop.bodyEnd = bodyEnd;
    state.loop.exitpc = exitpc;
    state.loop.continuepc = continuepc;
    state.loop.entry = entry;
    state.loop.osr = osr;
    state.loop.successor = nullptr;
    state.loop.breaks = nullptr;
    state.loop.continues = nullptr;
    state.loop.initialState = initial;
    state.loop.initialPc = initialPc;
    state.loop.initialStopAt = stopAt;
    state.loop.loopHead = loopHead;
    return cfgStack_.append(state);
}

bool
IonBuilder::init()
{
    if (!types::TypeScript::FreezeTypeSets(constraints(), script(),
                                           &thisTypes, &argTypes, &typeArray))
    {
        return false;
    }

    if (!analysis().init(alloc(), gsn))
        return false;

    // The baseline script normally has the bytecode type map, but compute
    // it ourselves if we do not have a baseline script.
    if (script()->hasBaselineScript()) {
        bytecodeTypeMap = script()->baselineScript()->bytecodeTypeMap();
    } else {
        bytecodeTypeMap = alloc_->lifoAlloc()->newArrayUninitialized<uint32_t>(script()->nTypeSets());
        if (!bytecodeTypeMap)
            return false;
        types::FillBytecodeTypeMap(script(), bytecodeTypeMap);
    }

    return true;
}

bool
IonBuilder::build()
{
    if (!init())
        return false;

    if (!setCurrentAndSpecializePhis(newBlock(pc)))
        return false;
    if (!current)
        return false;

#ifdef DEBUG
    if (info().executionMode() == SequentialExecution && script()->hasIonScript()) {
        IonSpew(IonSpew_Scripts, "Recompiling script %s:%d (%p) (usecount=%d, level=%s)",
                script()->filename(), script()->lineno(), (void *)script(),
                (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level()));
    } else {
        IonSpew(IonSpew_Scripts, "Analyzing script %s:%d (%p) (usecount=%d, level=%s)",
                script()->filename(), script()->lineno(), (void *)script(),
                (int)script()->getUseCount(), OptimizationLevelString(optimizationInfo().level()));
    }
#endif

    initParameters();

    // Initialize local variables.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        MConstant *undef = MConstant::New(alloc(), UndefinedValue());
        current->add(undef);
        current->initSlot(info().localSlot(i), undef);
    }

    // Initialize something for the scope chain. We can bail out before the
    // start instruction, but the snapshot is encoded *at* the start
    // instruction, which means generating any code that could load into
    // registers is illegal.
    MInstruction *scope = MConstant::New(alloc(), UndefinedValue());
    current->add(scope);
    current->initSlot(info().scopeChainSlot(), scope);

    // Initialize the return value.
    MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue());
    current->add(returnValue);
    current->initSlot(info().returnValueSlot(), returnValue);

    // Initialize the arguments object slot to undefined if necessary.
    if (info().hasArguments()) {
        MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue());
        current->add(argsObj);
        current->initSlot(info().argsObjSlot(), argsObj);
    }

    // Emit the start instruction, so we can begin real instructions.
    current->makeStart(MStart::New(alloc(), MStart::StartType_Default));
    if (instrumentedProfiling())
        current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Enter));

    // Guard against over-recursion. Do this before we start unboxing, since
    // this will create an OSI point that will read the incoming argument
    // values, which is nice to do before their last real use, to minimize
    // register/stack pressure.
    MCheckOverRecursed *check = MCheckOverRecursed::New(alloc());
    current->add(check);
    check->setResumePoint(current->entryResumePoint());

    // Parameters have been checked to correspond to the typeset, now we unbox
    // what we can in an infallible manner.
    rewriteParameters();

    // It's safe to start emitting actual IR, so now build the scope chain.
    if (!initScopeChain())
        return false;

    if (info().needsArgsObj() && !initArgumentsObject())
        return false;

    // Prevent |this| from being DCE'd: necessary for constructors.
    if (info().funMaybeLazy())
        current->getSlot(info().thisSlot())->setGuard();

    // The type analysis phase attempts to insert unbox operations near
    // definitions of values. It also attempts to replace uses in resume points
    // with the narrower, unboxed variants. However, we must prevent this
    // replacement from happening on values in the entry snapshot. Otherwise we
    // could get this:
    //
    //     v0 = MParameter(0)
    //     v1 = MParameter(1)
    //     --   ResumePoint(v2, v3)
    //     v2 = Unbox(v0, INT32)
    //     v3 = Unbox(v1, INT32)
    //
    // So we attach the initial resume point to each parameter, which the type
    // analysis explicitly checks (this is the same mechanism used for
    // effectful operations).
    for (uint32_t i = 0; i < info().endArgSlot(); i++) {
        MInstruction *ins = current->getEntrySlot(i)->toInstruction();
        if (ins->type() == MIRType_Value)
            ins->setResumePoint(current->entryResumePoint());
    }

    // lazyArguments should never be accessed in |argsObjAliasesFormals| scripts.
    if (info().hasArguments() && !info().argsObjAliasesFormals()) {
        lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
        current->add(lazyArguments_);
    }

    insertRecompileCheck();

    if (!traverseBytecode())
        return false;

    if (!maybeAddOsrTypeBarriers())
        return false;

    if (!processIterators())
        return false;

    JS_ASSERT(loopDepth_ == 0);
    abortReason_ = AbortReason_NoAbort;
    return true;
}

bool
IonBuilder::processIterators()
{
    // Find phis that must directly hold an iterator live.
    Vector<MPhi *, 0, SystemAllocPolicy> worklist;
    for (size_t i = 0; i < iterators_.length(); i++) {
        MInstruction *ins = iterators_[i];
        for (MUseDefIterator iter(ins); iter; iter++) {
            if (iter.def()->isPhi()) {
                if (!worklist.append(iter.def()->toPhi()))
                    return false;
            }
        }
    }

    // Propagate the iterator and live status of phis to all other connected
    // phis.
    while (!worklist.empty()) {
        MPhi *phi = worklist.popCopy();
        phi->setIterator();
        phi->setImplicitlyUsedUnchecked();

        for (MUseDefIterator iter(phi); iter; iter++) {
            if (iter.def()->isPhi()) {
                MPhi *other = iter.def()->toPhi();
                if (!other->isIterator() && !worklist.append(other))
                    return false;
            }
        }
    }

    return true;
}

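// [Editorial sketch, not part of the original patch; guarded out of the
// build.] processIterators() above is a standard worklist fixpoint: seed with
// phis that consume an iterator directly, then flood the mark through
// phi-to-phi use edges until nothing new is reachable. Standalone model over
// an invented graph representation:
#if 0
#include <vector>

struct Node {
    bool isPhi;
    bool marked;
    std::vector<Node *> uses;   // definitions that consume this node
};

void FloodMark(std::vector<Node *> worklist)
{
    while (!worklist.empty()) {
        Node *phi = worklist.back();
        worklist.pop_back();
        phi->marked = true;
        for (Node *use : phi->uses) {
            // As in the original, a node can be queued more than once before
            // it is marked; that is harmless, and since marked nodes are
            // never re-queued the loop terminates.
            if (use->isPhi && !use->marked)
                worklist.push_back(use);
        }
    }
}
#endif
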
bool
IonBuilder::buildInline(IonBuilder *callerBuilder, MResumePoint *callerResumePoint,
                        CallInfo &callInfo)
{
    if (!init())
        return false;

    inlineCallInfo_ = &callInfo;

    IonSpew(IonSpew_Scripts, "Inlining script %s:%d (%p)",
            script()->filename(), script()->lineno(), (void *)script());

    callerBuilder_ = callerBuilder;
    callerResumePoint_ = callerResumePoint;

    if (callerBuilder->failedBoundsCheck_)
        failedBoundsCheck_ = true;

    if (callerBuilder->failedShapeGuard_)
        failedShapeGuard_ = true;

    // Generate single entrance block.
    if (!setCurrentAndSpecializePhis(newBlock(pc)))
        return false;
    if (!current)
        return false;

    current->setCallerResumePoint(callerResumePoint);

    // Connect the entrance block to the last block in the caller's graph.
    MBasicBlock *predecessor = callerBuilder->current;
    JS_ASSERT(predecessor == callerResumePoint->block());

    // All further instructions generated from this scope should be considered
    // as part of the function that we're inlining. We also need to keep track
    // of the inlining depth because all scripts inlined on the same level
    // contiguously have only one InlineExit node.
    if (instrumentedProfiling()) {
        predecessor->add(MProfilerStackOp::New(alloc(), script(),
                                               MProfilerStackOp::InlineEnter,
                                               inliningDepth_));
    }

    predecessor->end(MGoto::New(alloc(), current));
    if (!current->addPredecessorWithoutPhis(predecessor))
        return false;

    // Initialize scope chain slot to Undefined. It's set later by |initScopeChain|.
    MInstruction *scope = MConstant::New(alloc(), UndefinedValue());
    current->add(scope);
    current->initSlot(info().scopeChainSlot(), scope);

    // Initialize |return value| slot.
    MInstruction *returnValue = MConstant::New(alloc(), UndefinedValue());
    current->add(returnValue);
    current->initSlot(info().returnValueSlot(), returnValue);

    // Initialize |arguments| slot.
    if (info().hasArguments()) {
        MInstruction *argsObj = MConstant::New(alloc(), UndefinedValue());
        current->add(argsObj);
        current->initSlot(info().argsObjSlot(), argsObj);
    }

    // Initialize |this| slot.
    current->initSlot(info().thisSlot(), callInfo.thisArg());

    IonSpew(IonSpew_Inlining, "Initializing %u arg slots", info().nargs());

    // NB: Ion does not inline functions which |needsArgsObj|. So using
    // argSlot() instead of argSlotUnchecked() below is OK.
    JS_ASSERT(!info().needsArgsObj());

    // Initialize actually set arguments.
    uint32_t existing_args = Min<uint32_t>(callInfo.argc(), info().nargs());
    for (size_t i = 0; i < existing_args; ++i) {
        MDefinition *arg = callInfo.getArg(i);
        current->initSlot(info().argSlot(i), arg);
    }

    // Pass Undefined for missing arguments.
    for (size_t i = callInfo.argc(); i < info().nargs(); ++i) {
        MConstant *arg = MConstant::New(alloc(), UndefinedValue());
        current->add(arg);
        current->initSlot(info().argSlot(i), arg);
    }

    // Initialize the scope chain now that args are initialized.
    if (!initScopeChain(callInfo.fun()))
        return false;

    IonSpew(IonSpew_Inlining, "Initializing %u local slots", info().nlocals());

    // Initialize local variables.
    for (uint32_t i = 0; i < info().nlocals(); i++) {
        MConstant *undef = MConstant::New(alloc(), UndefinedValue());
        current->add(undef);
        current->initSlot(info().localSlot(i), undef);
    }

    IonSpew(IonSpew_Inlining, "Inline entry block MResumePoint %p, %u operands",
            (void *) current->entryResumePoint(), current->entryResumePoint()->numOperands());

    // +2 for the scope chain and |this|, maybe another +1 for the arguments
    // object slot.
    JS_ASSERT(current->entryResumePoint()->numOperands() == info().totalSlots());

    if (script_->argumentsHasVarBinding()) {
        lazyArguments_ = MConstant::New(alloc(), MagicValue(JS_OPTIMIZED_ARGUMENTS));
        current->add(lazyArguments_);
    }

    insertRecompileCheck();

    if (!traverseBytecode())
        return false;

    return true;
}

void
IonBuilder::rewriteParameter(uint32_t slotIdx, MDefinition *param, int32_t argIndex)
{
    JS_ASSERT(param->isParameter() || param->isGetArgumentsObjectArg());

    types::TemporaryTypeSet *types = param->resultTypeSet();
    MDefinition *actual = ensureDefiniteType(param, types->getKnownMIRType());
    if (actual == param)
        return;

    // Careful! We leave the original MParameter in the entry resume point. The
    // arguments still need to be checked unless proven otherwise at the call
    // site, and these checks can bail out. We can end up with:
    //
    //     v0 = Parameter(0)
    //     v1 = Unbox(v0, INT32)
    //     --   ResumePoint(v0)
    //
    // As usual, it would be invalid for v1 to be captured in the initial
    // resume point, rather than v0.
    current->rewriteSlot(slotIdx, actual);
}

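// [Editorial sketch, not part of the original patch; guarded out of the
// build.] The invariant rewriteParameter() above maintains: the *entry
// snapshot* keeps naming the boxed MParameter, because the unbox is fallible
// and a bailout at entry must restore the original value; only the slot that
// future instructions read is switched to the narrowed definition.
// Bookkeeping model with invented types:
#if 0
struct Def { const char *name; };

struct Slot {
    Def *current;         // what newly built instructions will use
    Def *entrySnapshot;   // what a bailout at function entry restores
};

void RewriteToUnboxed(Slot &slot, Def *unboxed)
{
    slot.current = unboxed;
    // slot.entrySnapshot deliberately stays at the boxed parameter.
}
#endif
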
// Apply Type Inference information to parameters early on, unboxing them if
// they have a definitive type. The actual guards will be emitted by the code
// generator, explicitly, as part of the function prologue.
void
IonBuilder::rewriteParameters()
{
    JS_ASSERT(info().scopeChainSlot() == 0);

    if (!info().funMaybeLazy())
        return;

    for (uint32_t i = info().startArgSlot(); i < info().endArgSlot(); i++) {
        MDefinition *param = current->getSlot(i);
        rewriteParameter(i, param, param->toParameter()->index());
    }
}

void
IonBuilder::initParameters()
{
    if (!info().funMaybeLazy())
        return;

    // If we are doing OSR on a frame which initially executed in the
    // interpreter and didn't accumulate type information, try to use that OSR
    // frame to determine possible initial types for 'this' and parameters.

    if (thisTypes->empty() && baselineFrame_)
        thisTypes->addType(baselineFrame_->thisType, alloc_->lifoAlloc());

    MParameter *param = MParameter::New(alloc(), MParameter::THIS_SLOT, thisTypes);
    current->add(param);
    current->initSlot(info().thisSlot(), param);

    for (uint32_t i = 0; i < info().nargs(); i++) {
        types::TemporaryTypeSet *types = &argTypes[i];
        if (types->empty() && baselineFrame_ &&
            !script_->baselineScript()->modifiesArguments())
        {
            types->addType(baselineFrame_->argTypes[i], alloc_->lifoAlloc());
        }

        param = MParameter::New(alloc(), i, types);
        current->add(param);
        current->initSlot(info().argSlotUnchecked(i), param);
    }
}

bool
IonBuilder::initScopeChain(MDefinition *callee)
{
    MInstruction *scope = nullptr;

    // If the script doesn't use the scope chain, then it's already been
    // initialized from earlier. However, always make a scope chain when
    // |needsArgsObj| is true for the script, since arguments object
    // construction requires the scope chain to be passed in.
    if (!info().needsArgsObj() && !analysis().usesScopeChain())
        return true;

    // The scope chain is only tracked in scripts that have NAME opcodes which
    // will try to access the scope. For other scripts, the scope instructions
    // will be held live by resume points and code will still be generated for
    // them, so just use a constant undefined value.
    if (!script()->compileAndGo())
        return abort("non-CNG global scripts are not supported");

    if (JSFunction *fun = info().funMaybeLazy()) {
        if (!callee) {
            MCallee *calleeIns = MCallee::New(alloc());
            current->add(calleeIns);
            callee = calleeIns;
        }
        scope = MFunctionEnvironment::New(alloc(), callee);
        current->add(scope);

        // This reproduces what is done in CallObject::createForFunction. Skip
        // this for analyses, as the script might not have a baseline script
        // with template objects yet.
        if (fun->isHeavyweight() && !info().executionModeIsAnalysis()) {
            if (fun->isNamedLambda()) {
                scope = createDeclEnvObject(callee, scope);
                if (!scope)
                    return false;
            }

            scope = createCallObject(callee, scope);
            if (!scope)
                return false;
        }
    } else {
        scope = constant(ObjectValue(script()->global()));
    }

    current->setScopeChain(scope);
    return true;
}

bool
IonBuilder::initArgumentsObject()
{
    IonSpew(IonSpew_MIR, "%s:%d - Emitting code to initialize arguments object! block=%p",
            script()->filename(), script()->lineno(), current);
    JS_ASSERT(info().needsArgsObj());
    MCreateArgumentsObject *argsObj = MCreateArgumentsObject::New(alloc(), current->scopeChain());
    current->add(argsObj);
    current->setArgumentsObject(argsObj);
    return true;
}

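// [Editorial sketch, not part of the original patch; guarded out of the
// build.] The chain initScopeChain() builds for a heavyweight named lambda
// mirrors CallObject::createForFunction: the DeclEnv (binding the lambda's
// own name) is linked in first, then the CallObject (binding aliased args and
// vars) on top of it. Illustration with invented types; the caller owns the
// returned links:
#if 0
struct ScopeLink {
    const char *kind;
    ScopeLink *enclosing;
};

ScopeLink *BuildFunctionScope(ScopeLink *global, bool heavyweight, bool namedLambda)
{
    ScopeLink *scope = global;
    if (heavyweight) {
        if (namedLambda)
            scope = new ScopeLink{"DeclEnv", scope};
        scope = new ScopeLink{"CallObject", scope};
    }
    return scope;   // innermost link; following 'enclosing' reaches the global
}
#endif
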
bool
IonBuilder::addOsrValueTypeBarrier(uint32_t slot, MInstruction **def_,
                                   MIRType type, types::TemporaryTypeSet *typeSet)
{
    MInstruction *&def = *def_;
    MBasicBlock *osrBlock = def->block();

    // Clear bogus type information added in newOsrPreheader().
    def->setResultType(MIRType_Value);
    def->setResultTypeSet(nullptr);

    if (typeSet && !typeSet->unknown()) {
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    } else if (type == MIRType_Null ||
               type == MIRType_Undefined ||
               type == MIRType_MagicOptimizedArguments)
    {
        // No unbox instruction will be added below, so check the type by
        // adding a type barrier for a singleton type set.
        types::Type ntype = types::Type::PrimitiveType(ValueTypeFromMIRType(type));
        typeSet = alloc_->lifoAlloc()->new_<types::TemporaryTypeSet>(ntype);
        if (!typeSet)
            return false;
        MInstruction *barrier = MTypeBarrier::New(alloc(), def, typeSet);
        osrBlock->insertBefore(osrBlock->lastIns(), barrier);
        osrBlock->rewriteSlot(slot, barrier);
        def = barrier;
    }

    switch (type) {
      case MIRType_Boolean:
      case MIRType_Int32:
      case MIRType_Double:
      case MIRType_String:
      case MIRType_Object:
        if (type != def->type()) {
            MUnbox *unbox = MUnbox::New(alloc(), def, type, MUnbox::Fallible);
            osrBlock->insertBefore(osrBlock->lastIns(), unbox);
            osrBlock->rewriteSlot(slot, unbox);
            def = unbox;
        }
        break;

      case MIRType_Null:
      {
        MConstant *c = MConstant::New(alloc(), NullValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_Undefined:
      {
        MConstant *c = MConstant::New(alloc(), UndefinedValue());
        osrBlock->insertBefore(osrBlock->lastIns(), c);
        osrBlock->rewriteSlot(slot, c);
        def = c;
        break;
      }

      case MIRType_MagicOptimizedArguments:
        JS_ASSERT(lazyArguments_);
        osrBlock->rewriteSlot(slot, lazyArguments_);
        def = lazyArguments_;
        break;

      default:
        break;
    }

    JS_ASSERT(def == osrBlock->getSlot(slot));
    return true;
}

bool
IonBuilder::maybeAddOsrTypeBarriers()
{
    if (!info().osrPc())
        return true;

    // The loop has successfully been processed, and the loop header phis
    // have their final type. Add unboxes and type barriers in the OSR
    // block to check that the values have the appropriate type, and update
    // the types in the preheader.

    MBasicBlock *osrBlock = graph().osrBlock();
    if (!osrBlock) {
        // Because IonBuilder does not compile catch blocks, it's possible to
        // end up without an OSR block if the OSR pc is only reachable via a
        // break-statement inside the catch block. For instance:
        //
        //     for (;;) {
        //         try {
        //             throw 3;
        //         } catch(e) {
        //             break;
        //         }
        //     }
        //     while (..) { }    // <= OSR here, only reachable via catch block.
        //
        // For now we just abort in this case.
        JS_ASSERT(graph().hasTryBlock());
        return abort("OSR block only reachable through catch block");
    }

    MBasicBlock *preheader = osrBlock->getSuccessor(0);
    MBasicBlock *header = preheader->getSuccessor(0);
    static const size_t OSR_PHI_POSITION = 1;
    JS_ASSERT(preheader->getPredecessor(OSR_PHI_POSITION) == osrBlock);

    MPhiIterator headerPhi = header->phisBegin();
    while (headerPhi != header->phisEnd() && headerPhi->slot() < info().startArgSlot())
        headerPhi++;

    for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++, headerPhi++) {
        // Aliased slots are never accessed, since they need to go through
        // the callobject.
        // The type barriers are added there and can be discarded here.
        if (info().isSlotAliasedAtOsr(i))
            continue;

        MInstruction *def = osrBlock->getSlot(i)->toInstruction();

        JS_ASSERT(headerPhi->slot() == i);
        MPhi *preheaderPhi = preheader->getSlot(i)->toPhi();

        MIRType type = headerPhi->type();
        types::TemporaryTypeSet *typeSet = headerPhi->resultTypeSet();

        if (!addOsrValueTypeBarrier(i, &def, type, typeSet))
            return false;

        preheaderPhi->replaceOperand(OSR_PHI_POSITION, def);
        preheaderPhi->setResultType(type);
        preheaderPhi->setResultTypeSet(typeSet);
    }

    return true;
}

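// [Editorial sketch, not part of the original patch; guarded out of the
// build.] The per-slot effect the two functions above arrange, in order: a
// fallible type barrier rejects an OSR value whose dynamic tag disagrees with
// the loop header's proven type (bailing back to baseline), after which the
// unbox is safe. Control-flow model with invented helpers:
#if 0
enum Tag { TagInt32, TagDouble, TagObject };

struct BoxedValue {
    Tag tag;
    long raw;
};

// Returns false to stand in for "bail out of Ion" when the incoming value
// fails the check; otherwise the payload can be used unboxed.
bool BarrierThenUnbox(const BoxedValue &v, Tag expected, long *unboxed)
{
    if (v.tag != expected)
        return false;     // type barrier failed: deoptimize
    *unboxed = v.raw;     // infallible now that the tag is known
    return true;
}
#endif
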
// We try to build a control-flow graph in the order that it would be built as
// if traversing the AST. This leads to a nice ordering and lets us build SSA
// in one pass, since the bytecode is structured.
//
// We traverse the bytecode iteratively, maintaining a current basic block.
// Each basic block has a mapping of local slots to instructions, as well as a
// stack depth. As we encounter instructions we mutate this mapping in the
// current block.
//
// Things get interesting when we encounter a control structure. This can be
// either an IFEQ, downward GOTO, or a decompiler hint stashed away in source
// notes. Once we encounter such an opcode, we recover the structure of the
// control flow (its branches and bounds), and push it on a stack.
//
// As we continue traversing the bytecode, we look for points that would
// terminate the topmost control flow path pushed on the stack. These are:
// (1) The bounds of the current structure (end of a loop or join/edge of a
//     branch).
// (2) A "return", "break", or "continue" statement.
//
// For (1), we expect that there is a current block in the process of being
// built, and we complete the necessary edges in the CFG. For (2), we expect
// that there is no active block.
//
// For normal diamond join points, we construct Phi nodes as we add
// predecessors. For loops, care must be taken to propagate Phi nodes back
// through uses in the loop body.
bool
IonBuilder::traverseBytecode()
{
    for (;;) {
        JS_ASSERT(pc < info().limitPC());

        for (;;) {
            if (!alloc().ensureBallast())
                return false;

            // Check if we've hit an expected join point or edge in the bytecode.
            // Leaving one control structure could place us at the edge of another,
            // thus |while| instead of |if| so we don't skip any opcodes.
            if (!cfgStack_.empty() && cfgStack_.back().stopAt == pc) {
                ControlStatus status = processCfgStack();
                if (status == ControlStatus_Error)
                    return false;
                if (status == ControlStatus_Abort)
                    return abort("Aborted while processing control flow");
                if (!current)
                    return true;
                continue;
            }

            // Some opcodes need to be handled early because they affect control
            // flow, terminating the current basic block and/or instructing the
            // traversal algorithm to continue from a new pc.
            //
            // (1) If the opcode does not affect control flow, then the opcode
            //     is inspected and transformed to IR. This is the process_opcode
            //     label.
            // (2) A loop could be detected via a forward GOTO. In this case,
            //     we don't want to process the GOTO, but the following
            //     instruction.
            // (3) A RETURN, STOP, BREAK, or CONTINUE may require processing the
            //     CFG stack to terminate open branches.
            //
            // Similar to above, snooping control flow could land us at another
            // control flow point, so we iterate until it's time to inspect a real
            // opcode.
            ControlStatus status;
            if ((status = snoopControlFlow(JSOp(*pc))) == ControlStatus_None)
                break;
            if (status == ControlStatus_Error)
                return false;
            if (status == ControlStatus_Abort)
                return abort("Aborted while processing control flow");
            if (!current)
                return true;
        }

#ifdef DEBUG
        // In debug builds, after compiling this op, check that all values
        // popped by this opcode either:
        //
        //   (1) Have the ImplicitlyUsed flag set on them.
        //   (2) Have more uses than before compiling this op (the value is
        //       used as operand of a new MIR instruction).
        //
        // This is used to catch problems where IonBuilder pops a value without
        // adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
        Vector<MDefinition *, 4, IonAllocPolicy> popped(alloc());
        Vector<size_t, 4, IonAllocPolicy> poppedUses(alloc());
        unsigned nuses = GetUseCount(script_, script_->pcToOffset(pc));

        for (unsigned i = 0; i < nuses; i++) {
            MDefinition *def = current->peek(-int32_t(i + 1));
            if (!popped.append(def) || !poppedUses.append(def->defUseCount()))
                return false;
        }
#endif

        // Nothing in inspectOpcode() is allowed to advance the pc.
        JSOp op = JSOp(*pc);
        if (!inspectOpcode(op))
            return false;

#ifdef DEBUG
        for (size_t i = 0; i < popped.length(); i++) {
            switch (op) {
              case JSOP_POP:
              case JSOP_POPN:
              case JSOP_DUPAT:
              case JSOP_DUP:
              case JSOP_DUP2:
              case JSOP_PICK:
              case JSOP_SWAP:
              case JSOP_SETARG:
              case JSOP_SETLOCAL:
              case JSOP_SETRVAL:
              case JSOP_VOID:
                // Don't require SSA uses for values popped by these ops.
                break;

              case JSOP_POS:
              case JSOP_TOID:
                // These ops may leave their input on the stack without setting
                // the ImplicitlyUsed flag. If this value will be popped immediately,
                // we may replace it with |undefined|, but the difference is
                // not observable.
                JS_ASSERT(i == 0);
                if (current->peek(-1) == popped[0])
                    break;
                // FALL THROUGH

              default:
                JS_ASSERT(popped[i]->isImplicitlyUsed() ||

                          // MNewDerivedTypedObject instances are
                          // often dead unless they escape from the
                          // fn. See IonBuilder::loadTypedObjectData()
                          // for more details.
                          popped[i]->isNewDerivedTypedObject() ||

                          popped[i]->defUseCount() > poppedUses[i]);
                break;
            }
        }
#endif

        pc += js_CodeSpec[op].length;
        current->updateTrackedPc(pc);
    }

    return true;
}

IonBuilder::ControlStatus
IonBuilder::snoopControlFlow(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
        return maybeLoop(op, info().getNote(gsn, pc));

      case JSOP_POP:
        return maybeLoop(op, info().getNote(gsn, pc));

      case JSOP_RETURN:
      case JSOP_RETRVAL:
        return processReturn(op);

      case JSOP_THROW:
        return processThrow();

      case JSOP_GOTO:
      {
        jssrcnote *sn = info().getNote(gsn, pc);
        switch (sn ? SN_TYPE(sn) : SRC_NULL) {
          case SRC_BREAK:
          case SRC_BREAK2LABEL:
            return processBreak(op, sn);

          case SRC_CONTINUE:
            return processContinue(op);

          case SRC_SWITCHBREAK:
            return processSwitchBreak(op);

          case SRC_WHILE:
          case SRC_FOR_IN:
          case SRC_FOR_OF:
            // while (cond) { }
            return whileOrForInLoop(sn);

          default:
            // Hard assert for now - make an error later.
            MOZ_ASSUME_UNREACHABLE("unknown goto case");
        }
        break;
      }

      case JSOP_TABLESWITCH:
        return tableSwitch(op, info().getNote(gsn, pc));

      case JSOP_IFNE:
        // We should never reach an IFNE, it's a stopAt point, which will
        // trigger closing the loop.
        MOZ_ASSUME_UNREACHABLE("we should never reach an ifne!");

      default:
        break;
    }
    return ControlStatus_None;
}

bool
IonBuilder::inspectOpcode(JSOp op)
{
    switch (op) {
      case JSOP_NOP:
      case JSOP_LINENO:
      case JSOP_LOOPENTRY:
        return true;

      case JSOP_LABEL:
        return jsop_label();

      case JSOP_UNDEFINED:
        return pushConstant(UndefinedValue());

      case JSOP_IFEQ:
        return jsop_ifeq(JSOP_IFEQ);

      case JSOP_TRY:
        return jsop_try();

      case JSOP_CONDSWITCH:
        return jsop_condswitch();

      case JSOP_BITNOT:
        return jsop_bitnot();

      case JSOP_BITAND:
      case JSOP_BITOR:
      case JSOP_BITXOR:
      case JSOP_LSH:
      case JSOP_RSH:
      case JSOP_URSH:
        return jsop_bitop(op);

      case JSOP_ADD:
      case JSOP_SUB:
      case JSOP_MUL:
      case JSOP_DIV:
      case JSOP_MOD:
        return jsop_binary(op);

      case JSOP_POS:
        return jsop_pos();

      case JSOP_NEG:
        return jsop_neg();

      case JSOP_AND:
      case JSOP_OR:
        return jsop_andor(op);

      case JSOP_DEFVAR:
      case JSOP_DEFCONST:
        return jsop_defvar(GET_UINT32_INDEX(pc));

      case JSOP_DEFFUN:
        return jsop_deffun(GET_UINT32_INDEX(pc));

      case JSOP_EQ:
      case JSOP_NE:
      case JSOP_STRICTEQ:
      case JSOP_STRICTNE:
      case JSOP_LT:
      case JSOP_LE:
      case JSOP_GT:
      case JSOP_GE:
        return jsop_compare(op);

      case JSOP_DOUBLE:
        return pushConstant(info().getConst(pc));

      case JSOP_STRING:
        return pushConstant(StringValue(info().getAtom(pc)));

      case JSOP_ZERO:
        return pushConstant(Int32Value(0));

      case JSOP_ONE:
        return pushConstant(Int32Value(1));

      case JSOP_NULL:
        return pushConstant(NullValue());

      case JSOP_VOID:
        current->pop();
        return pushConstant(UndefinedValue());

      case JSOP_HOLE:
        return pushConstant(MagicValue(JS_ELEMENTS_HOLE));

      case JSOP_FALSE:
        return pushConstant(BooleanValue(false));

      case JSOP_TRUE:
        return pushConstant(BooleanValue(true));

      case JSOP_ARGUMENTS:
        return jsop_arguments();

      case JSOP_RUNONCE:
        return jsop_runonce();

      case JSOP_REST:
        return jsop_rest();

      case JSOP_GETARG:
        if (info().argsObjAliasesFormals()) {
            MGetArgumentsObjectArg *getArg = MGetArgumentsObjectArg::New(alloc(),
                                                                         current->argumentsObject(),
                                                                         GET_ARGNO(pc));
            current->add(getArg);
            current->push(getArg);
        } else {
            current->pushArg(GET_ARGNO(pc));
        }
        return true;

      case JSOP_SETARG:
        return jsop_setarg(GET_ARGNO(pc));

      case JSOP_GETLOCAL:
        current->pushLocal(GET_LOCALNO(pc));
        return true;

      case JSOP_SETLOCAL:
        current->setLocal(GET_LOCALNO(pc));
        return true;

      case JSOP_POP:
        current->pop();

        // POP opcodes frequently appear where values are killed, e.g. after
        // SET* opcodes. Place a resume point afterwards to avoid capturing
        // the dead value in later snapshots, except in places where that
        // resume point is obviously unnecessary.
        if (pc[JSOP_POP_LENGTH] == JSOP_POP)
            return true;
        return maybeInsertResume();

      case JSOP_POPN:
        for (uint32_t i = 0, n = GET_UINT16(pc); i < n; i++)
            current->pop();
        return true;

      case JSOP_DUPAT:
        current->pushSlot(current->stackDepth() - 1 - GET_UINT24(pc));
        return true;

      case JSOP_NEWINIT:
        if (GET_UINT8(pc) == JSProto_Array)
            return jsop_newarray(0);
        return jsop_newobject();

      case JSOP_NEWARRAY:
        return jsop_newarray(GET_UINT24(pc));

      case JSOP_NEWOBJECT:
        return jsop_newobject();

      case JSOP_INITELEM:
        return jsop_initelem();

      case JSOP_INITELEM_ARRAY:
        return jsop_initelem_array();

      case JSOP_INITPROP:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop(name);
      }

      case JSOP_MUTATEPROTO:
      {
        return jsop_mutateproto();
      }

      case JSOP_INITPROP_GETTER:
      case JSOP_INITPROP_SETTER:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_initprop_getter_setter(name);
      }

      case JSOP_INITELEM_GETTER:
      case JSOP_INITELEM_SETTER:
        return jsop_initelem_getter_setter();

      case JSOP_ENDINIT:
        return true;

      case JSOP_FUNCALL:
        return jsop_funcall(GET_ARGC(pc));

      case JSOP_FUNAPPLY:
        return jsop_funapply(GET_ARGC(pc));

      case JSOP_CALL:
      case JSOP_NEW:
        return jsop_call(GET_ARGC(pc), (JSOp)*pc == JSOP_NEW);

      case JSOP_EVAL:
        return jsop_eval(GET_ARGC(pc));

      case JSOP_INT8:
        return pushConstant(Int32Value(GET_INT8(pc)));

      case JSOP_UINT16:
        return pushConstant(Int32Value(GET_UINT16(pc)));

      case JSOP_GETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getgname(name);
      }

      case JSOP_BINDGNAME:
        return pushConstant(ObjectValue(script()->global()));

      case JSOP_SETGNAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        JSObject *obj = &script()->global();
        return setStaticName(obj, name);
      }

      case JSOP_NAME:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_getname(name);
      }

      case JSOP_GETINTRINSIC:
      {
        PropertyName *name = info().getAtom(pc)->asPropertyName();
        return jsop_intrinsic(name);
      }

      case JSOP_BINDNAME:
        return jsop_bindname(info().getName(pc));

      case JSOP_DUP:
        current->pushSlot(current->stackDepth() - 1);
        return true;

      case JSOP_DUP2:
        return jsop_dup2();

      case JSOP_SWAP:
        current->swapAt(-1);
        return true;

      case JSOP_PICK:
        current->pick(-GET_INT8(pc));
        return true;

      case JSOP_GETALIASEDVAR:
        return jsop_getaliasedvar(ScopeCoordinate(pc));

      case JSOP_SETALIASEDVAR:
        return jsop_setaliasedvar(ScopeCoordinate(pc));

      case JSOP_UINT24:
return pushConstant(Int32Value(GET_UINT24(pc))); 1.1658 + 1.1659 + case JSOP_INT32: 1.1660 + return pushConstant(Int32Value(GET_INT32(pc))); 1.1661 + 1.1662 + case JSOP_LOOPHEAD: 1.1663 + // JSOP_LOOPHEAD is handled when processing the loop header. 1.1664 + MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD outside loop"); 1.1665 + 1.1666 + case JSOP_GETELEM: 1.1667 + case JSOP_CALLELEM: 1.1668 + return jsop_getelem(); 1.1669 + 1.1670 + case JSOP_SETELEM: 1.1671 + return jsop_setelem(); 1.1672 + 1.1673 + case JSOP_LENGTH: 1.1674 + return jsop_length(); 1.1675 + 1.1676 + case JSOP_NOT: 1.1677 + return jsop_not(); 1.1678 + 1.1679 + case JSOP_THIS: 1.1680 + return jsop_this(); 1.1681 + 1.1682 + case JSOP_CALLEE: { 1.1683 + MDefinition *callee = getCallee(); 1.1684 + current->push(callee); 1.1685 + return true; 1.1686 + } 1.1687 + 1.1688 + case JSOP_GETPROP: 1.1689 + case JSOP_CALLPROP: 1.1690 + { 1.1691 + PropertyName *name = info().getAtom(pc)->asPropertyName(); 1.1692 + return jsop_getprop(name); 1.1693 + } 1.1694 + 1.1695 + case JSOP_SETPROP: 1.1696 + case JSOP_SETNAME: 1.1697 + { 1.1698 + PropertyName *name = info().getAtom(pc)->asPropertyName(); 1.1699 + return jsop_setprop(name); 1.1700 + } 1.1701 + 1.1702 + case JSOP_DELPROP: 1.1703 + { 1.1704 + PropertyName *name = info().getAtom(pc)->asPropertyName(); 1.1705 + return jsop_delprop(name); 1.1706 + } 1.1707 + 1.1708 + case JSOP_DELELEM: 1.1709 + return jsop_delelem(); 1.1710 + 1.1711 + case JSOP_REGEXP: 1.1712 + return jsop_regexp(info().getRegExp(pc)); 1.1713 + 1.1714 + case JSOP_OBJECT: 1.1715 + return jsop_object(info().getObject(pc)); 1.1716 + 1.1717 + case JSOP_TYPEOF: 1.1718 + case JSOP_TYPEOFEXPR: 1.1719 + return jsop_typeof(); 1.1720 + 1.1721 + case JSOP_TOID: 1.1722 + return jsop_toid(); 1.1723 + 1.1724 + case JSOP_LAMBDA: 1.1725 + return jsop_lambda(info().getFunction(pc)); 1.1726 + 1.1727 + case JSOP_LAMBDA_ARROW: 1.1728 + return jsop_lambda_arrow(info().getFunction(pc)); 1.1729 + 1.1730 + case JSOP_ITER: 1.1731 + return jsop_iter(GET_INT8(pc)); 1.1732 + 1.1733 + case JSOP_ITERNEXT: 1.1734 + return jsop_iternext(); 1.1735 + 1.1736 + case JSOP_MOREITER: 1.1737 + return jsop_itermore(); 1.1738 + 1.1739 + case JSOP_ENDITER: 1.1740 + return jsop_iterend(); 1.1741 + 1.1742 + case JSOP_IN: 1.1743 + return jsop_in(); 1.1744 + 1.1745 + case JSOP_SETRVAL: 1.1746 + JS_ASSERT(!script()->noScriptRval()); 1.1747 + current->setSlot(info().returnValueSlot(), current->pop()); 1.1748 + return true; 1.1749 + 1.1750 + case JSOP_INSTANCEOF: 1.1751 + return jsop_instanceof(); 1.1752 + 1.1753 + case JSOP_DEBUGLEAVEBLOCK: 1.1754 + return true; 1.1755 + 1.1756 + default: 1.1757 +#ifdef DEBUG 1.1758 + return abort("Unsupported opcode: %s (line %d)", js_CodeName[op], info().lineno(pc)); 1.1759 +#else 1.1760 + return abort("Unsupported opcode: %d (line %d)", op, info().lineno(pc)); 1.1761 +#endif 1.1762 + } 1.1763 +} 1.1764 + 1.1765 +// Given that the current control flow structure has ended forcefully, 1.1766 +// via a return, break, or continue (rather than joining), propagate the 1.1767 +// termination up. For example, a return nested 5 loops deep may terminate 1.1768 +// every outer loop at once, if there are no intervening conditionals: 1.1769 +// 1.1770 +// for (...) { 1.1771 +// for (...) { 1.1772 +// return x; 1.1773 +// } 1.1774 +// } 1.1775 +// 1.1776 +// If |current| is nullptr when this function returns, then there is no more 1.1777 +// control flow to be processed. 
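+ // An intervening conditional stops the propagation early. For example, in: + // + // for (...) { + // if (x) + // return y; + // } + // + // the return only ends the true branch of the if; the loop body resumes + // at the if's join point.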
1.1778 +IonBuilder::ControlStatus 1.1779 +IonBuilder::processControlEnd() 1.1780 +{ 1.1781 + JS_ASSERT(!current); 1.1782 + 1.1783 + if (cfgStack_.empty()) { 1.1784 + // If there is no more control flow to process, then this is the 1.1785 + // last return in the function. 1.1786 + return ControlStatus_Ended; 1.1787 + } 1.1788 + 1.1789 + return processCfgStack(); 1.1790 +} 1.1791 + 1.1792 +// Processes the top of the CFG stack. This is used from two places: 1.1793 +// (1) processControlEnd(), whereby a break, continue, or return may interrupt 1.1794 +// an in-progress CFG structure before reaching its actual termination 1.1795 +// point in the bytecode. 1.1796 +// (2) traverseBytecode(), whereby we reach the last instruction in a CFG 1.1797 +// structure. 1.1798 +IonBuilder::ControlStatus 1.1799 +IonBuilder::processCfgStack() 1.1800 +{ 1.1801 + ControlStatus status = processCfgEntry(cfgStack_.back()); 1.1802 + 1.1803 + // If this terminated a CFG structure, act like processControlEnd() and 1.1804 + // keep propagating upward. 1.1805 + while (status == ControlStatus_Ended) { 1.1806 + popCfgStack(); 1.1807 + if (cfgStack_.empty()) 1.1808 + return status; 1.1809 + status = processCfgEntry(cfgStack_.back()); 1.1810 + } 1.1811 + 1.1812 + // If some join took place, the current structure is finished. 1.1813 + if (status == ControlStatus_Joined) 1.1814 + popCfgStack(); 1.1815 + 1.1816 + return status; 1.1817 +} 1.1818 + 1.1819 +IonBuilder::ControlStatus 1.1820 +IonBuilder::processCfgEntry(CFGState &state) 1.1821 +{ 1.1822 + switch (state.state) { 1.1823 + case CFGState::IF_TRUE: 1.1824 + case CFGState::IF_TRUE_EMPTY_ELSE: 1.1825 + return processIfEnd(state); 1.1826 + 1.1827 + case CFGState::IF_ELSE_TRUE: 1.1828 + return processIfElseTrueEnd(state); 1.1829 + 1.1830 + case CFGState::IF_ELSE_FALSE: 1.1831 + return processIfElseFalseEnd(state); 1.1832 + 1.1833 + case CFGState::DO_WHILE_LOOP_BODY: 1.1834 + return processDoWhileBodyEnd(state); 1.1835 + 1.1836 + case CFGState::DO_WHILE_LOOP_COND: 1.1837 + return processDoWhileCondEnd(state); 1.1838 + 1.1839 + case CFGState::WHILE_LOOP_COND: 1.1840 + return processWhileCondEnd(state); 1.1841 + 1.1842 + case CFGState::WHILE_LOOP_BODY: 1.1843 + return processWhileBodyEnd(state); 1.1844 + 1.1845 + case CFGState::FOR_LOOP_COND: 1.1846 + return processForCondEnd(state); 1.1847 + 1.1848 + case CFGState::FOR_LOOP_BODY: 1.1849 + return processForBodyEnd(state); 1.1850 + 1.1851 + case CFGState::FOR_LOOP_UPDATE: 1.1852 + return processForUpdateEnd(state); 1.1853 + 1.1854 + case CFGState::TABLE_SWITCH: 1.1855 + return processNextTableSwitchCase(state); 1.1856 + 1.1857 + case CFGState::COND_SWITCH_CASE: 1.1858 + return processCondSwitchCase(state); 1.1859 + 1.1860 + case CFGState::COND_SWITCH_BODY: 1.1861 + return processCondSwitchBody(state); 1.1862 + 1.1863 + case CFGState::AND_OR: 1.1864 + return processAndOrEnd(state); 1.1865 + 1.1866 + case CFGState::LABEL: 1.1867 + return processLabelEnd(state); 1.1868 + 1.1869 + case CFGState::TRY: 1.1870 + return processTryEnd(state); 1.1871 + 1.1872 + default: 1.1873 + MOZ_ASSUME_UNREACHABLE("unknown cfgstate"); 1.1874 + } 1.1875 +} 1.1876 + 1.1877 +IonBuilder::ControlStatus 1.1878 +IonBuilder::processIfEnd(CFGState &state) 1.1879 +{ 1.1880 + if (current) { 1.1881 + // Here, the false block is the join point. Create an edge from the 1.1882 + // current block to the false block. Note that a RETURN opcode 1.1883 + // could have already ended the block. 
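+ // For example, in |if (c) { f(); } g();| the false block begins at g() + // and doubles as the join point for both paths.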
1.1884 + current->end(MGoto::New(alloc(), state.branch.ifFalse)); 1.1885 + 1.1886 + if (!state.branch.ifFalse->addPredecessor(alloc(), current)) 1.1887 + return ControlStatus_Error; 1.1888 + } 1.1889 + 1.1890 + if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) 1.1891 + return ControlStatus_Error; 1.1892 + graph().moveBlockToEnd(current); 1.1893 + pc = current->pc(); 1.1894 + return ControlStatus_Joined; 1.1895 +} 1.1896 + 1.1897 +IonBuilder::ControlStatus 1.1898 +IonBuilder::processIfElseTrueEnd(CFGState &state) 1.1899 +{ 1.1900 + // We've reached the end of the true branch of an if-else. Don't 1.1901 + // create an edge yet, just transition to parsing the false branch. 1.1902 + state.state = CFGState::IF_ELSE_FALSE; 1.1903 + state.branch.ifTrue = current; 1.1904 + state.stopAt = state.branch.falseEnd; 1.1905 + pc = state.branch.ifFalse->pc(); 1.1906 + if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) 1.1907 + return ControlStatus_Error; 1.1908 + graph().moveBlockToEnd(current); 1.1909 + 1.1910 + if (state.branch.test) 1.1911 + filterTypesAtTest(state.branch.test); 1.1912 + 1.1913 + return ControlStatus_Jumped; 1.1914 +} 1.1915 + 1.1916 +IonBuilder::ControlStatus 1.1917 +IonBuilder::processIfElseFalseEnd(CFGState &state) 1.1918 +{ 1.1919 + // Update the state to have the latest block from the false path. 1.1920 + state.branch.ifFalse = current; 1.1921 + 1.1922 + // To create the join node, we need an incoming edge that has not been 1.1923 + // terminated yet. 1.1924 + MBasicBlock *pred = state.branch.ifTrue 1.1925 + ? state.branch.ifTrue 1.1926 + : state.branch.ifFalse; 1.1927 + MBasicBlock *other = (pred == state.branch.ifTrue) ? state.branch.ifFalse : state.branch.ifTrue; 1.1928 + 1.1929 + if (!pred) 1.1930 + return ControlStatus_Ended; 1.1931 + 1.1932 + // Create a new block to represent the join. 1.1933 + MBasicBlock *join = newBlock(pred, state.branch.falseEnd); 1.1934 + if (!join) 1.1935 + return ControlStatus_Error; 1.1936 + 1.1937 + // Create edges from the true and false blocks as needed. 1.1938 + pred->end(MGoto::New(alloc(), join)); 1.1939 + 1.1940 + if (other) { 1.1941 + other->end(MGoto::New(alloc(), join)); 1.1942 + if (!join->addPredecessor(alloc(), other)) 1.1943 + return ControlStatus_Error; 1.1944 + } 1.1945 + 1.1946 + // Ignore unreachable remainder of false block if existent. 1.1947 + if (!setCurrentAndSpecializePhis(join)) 1.1948 + return ControlStatus_Error; 1.1949 + pc = current->pc(); 1.1950 + return ControlStatus_Joined; 1.1951 +} 1.1952 + 1.1953 +IonBuilder::ControlStatus 1.1954 +IonBuilder::processBrokenLoop(CFGState &state) 1.1955 +{ 1.1956 + JS_ASSERT(!current); 1.1957 + 1.1958 + JS_ASSERT(loopDepth_); 1.1959 + loopDepth_--; 1.1960 + 1.1961 + // A broken loop is not a real loop (it has no header or backedge), so 1.1962 + // reset the loop depth. 1.1963 + for (MBasicBlockIterator i(graph().begin(state.loop.entry)); i != graph().end(); i++) { 1.1964 + if (i->loopDepth() > loopDepth_) 1.1965 + i->setLoopDepth(i->loopDepth() - 1); 1.1966 + } 1.1967 + 1.1968 + // If the loop started with a condition (while/for) then even if the 1.1969 + // structure never actually loops, the condition itself can still fail and 1.1970 + // thus we must resume at the successor, if one exists. 
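+ // For example, |while (c()) { return x; }| never takes its backedge, but + // c() may be false on entry, so execution resumes in the successor block + // after the loop.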
1.1971 + if (!setCurrentAndSpecializePhis(state.loop.successor)) 1.1972 + return ControlStatus_Error; 1.1973 + if (current) { 1.1974 + JS_ASSERT(current->loopDepth() == loopDepth_); 1.1975 + graph().moveBlockToEnd(current); 1.1976 + } 1.1977 + 1.1978 + // Join the breaks together and continue parsing. 1.1979 + if (state.loop.breaks) { 1.1980 + MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc); 1.1981 + if (!block) 1.1982 + return ControlStatus_Error; 1.1983 + 1.1984 + if (current) { 1.1985 + current->end(MGoto::New(alloc(), block)); 1.1986 + if (!block->addPredecessor(alloc(), current)) 1.1987 + return ControlStatus_Error; 1.1988 + } 1.1989 + 1.1990 + if (!setCurrentAndSpecializePhis(block)) 1.1991 + return ControlStatus_Error; 1.1992 + } 1.1993 + 1.1994 + // If the loop is not gated on a condition, and has only returns, we'll 1.1995 + // reach this case. For example: 1.1996 + // do { ... return; } while (); 1.1997 + if (!current) 1.1998 + return ControlStatus_Ended; 1.1999 + 1.2000 + // Otherwise, the loop is gated on a condition and/or has breaks so keep 1.2001 + // parsing at the successor. 1.2002 + pc = current->pc(); 1.2003 + return ControlStatus_Joined; 1.2004 +} 1.2005 + 1.2006 +IonBuilder::ControlStatus 1.2007 +IonBuilder::finishLoop(CFGState &state, MBasicBlock *successor) 1.2008 +{ 1.2009 + JS_ASSERT(current); 1.2010 + 1.2011 + JS_ASSERT(loopDepth_); 1.2012 + loopDepth_--; 1.2013 + JS_ASSERT_IF(successor, successor->loopDepth() == loopDepth_); 1.2014 + 1.2015 + // Compute phis in the loop header and propagate them throughout the loop, 1.2016 + // including the successor. 1.2017 + AbortReason r = state.loop.entry->setBackedge(current); 1.2018 + if (r == AbortReason_Alloc) 1.2019 + return ControlStatus_Error; 1.2020 + if (r == AbortReason_Disable) { 1.2021 + // If there are types for variables on the backedge that were not 1.2022 + // present at the original loop header, then uses of the variables' 1.2023 + // phis may have generated incorrect nodes. The new types have been 1.2024 + // incorporated into the header phis, so remove all blocks for the 1.2025 + // loop body and restart with the new types. 1.2026 + return restartLoop(state); 1.2027 + } 1.2028 + 1.2029 + if (successor) { 1.2030 + graph().moveBlockToEnd(successor); 1.2031 + successor->inheritPhis(state.loop.entry); 1.2032 + } 1.2033 + 1.2034 + if (state.loop.breaks) { 1.2035 + // Propagate phis placed in the header to individual break exit points. 1.2036 + DeferredEdge *edge = state.loop.breaks; 1.2037 + while (edge) { 1.2038 + edge->block->inheritPhis(state.loop.entry); 1.2039 + edge = edge->next; 1.2040 + } 1.2041 + 1.2042 + // Create a catch block to join all break exits. 1.2043 + MBasicBlock *block = createBreakCatchBlock(state.loop.breaks, state.loop.exitpc); 1.2044 + if (!block) 1.2045 + return ControlStatus_Error; 1.2046 + 1.2047 + if (successor) { 1.2048 + // Finally, create an unconditional edge from the successor to the 1.2049 + // catch block. 1.2050 + successor->end(MGoto::New(alloc(), block)); 1.2051 + if (!block->addPredecessor(alloc(), successor)) 1.2052 + return ControlStatus_Error; 1.2053 + } 1.2054 + successor = block; 1.2055 + } 1.2056 + 1.2057 + if (!setCurrentAndSpecializePhis(successor)) 1.2058 + return ControlStatus_Error; 1.2059 + 1.2060 + // An infinite loop (for (;;) { }) will not have a successor. 
1.2061 + if (!current) 1.2062 + return ControlStatus_Ended; 1.2063 + 1.2064 + pc = current->pc(); 1.2065 + return ControlStatus_Joined; 1.2066 +} 1.2067 + 1.2068 +IonBuilder::ControlStatus 1.2069 +IonBuilder::restartLoop(CFGState state) 1.2070 +{ 1.2071 + spew("New types at loop header, restarting loop body"); 1.2072 + 1.2073 + if (js_JitOptions.limitScriptSize) { 1.2074 + if (++numLoopRestarts_ >= MAX_LOOP_RESTARTS) 1.2075 + return ControlStatus_Abort; 1.2076 + } 1.2077 + 1.2078 + MBasicBlock *header = state.loop.entry; 1.2079 + 1.2080 + // Remove all blocks in the loop body other than the header, which has phis 1.2081 + // of the appropriate type and incoming edges to preserve. 1.2082 + graph().removeBlocksAfter(header); 1.2083 + 1.2084 + // Remove all instructions from the header itself, and all resume points 1.2085 + // except the entry resume point. 1.2086 + header->discardAllInstructions(); 1.2087 + header->discardAllResumePoints(/* discardEntry = */ false); 1.2088 + header->setStackDepth(header->getPredecessor(0)->stackDepth()); 1.2089 + 1.2090 + popCfgStack(); 1.2091 + 1.2092 + loopDepth_++; 1.2093 + 1.2094 + if (!pushLoop(state.loop.initialState, state.loop.initialStopAt, header, state.loop.osr, 1.2095 + state.loop.loopHead, state.loop.initialPc, 1.2096 + state.loop.bodyStart, state.loop.bodyEnd, 1.2097 + state.loop.exitpc, state.loop.continuepc)) 1.2098 + { 1.2099 + return ControlStatus_Error; 1.2100 + } 1.2101 + 1.2102 + CFGState &nstate = cfgStack_.back(); 1.2103 + 1.2104 + nstate.loop.condpc = state.loop.condpc; 1.2105 + nstate.loop.updatepc = state.loop.updatepc; 1.2106 + nstate.loop.updateEnd = state.loop.updateEnd; 1.2107 + 1.2108 + // Don't specializePhis(), as the header has been visited before and the 1.2109 + // phis have already had their type set. 1.2110 + setCurrent(header); 1.2111 + 1.2112 + if (!jsop_loophead(nstate.loop.loopHead)) 1.2113 + return ControlStatus_Error; 1.2114 + 1.2115 + pc = nstate.loop.initialPc; 1.2116 + return ControlStatus_Jumped; 1.2117 +} 1.2118 + 1.2119 +IonBuilder::ControlStatus 1.2120 +IonBuilder::processDoWhileBodyEnd(CFGState &state) 1.2121 +{ 1.2122 + if (!processDeferredContinues(state)) 1.2123 + return ControlStatus_Error; 1.2124 + 1.2125 + // No current means control flow cannot reach the condition, so this will 1.2126 + // never loop. 1.2127 + if (!current) 1.2128 + return processBrokenLoop(state); 1.2129 + 1.2130 + MBasicBlock *header = newBlock(current, state.loop.updatepc); 1.2131 + if (!header) 1.2132 + return ControlStatus_Error; 1.2133 + current->end(MGoto::New(alloc(), header)); 1.2134 + 1.2135 + state.state = CFGState::DO_WHILE_LOOP_COND; 1.2136 + state.stopAt = state.loop.updateEnd; 1.2137 + pc = state.loop.updatepc; 1.2138 + if (!setCurrentAndSpecializePhis(header)) 1.2139 + return ControlStatus_Error; 1.2140 + return ControlStatus_Jumped; 1.2141 +} 1.2142 + 1.2143 +IonBuilder::ControlStatus 1.2144 +IonBuilder::processDoWhileCondEnd(CFGState &state) 1.2145 +{ 1.2146 + JS_ASSERT(JSOp(*pc) == JSOP_IFNE); 1.2147 + 1.2148 + // We're guaranteed a |current|, it's impossible to break or return from 1.2149 + // inside the conditional expression. 1.2150 + JS_ASSERT(current); 1.2151 + 1.2152 + // Pop the last value, and create the successor block. 1.2153 + MDefinition *vins = current->pop(); 1.2154 + MBasicBlock *successor = newBlock(current, GetNextPc(pc), loopDepth_ - 1); 1.2155 + if (!successor) 1.2156 + return ControlStatus_Error; 1.2157 + 1.2158 + // Test for do {} while(false) and don't create a loop in that case. 
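+ // For example, in |do { f(); } while (false);| the backedge is never + // taken, so the body is treated as straight-line code via + // processBrokenLoop below.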
1.2159 + if (vins->isConstant()) { 1.2160 + MConstant *cte = vins->toConstant(); 1.2161 + if (cte->value().isBoolean() && !cte->value().toBoolean()) { 1.2162 + current->end(MGoto::New(alloc(), successor)); 1.2163 + current = nullptr; 1.2164 + 1.2165 + state.loop.successor = successor; 1.2166 + return processBrokenLoop(state); 1.2167 + } 1.2168 + } 1.2169 + 1.2170 + // Create the test instruction and end the current block. 1.2171 + MTest *test = MTest::New(alloc(), vins, state.loop.entry, successor); 1.2172 + current->end(test); 1.2173 + return finishLoop(state, successor); 1.2174 +} 1.2175 + 1.2176 +IonBuilder::ControlStatus 1.2177 +IonBuilder::processWhileCondEnd(CFGState &state) 1.2178 +{ 1.2179 + JS_ASSERT(JSOp(*pc) == JSOP_IFNE || JSOp(*pc) == JSOP_IFEQ); 1.2180 + 1.2181 + // Balance the stack past the IFNE. 1.2182 + MDefinition *ins = current->pop(); 1.2183 + 1.2184 + // Create the body and successor blocks. 1.2185 + MBasicBlock *body = newBlock(current, state.loop.bodyStart); 1.2186 + state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1); 1.2187 + if (!body || !state.loop.successor) 1.2188 + return ControlStatus_Error; 1.2189 + 1.2190 + MTest *test; 1.2191 + if (JSOp(*pc) == JSOP_IFNE) 1.2192 + test = MTest::New(alloc(), ins, body, state.loop.successor); 1.2193 + else 1.2194 + test = MTest::New(alloc(), ins, state.loop.successor, body); 1.2195 + current->end(test); 1.2196 + 1.2197 + state.state = CFGState::WHILE_LOOP_BODY; 1.2198 + state.stopAt = state.loop.bodyEnd; 1.2199 + pc = state.loop.bodyStart; 1.2200 + if (!setCurrentAndSpecializePhis(body)) 1.2201 + return ControlStatus_Error; 1.2202 + return ControlStatus_Jumped; 1.2203 +} 1.2204 + 1.2205 +IonBuilder::ControlStatus 1.2206 +IonBuilder::processWhileBodyEnd(CFGState &state) 1.2207 +{ 1.2208 + if (!processDeferredContinues(state)) 1.2209 + return ControlStatus_Error; 1.2210 + 1.2211 + if (!current) 1.2212 + return processBrokenLoop(state); 1.2213 + 1.2214 + current->end(MGoto::New(alloc(), state.loop.entry)); 1.2215 + return finishLoop(state, state.loop.successor); 1.2216 +} 1.2217 + 1.2218 +IonBuilder::ControlStatus 1.2219 +IonBuilder::processForCondEnd(CFGState &state) 1.2220 +{ 1.2221 + JS_ASSERT(JSOp(*pc) == JSOP_IFNE); 1.2222 + 1.2223 + // Balance the stack past the IFNE. 1.2224 + MDefinition *ins = current->pop(); 1.2225 + 1.2226 + // Create the body and successor blocks. 1.2227 + MBasicBlock *body = newBlock(current, state.loop.bodyStart); 1.2228 + state.loop.successor = newBlock(current, state.loop.exitpc, loopDepth_ - 1); 1.2229 + if (!body || !state.loop.successor) 1.2230 + return ControlStatus_Error; 1.2231 + 1.2232 + MTest *test = MTest::New(alloc(), ins, body, state.loop.successor); 1.2233 + current->end(test); 1.2234 + 1.2235 + state.state = CFGState::FOR_LOOP_BODY; 1.2236 + state.stopAt = state.loop.bodyEnd; 1.2237 + pc = state.loop.bodyStart; 1.2238 + if (!setCurrentAndSpecializePhis(body)) 1.2239 + return ControlStatus_Error; 1.2240 + return ControlStatus_Jumped; 1.2241 +} 1.2242 + 1.2243 +IonBuilder::ControlStatus 1.2244 +IonBuilder::processForBodyEnd(CFGState &state) 1.2245 +{ 1.2246 + if (!processDeferredContinues(state)) 1.2247 + return ControlStatus_Error; 1.2248 + 1.2249 + // If there is no updatepc, just go right to processing what would be the 1.2250 + // end of the update clause. Otherwise, |current| might be nullptr; if this is 1.2251 + // the case, the update is unreachable anyway.
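+ // For example, |for (i = 0; i < n; ) { ... }| has no update clause, while + // in |for (i = 0; ; i++) { return x; }| the update clause exists but the + // body always returns, leaving no |current| to reach it.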
1.2252 + if (!state.loop.updatepc || !current) 1.2253 + return processForUpdateEnd(state); 1.2254 + 1.2255 + pc = state.loop.updatepc; 1.2256 + 1.2257 + state.state = CFGState::FOR_LOOP_UPDATE; 1.2258 + state.stopAt = state.loop.updateEnd; 1.2259 + return ControlStatus_Jumped; 1.2260 +} 1.2261 + 1.2262 +IonBuilder::ControlStatus 1.2263 +IonBuilder::processForUpdateEnd(CFGState &state) 1.2264 +{ 1.2265 + // If there is no current, we couldn't reach the loop edge and there was no 1.2266 + // update clause. 1.2267 + if (!current) 1.2268 + return processBrokenLoop(state); 1.2269 + 1.2270 + current->end(MGoto::New(alloc(), state.loop.entry)); 1.2271 + return finishLoop(state, state.loop.successor); 1.2272 +} 1.2273 + 1.2274 +IonBuilder::DeferredEdge * 1.2275 +IonBuilder::filterDeadDeferredEdges(DeferredEdge *edge) 1.2276 +{ 1.2277 + DeferredEdge *head = edge, *prev = nullptr; 1.2278 + 1.2279 + while (edge) { 1.2280 + if (edge->block->isDead()) { 1.2281 + if (prev) 1.2282 + prev->next = edge->next; 1.2283 + else 1.2284 + head = edge->next; 1.2285 + } else { 1.2286 + prev = edge; 1.2287 + } 1.2288 + edge = edge->next; 1.2289 + } 1.2290 + 1.2291 + // There must be at least one deferred edge from a block that was not 1.2292 + // deleted; blocks are deleted when restarting processing of a loop, and 1.2293 + // the final version of the loop body will have edges from live blocks. 1.2294 + JS_ASSERT(head); 1.2295 + 1.2296 + return head; 1.2297 +} 1.2298 + 1.2299 +bool 1.2300 +IonBuilder::processDeferredContinues(CFGState &state) 1.2301 +{ 1.2302 + // If there are any continues for this loop, and there is an update block, 1.2303 + // then we need to create a new basic block to house the update. 1.2304 + if (state.loop.continues) { 1.2305 + DeferredEdge *edge = filterDeadDeferredEdges(state.loop.continues); 1.2306 + 1.2307 + MBasicBlock *update = newBlock(edge->block, loops_.back().continuepc); 1.2308 + if (!update) 1.2309 + return false; 1.2310 + 1.2311 + if (current) { 1.2312 + current->end(MGoto::New(alloc(), update)); 1.2313 + if (!update->addPredecessor(alloc(), current)) 1.2314 + return false; 1.2315 + } 1.2316 + 1.2317 + // No need to use addPredecessor for the first edge, 1.2318 + // because it is already a predecessor. 1.2319 + edge->block->end(MGoto::New(alloc(), update)); 1.2320 + edge = edge->next; 1.2321 + 1.2322 + // Handle the remaining edges. 1.2323 + while (edge) { 1.2324 + edge->block->end(MGoto::New(alloc(), update)); 1.2325 + if (!update->addPredecessor(alloc(), edge->block)) 1.2326 + return false; 1.2327 + edge = edge->next; 1.2328 + } 1.2329 + state.loop.continues = nullptr; 1.2330 + 1.2331 + if (!setCurrentAndSpecializePhis(update)) 1.2332 + return false; 1.2333 + } 1.2334 + 1.2335 + return true; 1.2336 +} 1.2337 + 1.2338 +MBasicBlock * 1.2339 +IonBuilder::createBreakCatchBlock(DeferredEdge *edge, jsbytecode *pc) 1.2340 +{ 1.2341 + edge = filterDeadDeferredEdges(edge); 1.2342 + 1.2343 + // Create the block, using the first break statement as predecessor. 1.2344 + MBasicBlock *successor = newBlock(edge->block, pc); 1.2345 + if (!successor) 1.2346 + return nullptr; 1.2347 + 1.2348 + // No need to use addPredecessor for the first edge, 1.2349 + // because it is already a predecessor. 1.2350 + edge->block->end(MGoto::New(alloc(), successor)); 1.2351 + edge = edge->next; 1.2352 + 1.2353 + // Finish up remaining breaks.
1.2354 + while (edge) { 1.2355 + edge->block->end(MGoto::New(alloc(), successor)); 1.2356 + if (!successor->addPredecessor(alloc(), edge->block)) 1.2357 + return nullptr; 1.2358 + edge = edge->next; 1.2359 + } 1.2360 + 1.2361 + return successor; 1.2362 +} 1.2363 + 1.2364 +IonBuilder::ControlStatus 1.2365 +IonBuilder::processNextTableSwitchCase(CFGState &state) 1.2366 +{ 1.2367 + JS_ASSERT(state.state == CFGState::TABLE_SWITCH); 1.2368 + 1.2369 + state.tableswitch.currentBlock++; 1.2370 + 1.2371 + // Test if there are still unprocessed successors (cases/default). 1.2372 + if (state.tableswitch.currentBlock >= state.tableswitch.ins->numBlocks()) 1.2373 + return processSwitchEnd(state.tableswitch.breaks, state.tableswitch.exitpc); 1.2374 + 1.2375 + // Get the next successor. 1.2376 + MBasicBlock *successor = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock); 1.2377 + 1.2378 + // Add the current block as a predecessor if available. This means the 1.2379 + // previous case didn't end with a break statement, so control flow 1.2380 + // continues in this block. 1.2381 + if (current) { 1.2382 + current->end(MGoto::New(alloc(), successor)); 1.2383 + if (!successor->addPredecessor(alloc(), current)) 1.2384 + return ControlStatus_Error; 1.2385 + } 1.2386 + 1.2387 + // Insert successor after the current block, to maintain RPO. 1.2388 + graph().moveBlockToEnd(successor); 1.2389 + 1.2390 + // If this is the last successor, the block should stop at the end of the 1.2391 + // tableswitch; otherwise it should stop at the start of the next successor. 1.2392 + if (state.tableswitch.currentBlock+1 < state.tableswitch.ins->numBlocks()) 1.2393 + state.stopAt = state.tableswitch.ins->getBlock(state.tableswitch.currentBlock+1)->pc(); 1.2394 + else 1.2395 + state.stopAt = state.tableswitch.exitpc; 1.2396 + 1.2397 + if (!setCurrentAndSpecializePhis(successor)) 1.2398 + return ControlStatus_Error; 1.2399 + pc = current->pc(); 1.2400 + return ControlStatus_Jumped; 1.2401 +} 1.2402 + 1.2403 +IonBuilder::ControlStatus 1.2404 +IonBuilder::processAndOrEnd(CFGState &state) 1.2405 +{ 1.2406 + // We just processed the RHS of an && or || expression. 1.2407 + // Now jump to the join point (the false block). 1.2408 + current->end(MGoto::New(alloc(), state.branch.ifFalse)); 1.2409 + 1.2410 + if (!state.branch.ifFalse->addPredecessor(alloc(), current)) 1.2411 + return ControlStatus_Error; 1.2412 + 1.2413 + if (!setCurrentAndSpecializePhis(state.branch.ifFalse)) 1.2414 + return ControlStatus_Error; 1.2415 + graph().moveBlockToEnd(current); 1.2416 + pc = current->pc(); 1.2417 + return ControlStatus_Joined; 1.2418 +} 1.2419 + 1.2420 +IonBuilder::ControlStatus 1.2421 +IonBuilder::processLabelEnd(CFGState &state) 1.2422 +{ 1.2423 + JS_ASSERT(state.state == CFGState::LABEL); 1.2424 + 1.2425 + // If there are no breaks and no current, control flow is terminated. 1.2426 + if (!state.label.breaks && !current) 1.2427 + return ControlStatus_Ended; 1.2428 + 1.2429 + // If there are no breaks to this label, there's nothing to do.
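+ // For example, |foo: { f(); }| without a |break foo;| inside contributes + // no extra control flow edges; execution simply falls through.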
1.2430 + if (!state.label.breaks) 1.2431 + return ControlStatus_Joined; 1.2432 + 1.2433 + MBasicBlock *successor = createBreakCatchBlock(state.label.breaks, state.stopAt); 1.2434 + if (!successor) 1.2435 + return ControlStatus_Error; 1.2436 + 1.2437 + if (current) { 1.2438 + current->end(MGoto::New(alloc(), successor)); 1.2439 + if (!successor->addPredecessor(alloc(), current)) 1.2440 + return ControlStatus_Error; 1.2441 + } 1.2442 + 1.2443 + pc = state.stopAt; 1.2444 + if (!setCurrentAndSpecializePhis(successor)) 1.2445 + return ControlStatus_Error; 1.2446 + return ControlStatus_Joined; 1.2447 +} 1.2448 + 1.2449 +IonBuilder::ControlStatus 1.2450 +IonBuilder::processTryEnd(CFGState &state) 1.2451 +{ 1.2452 + JS_ASSERT(state.state == CFGState::TRY); 1.2453 + 1.2454 + if (!state.try_.successor) { 1.2455 + JS_ASSERT(!current); 1.2456 + return ControlStatus_Ended; 1.2457 + } 1.2458 + 1.2459 + if (current) { 1.2460 + current->end(MGoto::New(alloc(), state.try_.successor)); 1.2461 + 1.2462 + if (!state.try_.successor->addPredecessor(alloc(), current)) 1.2463 + return ControlStatus_Error; 1.2464 + } 1.2465 + 1.2466 + // Start parsing the code after this try-catch statement. 1.2467 + if (!setCurrentAndSpecializePhis(state.try_.successor)) 1.2468 + return ControlStatus_Error; 1.2469 + graph().moveBlockToEnd(current); 1.2470 + pc = current->pc(); 1.2471 + return ControlStatus_Joined; 1.2472 +} 1.2473 + 1.2474 +IonBuilder::ControlStatus 1.2475 +IonBuilder::processBreak(JSOp op, jssrcnote *sn) 1.2476 +{ 1.2477 + JS_ASSERT(op == JSOP_GOTO); 1.2478 + 1.2479 + JS_ASSERT(SN_TYPE(sn) == SRC_BREAK || 1.2480 + SN_TYPE(sn) == SRC_BREAK2LABEL); 1.2481 + 1.2482 + // Find the break target. 1.2483 + jsbytecode *target = pc + GetJumpOffset(pc); 1.2484 + DebugOnly<bool> found = false; 1.2485 + 1.2486 + if (SN_TYPE(sn) == SRC_BREAK2LABEL) { 1.2487 + for (size_t i = labels_.length() - 1; i < labels_.length(); i--) { 1.2488 + CFGState &cfg = cfgStack_[labels_[i].cfgEntry]; 1.2489 + JS_ASSERT(cfg.state == CFGState::LABEL); 1.2490 + if (cfg.stopAt == target) { 1.2491 + cfg.label.breaks = new(alloc()) DeferredEdge(current, cfg.label.breaks); 1.2492 + found = true; 1.2493 + break; 1.2494 + } 1.2495 + } 1.2496 + } else { 1.2497 + for (size_t i = loops_.length() - 1; i < loops_.length(); i--) { 1.2498 + CFGState &cfg = cfgStack_[loops_[i].cfgEntry]; 1.2499 + JS_ASSERT(cfg.isLoop()); 1.2500 + if (cfg.loop.exitpc == target) { 1.2501 + cfg.loop.breaks = new(alloc()) DeferredEdge(current, cfg.loop.breaks); 1.2502 + found = true; 1.2503 + break; 1.2504 + } 1.2505 + } 1.2506 + } 1.2507 + 1.2508 + JS_ASSERT(found); 1.2509 + 1.2510 + setCurrent(nullptr); 1.2511 + pc += js_CodeSpec[op].length; 1.2512 + return processControlEnd(); 1.2513 +} 1.2514 + 1.2515 +static inline jsbytecode * 1.2516 +EffectiveContinue(jsbytecode *pc) 1.2517 +{ 1.2518 + if (JSOp(*pc) == JSOP_GOTO) 1.2519 + return pc + GetJumpOffset(pc); 1.2520 + return pc; 1.2521 +} 1.2522 + 1.2523 +IonBuilder::ControlStatus 1.2524 +IonBuilder::processContinue(JSOp op) 1.2525 +{ 1.2526 + JS_ASSERT(op == JSOP_GOTO); 1.2527 + 1.2528 + // Find the target loop. 
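+ // A labeled continue may target an outer loop, so walk the loop stack + // from innermost to outermost. For example: + // + // outer: for (...) { + // for (...) { + // continue outer; + // } + // }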
1.2529 + CFGState *found = nullptr; 1.2530 + jsbytecode *target = pc + GetJumpOffset(pc); 1.2531 + for (size_t i = loops_.length() - 1; i < loops_.length(); i--) { 1.2532 + if (loops_[i].continuepc == target || 1.2533 + EffectiveContinue(loops_[i].continuepc) == target) 1.2534 + { 1.2535 + found = &cfgStack_[loops_[i].cfgEntry]; 1.2536 + break; 1.2537 + } 1.2538 + } 1.2539 + 1.2540 + // There must always be a valid target loop structure. If not, there's 1.2541 + // probably an off-by-something error in which pc we track. 1.2542 + JS_ASSERT(found); 1.2543 + CFGState &state = *found; 1.2544 + 1.2545 + state.loop.continues = new(alloc()) DeferredEdge(current, state.loop.continues); 1.2546 + 1.2547 + setCurrent(nullptr); 1.2548 + pc += js_CodeSpec[op].length; 1.2549 + return processControlEnd(); 1.2550 +} 1.2551 + 1.2552 +IonBuilder::ControlStatus 1.2553 +IonBuilder::processSwitchBreak(JSOp op) 1.2554 +{ 1.2555 + JS_ASSERT(op == JSOP_GOTO); 1.2556 + 1.2557 + // Find the target switch. 1.2558 + CFGState *found = nullptr; 1.2559 + jsbytecode *target = pc + GetJumpOffset(pc); 1.2560 + for (size_t i = switches_.length() - 1; i < switches_.length(); i--) { 1.2561 + if (switches_[i].continuepc == target) { 1.2562 + found = &cfgStack_[switches_[i].cfgEntry]; 1.2563 + break; 1.2564 + } 1.2565 + } 1.2566 + 1.2567 + // There must always be a valid target switch structure. If not, there's 1.2568 + // probably an off-by-something error in which pc we track. 1.2569 + JS_ASSERT(found); 1.2570 + CFGState &state = *found; 1.2571 + 1.2572 + DeferredEdge **breaks = nullptr; 1.2573 + switch (state.state) { 1.2574 + case CFGState::TABLE_SWITCH: 1.2575 + breaks = &state.tableswitch.breaks; 1.2576 + break; 1.2577 + case CFGState::COND_SWITCH_BODY: 1.2578 + breaks = &state.condswitch.breaks; 1.2579 + break; 1.2580 + default: 1.2581 + MOZ_ASSUME_UNREACHABLE("Unexpected switch state."); 1.2582 + } 1.2583 + 1.2584 + *breaks = new(alloc()) DeferredEdge(current, *breaks); 1.2585 + 1.2586 + setCurrent(nullptr); 1.2587 + pc += js_CodeSpec[op].length; 1.2588 + return processControlEnd(); 1.2589 +} 1.2590 + 1.2591 +IonBuilder::ControlStatus 1.2592 +IonBuilder::processSwitchEnd(DeferredEdge *breaks, jsbytecode *exitpc) 1.2593 +{ 1.2594 + // No break statements, no current. 1.2595 + // This means that control flow is cut off from this point 1.2596 + // (e.g. all cases have return statements). 1.2597 + if (!breaks && !current) 1.2598 + return ControlStatus_Ended; 1.2599 + 1.2600 + // Create the successor block. 1.2601 + // If there are breaks, create the block with the breaks as predecessors; 1.2602 + // else create a block with current as predecessor. 1.2603 + MBasicBlock *successor = nullptr; 1.2604 + if (breaks) 1.2605 + successor = createBreakCatchBlock(breaks, exitpc); 1.2606 + else 1.2607 + successor = newBlock(current, exitpc); 1.2608 + 1.2609 + if (!successor) 1.2610 + return ControlStatus_Ended; 1.2611 + 1.2612 + // If there is current, the current block flows into this one.
1.2613 + // So current is also a predecessor to this block. 1.2614 + if (current) { 1.2615 + current->end(MGoto::New(alloc(), successor)); 1.2616 + if (breaks) { 1.2617 + if (!successor->addPredecessor(alloc(), current)) 1.2618 + return ControlStatus_Error; 1.2619 + } 1.2620 + } 1.2621 + 1.2622 + pc = exitpc; 1.2623 + if (!setCurrentAndSpecializePhis(successor)) 1.2624 + return ControlStatus_Error; 1.2625 + return ControlStatus_Joined; 1.2626 +} 1.2627 + 1.2628 +IonBuilder::ControlStatus 1.2629 +IonBuilder::maybeLoop(JSOp op, jssrcnote *sn) 1.2630 +{ 1.2631 + // This function looks at the opcode and source note and tries to 1.2632 + // determine the structure of the loop. For some opcodes, like 1.2633 + // POP/NOP which are not explicitly control flow, this source note is 1.2634 + // optional. For opcodes with control flow, like GOTO, an unrecognized 1.2635 + // or not-present source note is a compilation failure. 1.2636 + switch (op) { 1.2637 + case JSOP_POP: 1.2638 + // for (init; ; update?) ... 1.2639 + if (sn && SN_TYPE(sn) == SRC_FOR) { 1.2640 + current->pop(); 1.2641 + return forLoop(op, sn); 1.2642 + } 1.2643 + break; 1.2644 + 1.2645 + case JSOP_NOP: 1.2646 + if (sn) { 1.2647 + // do { } while (cond) 1.2648 + if (SN_TYPE(sn) == SRC_WHILE) 1.2649 + return doWhileLoop(op, sn); 1.2650 + 1.2651 + 1.2652 + 1.2653 + // for (; ; update?) 1.2654 + if (SN_TYPE(sn) == SRC_FOR) 1.2655 + return forLoop(op, sn); 1.2656 + } 1.2657 + break; 1.2658 + 1.2659 + default: 1.2660 + MOZ_ASSUME_UNREACHABLE("unexpected opcode"); 1.2661 + } 1.2662 + 1.2663 + return ControlStatus_None; 1.2664 +} 1.2665 + 1.2666 +void 1.2667 +IonBuilder::assertValidLoopHeadOp(jsbytecode *pc) 1.2668 +{ 1.2669 +#ifdef DEBUG 1.2670 + JS_ASSERT(JSOp(*pc) == JSOP_LOOPHEAD); 1.2671 + 1.2672 + // Make sure this is the next opcode after the loop header, 1.2673 + // unless the for loop is unconditional. 1.2674 + CFGState &state = cfgStack_.back(); 1.2675 + JS_ASSERT_IF((JSOp)*(state.loop.entry->pc()) == JSOP_GOTO, 1.2676 + GetNextPc(state.loop.entry->pc()) == pc); 1.2677 + 1.2678 + // do-while loops have a source note. 1.2679 + jssrcnote *sn = info().getNote(gsn, pc); 1.2680 + if (sn) { 1.2681 + jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 0); 1.2682 + 1.2683 + jsbytecode *expected_ifne; 1.2684 + switch (state.state) { 1.2685 + case CFGState::DO_WHILE_LOOP_BODY: 1.2686 + expected_ifne = state.loop.updateEnd; 1.2687 + break; 1.2688 + 1.2689 + default: 1.2690 + MOZ_ASSUME_UNREACHABLE("JSOP_LOOPHEAD unexpected source note"); 1.2691 + } 1.2692 + 1.2693 + // Make sure this loop goes to the same ifne as the loop header's 1.2694 + // source notes or GOTO. 1.2695 + JS_ASSERT(ifne == expected_ifne); 1.2696 + } else { 1.2697 + JS_ASSERT(state.state != CFGState::DO_WHILE_LOOP_BODY); 1.2698 + } 1.2699 +#endif 1.2700 +} 1.2701 + 1.2702 +IonBuilder::ControlStatus 1.2703 +IonBuilder::doWhileLoop(JSOp op, jssrcnote *sn) 1.2704 +{ 1.2705 + // do { } while() loops have the following structure: 1.2706 + // NOP ; SRC_WHILE (offset to COND) 1.2707 + // LOOPHEAD ; SRC_WHILE (offset to IFNE) 1.2708 + // LOOPENTRY 1.2709 + // ... ; body 1.2710 + // ... 1.2711 + // COND ; start of condition 1.2712 + // ...
1.2713 + // IFNE -> ; goes to LOOPHEAD 1.2714 + int condition_offset = js_GetSrcNoteOffset(sn, 0); 1.2715 + jsbytecode *conditionpc = pc + condition_offset; 1.2716 + 1.2717 + jssrcnote *sn2 = info().getNote(gsn, pc+1); 1.2718 + int offset = js_GetSrcNoteOffset(sn2, 0); 1.2719 + jsbytecode *ifne = pc + offset + 1; 1.2720 + JS_ASSERT(ifne > pc); 1.2721 + 1.2722 + // Verify that the IFNE goes back to a loophead op. 1.2723 + jsbytecode *loopHead = GetNextPc(pc); 1.2724 + JS_ASSERT(JSOp(*loopHead) == JSOP_LOOPHEAD); 1.2725 + JS_ASSERT(loopHead == ifne + GetJumpOffset(ifne)); 1.2726 + 1.2727 + jsbytecode *loopEntry = GetNextPc(loopHead); 1.2728 + bool canOsr = LoopEntryCanIonOsr(loopEntry); 1.2729 + bool osr = info().hasOsrAt(loopEntry); 1.2730 + 1.2731 + if (osr) { 1.2732 + MBasicBlock *preheader = newOsrPreheader(current, loopEntry); 1.2733 + if (!preheader) 1.2734 + return ControlStatus_Error; 1.2735 + current->end(MGoto::New(alloc(), preheader)); 1.2736 + if (!setCurrentAndSpecializePhis(preheader)) 1.2737 + return ControlStatus_Error; 1.2738 + } 1.2739 + 1.2740 + unsigned stackPhiCount = 0; 1.2741 + MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); 1.2742 + if (!header) 1.2743 + return ControlStatus_Error; 1.2744 + current->end(MGoto::New(alloc(), header)); 1.2745 + 1.2746 + jsbytecode *loophead = GetNextPc(pc); 1.2747 + jsbytecode *bodyStart = GetNextPc(loophead); 1.2748 + jsbytecode *bodyEnd = conditionpc; 1.2749 + jsbytecode *exitpc = GetNextPc(ifne); 1.2750 + if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) 1.2751 + return ControlStatus_Error; 1.2752 + if (!pushLoop(CFGState::DO_WHILE_LOOP_BODY, conditionpc, header, osr, 1.2753 + loopHead, bodyStart, bodyStart, bodyEnd, exitpc, conditionpc)) 1.2754 + { 1.2755 + return ControlStatus_Error; 1.2756 + } 1.2757 + 1.2758 + CFGState &state = cfgStack_.back(); 1.2759 + state.loop.updatepc = conditionpc; 1.2760 + state.loop.updateEnd = ifne; 1.2761 + 1.2762 + if (!setCurrentAndSpecializePhis(header)) 1.2763 + return ControlStatus_Error; 1.2764 + if (!jsop_loophead(loophead)) 1.2765 + return ControlStatus_Error; 1.2766 + 1.2767 + pc = bodyStart; 1.2768 + return ControlStatus_Jumped; 1.2769 +} 1.2770 + 1.2771 +IonBuilder::ControlStatus 1.2772 +IonBuilder::whileOrForInLoop(jssrcnote *sn) 1.2773 +{ 1.2774 + // while (cond) { } loops have the following structure: 1.2775 + // GOTO cond ; SRC_WHILE (offset to IFNE) 1.2776 + // LOOPHEAD 1.2777 + // ... 1.2778 + // cond: 1.2779 + // LOOPENTRY 1.2780 + // ... 1.2781 + // IFNE ; goes to LOOPHEAD 1.2782 + // for (x in y) { } loops are similar; the cond will be a MOREITER. 1.2783 + JS_ASSERT(SN_TYPE(sn) == SRC_FOR_OF || SN_TYPE(sn) == SRC_FOR_IN || SN_TYPE(sn) == SRC_WHILE); 1.2784 + int ifneOffset = js_GetSrcNoteOffset(sn, 0); 1.2785 + jsbytecode *ifne = pc + ifneOffset; 1.2786 + JS_ASSERT(ifne > pc); 1.2787 + 1.2788 + // Verify that the IFNE goes back to a loophead op. 
1.2789 + JS_ASSERT(JSOp(*GetNextPc(pc)) == JSOP_LOOPHEAD); 1.2790 + JS_ASSERT(GetNextPc(pc) == ifne + GetJumpOffset(ifne)); 1.2791 + 1.2792 + jsbytecode *loopEntry = pc + GetJumpOffset(pc); 1.2793 + bool canOsr = LoopEntryCanIonOsr(loopEntry); 1.2794 + bool osr = info().hasOsrAt(loopEntry); 1.2795 + 1.2796 + if (osr) { 1.2797 + MBasicBlock *preheader = newOsrPreheader(current, loopEntry); 1.2798 + if (!preheader) 1.2799 + return ControlStatus_Error; 1.2800 + current->end(MGoto::New(alloc(), preheader)); 1.2801 + if (!setCurrentAndSpecializePhis(preheader)) 1.2802 + return ControlStatus_Error; 1.2803 + } 1.2804 + 1.2805 + unsigned stackPhiCount; 1.2806 + if (SN_TYPE(sn) == SRC_FOR_OF) 1.2807 + stackPhiCount = 2; 1.2808 + else if (SN_TYPE(sn) == SRC_FOR_IN) 1.2809 + stackPhiCount = 1; 1.2810 + else 1.2811 + stackPhiCount = 0; 1.2812 + 1.2813 + MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); 1.2814 + if (!header) 1.2815 + return ControlStatus_Error; 1.2816 + current->end(MGoto::New(alloc(), header)); 1.2817 + 1.2818 + // Skip past the JSOP_LOOPHEAD for the body start. 1.2819 + jsbytecode *loopHead = GetNextPc(pc); 1.2820 + jsbytecode *bodyStart = GetNextPc(loopHead); 1.2821 + jsbytecode *bodyEnd = pc + GetJumpOffset(pc); 1.2822 + jsbytecode *exitpc = GetNextPc(ifne); 1.2823 + if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) 1.2824 + return ControlStatus_Error; 1.2825 + if (!pushLoop(CFGState::WHILE_LOOP_COND, ifne, header, osr, 1.2826 + loopHead, bodyEnd, bodyStart, bodyEnd, exitpc)) 1.2827 + { 1.2828 + return ControlStatus_Error; 1.2829 + } 1.2830 + 1.2831 + // Parse the condition first. 1.2832 + if (!setCurrentAndSpecializePhis(header)) 1.2833 + return ControlStatus_Error; 1.2834 + if (!jsop_loophead(loopHead)) 1.2835 + return ControlStatus_Error; 1.2836 + 1.2837 + pc = bodyEnd; 1.2838 + return ControlStatus_Jumped; 1.2839 +} 1.2840 + 1.2841 +IonBuilder::ControlStatus 1.2842 +IonBuilder::forLoop(JSOp op, jssrcnote *sn) 1.2843 +{ 1.2844 + // Skip the NOP or POP. 1.2845 + JS_ASSERT(op == JSOP_POP || op == JSOP_NOP); 1.2846 + pc = GetNextPc(pc); 1.2847 + 1.2848 + jsbytecode *condpc = pc + js_GetSrcNoteOffset(sn, 0); 1.2849 + jsbytecode *updatepc = pc + js_GetSrcNoteOffset(sn, 1); 1.2850 + jsbytecode *ifne = pc + js_GetSrcNoteOffset(sn, 2); 1.2851 + jsbytecode *exitpc = GetNextPc(ifne); 1.2852 + 1.2853 + // for loops have the following structure: 1.2854 + // 1.2855 + // NOP or POP 1.2856 + // [GOTO cond | NOP] 1.2857 + // LOOPHEAD 1.2858 + // body: 1.2859 + // ; [body] 1.2860 + // [increment:] 1.2861 + // ; [increment] 1.2862 + // [cond:] 1.2863 + // LOOPENTRY 1.2864 + // GOTO body 1.2865 + // 1.2866 + // If there is a condition (condpc != ifne), this acts similarly to a while 1.2867 + // loop; otherwise, it acts like a do-while loop. 1.2868 + jsbytecode *bodyStart = pc; 1.2869 + jsbytecode *bodyEnd = updatepc; 1.2870 + jsbytecode *loopEntry = condpc; 1.2871 + if (condpc != ifne) { 1.2872 + JS_ASSERT(JSOp(*bodyStart) == JSOP_GOTO); 1.2873 + JS_ASSERT(bodyStart + GetJumpOffset(bodyStart) == condpc); 1.2874 + bodyStart = GetNextPc(bodyStart); 1.2875 + } else { 1.2876 + // No loop condition, such as for (j = 0; ; j++). 1.2877 + if (op != JSOP_NOP) { 1.2878 + // If the loop starts with POP, we have to skip a NOP.
1.2879 + JS_ASSERT(JSOp(*bodyStart) == JSOP_NOP); 1.2880 + bodyStart = GetNextPc(bodyStart); 1.2881 + } 1.2882 + loopEntry = GetNextPc(bodyStart); 1.2883 + } 1.2884 + jsbytecode *loopHead = bodyStart; 1.2885 + JS_ASSERT(JSOp(*bodyStart) == JSOP_LOOPHEAD); 1.2886 + JS_ASSERT(ifne + GetJumpOffset(ifne) == bodyStart); 1.2887 + bodyStart = GetNextPc(bodyStart); 1.2888 + 1.2889 + bool osr = info().hasOsrAt(loopEntry); 1.2890 + bool canOsr = LoopEntryCanIonOsr(loopEntry); 1.2891 + 1.2892 + if (osr) { 1.2893 + MBasicBlock *preheader = newOsrPreheader(current, loopEntry); 1.2894 + if (!preheader) 1.2895 + return ControlStatus_Error; 1.2896 + current->end(MGoto::New(alloc(), preheader)); 1.2897 + if (!setCurrentAndSpecializePhis(preheader)) 1.2898 + return ControlStatus_Error; 1.2899 + } 1.2900 + 1.2901 + unsigned stackPhiCount = 0; 1.2902 + MBasicBlock *header = newPendingLoopHeader(current, pc, osr, canOsr, stackPhiCount); 1.2903 + if (!header) 1.2904 + return ControlStatus_Error; 1.2905 + current->end(MGoto::New(alloc(), header)); 1.2906 + 1.2907 + // If there is no condition, we immediately parse the body. Otherwise, we 1.2908 + // parse the condition. 1.2909 + jsbytecode *stopAt; 1.2910 + CFGState::State initial; 1.2911 + if (condpc != ifne) { 1.2912 + pc = condpc; 1.2913 + stopAt = ifne; 1.2914 + initial = CFGState::FOR_LOOP_COND; 1.2915 + } else { 1.2916 + pc = bodyStart; 1.2917 + stopAt = bodyEnd; 1.2918 + initial = CFGState::FOR_LOOP_BODY; 1.2919 + } 1.2920 + 1.2921 + if (!analyzeNewLoopTypes(header, bodyStart, exitpc)) 1.2922 + return ControlStatus_Error; 1.2923 + if (!pushLoop(initial, stopAt, header, osr, 1.2924 + loopHead, pc, bodyStart, bodyEnd, exitpc, updatepc)) 1.2925 + { 1.2926 + return ControlStatus_Error; 1.2927 + } 1.2928 + 1.2929 + CFGState &state = cfgStack_.back(); 1.2930 + state.loop.condpc = (condpc != ifne) ? condpc : nullptr; 1.2931 + state.loop.updatepc = (updatepc != condpc) ? updatepc : nullptr; 1.2932 + if (state.loop.updatepc) 1.2933 + state.loop.updateEnd = condpc; 1.2934 + 1.2935 + if (!setCurrentAndSpecializePhis(header)) 1.2936 + return ControlStatus_Error; 1.2937 + if (!jsop_loophead(loopHead)) 1.2938 + return ControlStatus_Error; 1.2939 + 1.2940 + return ControlStatus_Jumped; 1.2941 +} 1.2942 + 1.2943 +int 1.2944 +IonBuilder::CmpSuccessors(const void *a, const void *b) 1.2945 +{ 1.2946 + const MBasicBlock *a0 = * (MBasicBlock * const *)a; 1.2947 + const MBasicBlock *b0 = * (MBasicBlock * const *)b; 1.2948 + if (a0->pc() == b0->pc()) 1.2949 + return 0; 1.2950 + 1.2951 + return (a0->pc() > b0->pc()) ? 1 : -1; 1.2952 +} 1.2953 + 1.2954 +IonBuilder::ControlStatus 1.2955 +IonBuilder::tableSwitch(JSOp op, jssrcnote *sn) 1.2956 +{ 1.2957 + // TableSwitch op contains the following data 1.2958 + // (length between data is JUMP_OFFSET_LEN) 1.2959 + // 1.2960 + // 0: Offset of default case 1.2961 + // 1: Lowest number in tableswitch 1.2962 + // 2: Highest number in tableswitch 1.2963 + // 3: Offset of case low 1.2964 + // 4: Offset of case low+1 1.2965 + // .: ... 1.2966 + // .: Offset of case high 1.2967 + 1.2968 + JS_ASSERT(op == JSOP_TABLESWITCH); 1.2969 + JS_ASSERT(SN_TYPE(sn) == SRC_TABLESWITCH); 1.2970 + 1.2971 + // Pop input. 
1.2972 + MDefinition *ins = current->pop(); 1.2973 + 1.2974 + // Get the default and exit pc 1.2975 + jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0); 1.2976 + jsbytecode *defaultpc = pc + GET_JUMP_OFFSET(pc); 1.2977 + 1.2978 + JS_ASSERT(defaultpc > pc && defaultpc <= exitpc); 1.2979 + 1.2980 + // Get the low and high from the tableswitch 1.2981 + jsbytecode *pc2 = pc; 1.2982 + pc2 += JUMP_OFFSET_LEN; 1.2983 + int low = GET_JUMP_OFFSET(pc2); 1.2984 + pc2 += JUMP_OFFSET_LEN; 1.2985 + int high = GET_JUMP_OFFSET(pc2); 1.2986 + pc2 += JUMP_OFFSET_LEN; 1.2987 + 1.2988 + // Create MIR instruction 1.2989 + MTableSwitch *tableswitch = MTableSwitch::New(alloc(), ins, low, high); 1.2990 + 1.2991 + // Create default case 1.2992 + MBasicBlock *defaultcase = newBlock(current, defaultpc); 1.2993 + if (!defaultcase) 1.2994 + return ControlStatus_Error; 1.2995 + tableswitch->addDefault(defaultcase); 1.2996 + tableswitch->addBlock(defaultcase); 1.2997 + 1.2998 + // Create cases 1.2999 + jsbytecode *casepc = nullptr; 1.3000 + for (int i = 0; i < high-low+1; i++) { 1.3001 + casepc = pc + GET_JUMP_OFFSET(pc2); 1.3002 + 1.3003 + JS_ASSERT(casepc >= pc && casepc <= exitpc); 1.3004 + 1.3005 + MBasicBlock *caseblock = newBlock(current, casepc); 1.3006 + if (!caseblock) 1.3007 + return ControlStatus_Error; 1.3008 + 1.3009 + // If the casepc equals the current pc, it is not a written case, 1.3010 + // but a filled gap. That way we can use a tableswitch instead of 1.3011 + // condswitch, even if not all numbers are consecutive. 1.3012 + // In that case this block goes to the default case 1.3013 + if (casepc == pc) { 1.3014 + caseblock->end(MGoto::New(alloc(), defaultcase)); 1.3015 + if (!defaultcase->addPredecessor(alloc(), caseblock)) 1.3016 + return ControlStatus_Error; 1.3017 + } 1.3018 + 1.3019 + tableswitch->addCase(tableswitch->addSuccessor(caseblock)); 1.3020 + 1.3021 + // If this is an actual case (not filled gap), 1.3022 + // add this block to the list that still needs to get processed 1.3023 + if (casepc != pc) 1.3024 + tableswitch->addBlock(caseblock); 1.3025 + 1.3026 + pc2 += JUMP_OFFSET_LEN; 1.3027 + } 1.3028 + 1.3029 + // Move defaultcase to the end, to maintain RPO. 1.3030 + graph().moveBlockToEnd(defaultcase); 1.3031 + 1.3032 + JS_ASSERT(tableswitch->numCases() == (uint32_t)(high - low + 1)); 1.3033 + JS_ASSERT(tableswitch->numSuccessors() > 0); 1.3034 + 1.3035 + // Sort the list of blocks that still needs to get processed by pc 1.3036 + qsort(tableswitch->blocks(), tableswitch->numBlocks(), 1.3037 + sizeof(MBasicBlock*), CmpSuccessors); 1.3038 + 1.3039 + // Create info 1.3040 + ControlFlowInfo switchinfo(cfgStack_.length(), exitpc); 1.3041 + if (!switches_.append(switchinfo)) 1.3042 + return ControlStatus_Error; 1.3043 + 1.3044 + // Use a state to retrieve some information 1.3045 + CFGState state = CFGState::TableSwitch(exitpc, tableswitch); 1.3046 + 1.3047 + // Save the MIR instruction as last instruction of this block. 
1.3048 + current->end(tableswitch); 1.3049 + 1.3050 + // If there is only one successor, the block should stop at the end of the 1.3051 + // switch; otherwise it should stop at the start of the next successor. 1.3052 + if (tableswitch->numBlocks() > 1) 1.3053 + state.stopAt = tableswitch->getBlock(1)->pc(); 1.3054 + if (!setCurrentAndSpecializePhis(tableswitch->getBlock(0))) 1.3055 + return ControlStatus_Error; 1.3056 + 1.3057 + if (!cfgStack_.append(state)) 1.3058 + return ControlStatus_Error; 1.3059 + 1.3060 + pc = current->pc(); 1.3061 + return ControlStatus_Jumped; 1.3062 +} 1.3063 + 1.3064 +bool 1.3065 +IonBuilder::filterTypesAtTest(MTest *test) 1.3066 +{ 1.3067 + JS_ASSERT(test->ifTrue() == current || test->ifFalse() == current); 1.3068 + 1.3069 + bool trueBranch = test->ifTrue() == current; 1.3070 + 1.3071 + MDefinition *subject = nullptr; 1.3072 + bool removeUndefined; 1.3073 + bool removeNull; 1.3074 + 1.3075 + test->filtersUndefinedOrNull(trueBranch, &subject, &removeUndefined, &removeNull); 1.3076 + 1.3077 + // The test does not filter out undefined or null. 1.3078 + if (!subject) 1.3079 + return true; 1.3080 + 1.3081 + // There is no TypeSet that can get filtered. 1.3082 + if (!subject->resultTypeSet() || subject->resultTypeSet()->unknown()) 1.3083 + return true; 1.3084 + 1.3085 + // Only do this optimization if the typeset does contain null or undefined. 1.3086 + if ((!(removeUndefined && subject->resultTypeSet()->hasType(types::Type::UndefinedType())) && 1.3087 + !(removeNull && subject->resultTypeSet()->hasType(types::Type::NullType())))) 1.3088 + { 1.3089 + return true; 1.3090 + } 1.3091 + 1.3092 + // Find all values on the stack that correspond to the subject 1.3093 + // and replace them with a MIR node carrying the filtered TypeSet 1.3094 + // information. Create the replacement MIR node lazily upon first occurrence. 1.3095 + MDefinition *replace = nullptr; 1.3096 + for (uint32_t i = 0; i < current->stackDepth(); i++) { 1.3097 + if (current->getSlot(i) != subject) 1.3098 + continue; 1.3099 + 1.3100 + // Create the replacement MIR with the filtered TypeSet. 1.3101 + if (!replace) { 1.3102 + types::TemporaryTypeSet *type = 1.3103 + subject->resultTypeSet()->filter(alloc_->lifoAlloc(), removeUndefined, 1.3104 + removeNull); 1.3105 + if (!type) 1.3106 + return false; 1.3107 + 1.3108 + replace = ensureDefiniteTypeSet(subject, type); 1.3109 + // Make sure we don't hoist it above the MTest; we can use the 1.3110 + // 'dependency' of an MInstruction. This is normally used by 1.3111 + // Alias Analysis, but won't get overwritten, since this 1.3112 + // instruction doesn't have an AliasSet. 1.3113 + replace->setDependency(test); 1.3114 + } 1.3115 + 1.3116 + current->setSlot(i, replace); 1.3117 + } 1.3118 + 1.3119 + return true; 1.3120 +} 1.3121 + 1.3122 +bool 1.3123 +IonBuilder::jsop_label() 1.3124 +{ 1.3125 + JS_ASSERT(JSOp(*pc) == JSOP_LABEL); 1.3126 + 1.3127 + jsbytecode *endpc = pc + GET_JUMP_OFFSET(pc); 1.3128 + JS_ASSERT(endpc > pc); 1.3129 + 1.3130 + ControlFlowInfo label(cfgStack_.length(), endpc); 1.3131 + if (!labels_.append(label)) 1.3132 + return false; 1.3133 + 1.3134 + return cfgStack_.append(CFGState::Label(endpc)); 1.3135 +} 1.3136 + 1.3137 +bool 1.3138 +IonBuilder::jsop_condswitch() 1.3139 +{ 1.3140 + // CondSwitch op looks as follows: 1.3141 + // condswitch [length +exit_pc; first case offset +next-case ] 1.3142 + // { 1.3143 + // { 1.3144 + // ... any code ... 1.3145 + // case (+jump) [pcdelta offset +next-case] 1.3146 + // }+ 1.3147 + // default (+jump) 1.3148 + // ... jump targets ...
1.3149 + // } 1.3150 + // 1.3151 + // The default case is always emitted even if there is no default case in 1.3152 + // the source. The last case statement pcdelta source note might have a 0 1.3153 + // offset on the last case (not all the time). 1.3154 + // 1.3155 + // A conditional switch evaluates the condition of each case and compares 1.3156 + // it to the switch value with a strict equality. Case conditions are 1.3157 + // iterated linearly until one matches. If a case succeeds, the flow jumps into 1.3158 + // the corresponding body block. The body block might alias others and 1.3159 + // might continue into the next body block if the body is not terminated with 1.3160 + // a break. 1.3161 + // 1.3162 + // Algorithm: 1.3163 + // 1/ Loop over the case chain to reach the default target 1.3164 + // & Estimate the number of unique bodies. 1.3165 + // 2/ Generate code for all cases (see processCondSwitchCase). 1.3166 + // 3/ Generate code for all bodies (see processCondSwitchBody). 1.3167 + 1.3168 + JS_ASSERT(JSOp(*pc) == JSOP_CONDSWITCH); 1.3169 + jssrcnote *sn = info().getNote(gsn, pc); 1.3170 + JS_ASSERT(SN_TYPE(sn) == SRC_CONDSWITCH); 1.3171 + 1.3172 + // Get the exit pc. 1.3173 + jsbytecode *exitpc = pc + js_GetSrcNoteOffset(sn, 0); 1.3174 + jsbytecode *firstCase = pc + js_GetSrcNoteOffset(sn, 1); 1.3175 + 1.3176 + // Iterate all cases in the conditional switch. 1.3177 + // - Stop at the default case (always emitted after the last case). 1.3178 + // - Estimate the number of unique bodies. This estimate might be off by 1 1.3179 + // if the default body aliases a case body. 1.3180 + jsbytecode *curCase = firstCase; 1.3181 + jsbytecode *lastTarget = GetJumpOffset(curCase) + curCase; 1.3182 + size_t nbBodies = 2; // default target and the first body. 1.3183 + 1.3184 + JS_ASSERT(pc < curCase && curCase <= exitpc); 1.3185 + while (JSOp(*curCase) == JSOP_CASE) { 1.3186 + // Fetch the next case. 1.3187 + jssrcnote *caseSn = info().getNote(gsn, curCase); 1.3188 + JS_ASSERT(caseSn && SN_TYPE(caseSn) == SRC_NEXTCASE); 1.3189 + ptrdiff_t off = js_GetSrcNoteOffset(caseSn, 0); 1.3190 + curCase = off ? curCase + off : GetNextPc(curCase); 1.3191 + JS_ASSERT(pc < curCase && curCase <= exitpc); 1.3192 + 1.3193 + // Count non-aliased cases. 1.3194 + jsbytecode *curTarget = GetJumpOffset(curCase) + curCase; 1.3195 + if (lastTarget < curTarget) 1.3196 + nbBodies++; 1.3197 + lastTarget = curTarget; 1.3198 + } 1.3199 + 1.3200 + // The current case should now be the default case, which jumps to the body 1.3201 + // of the default case, which might be behind the last target. 1.3202 + JS_ASSERT(JSOp(*curCase) == JSOP_DEFAULT); 1.3203 + jsbytecode *defaultTarget = GetJumpOffset(curCase) + curCase; 1.3204 + JS_ASSERT(curCase < defaultTarget && defaultTarget <= exitpc); 1.3205 + 1.3206 + // Allocate the current graph state. 1.3207 + CFGState state = CFGState::CondSwitch(this, exitpc, defaultTarget); 1.3208 + if (!state.condswitch.bodies || !state.condswitch.bodies->init(alloc(), nbBodies)) 1.3209 + return false; 1.3210 + 1.3211 + // We loop on case conditions with processCondSwitchCase.
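+ // A conditional switch is typically emitted when the case values cannot + // form a dense table of integer constants, e.g.: + // + // switch (v) { + // case f(): ... + // case g(): ... + // default: ... + // }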
1.3212 + JS_ASSERT(JSOp(*firstCase) == JSOP_CASE); 1.3213 + state.stopAt = firstCase; 1.3214 + state.state = CFGState::COND_SWITCH_CASE; 1.3215 + 1.3216 + return cfgStack_.append(state); 1.3217 +} 1.3218 + 1.3219 +IonBuilder::CFGState 1.3220 +IonBuilder::CFGState::CondSwitch(IonBuilder *builder, jsbytecode *exitpc, jsbytecode *defaultTarget) 1.3221 +{ 1.3222 + CFGState state; 1.3223 + state.state = COND_SWITCH_CASE; 1.3224 + state.stopAt = nullptr; 1.3225 + state.condswitch.bodies = (FixedList<MBasicBlock *> *)builder->alloc_->allocate( 1.3226 + sizeof(FixedList<MBasicBlock *>)); 1.3227 + state.condswitch.currentIdx = 0; 1.3228 + state.condswitch.defaultTarget = defaultTarget; 1.3229 + state.condswitch.defaultIdx = uint32_t(-1); 1.3230 + state.condswitch.exitpc = exitpc; 1.3231 + state.condswitch.breaks = nullptr; 1.3232 + return state; 1.3233 +} 1.3234 + 1.3235 +IonBuilder::CFGState 1.3236 +IonBuilder::CFGState::Label(jsbytecode *exitpc) 1.3237 +{ 1.3238 + CFGState state; 1.3239 + state.state = LABEL; 1.3240 + state.stopAt = exitpc; 1.3241 + state.label.breaks = nullptr; 1.3242 + return state; 1.3243 +} 1.3244 + 1.3245 +IonBuilder::CFGState 1.3246 +IonBuilder::CFGState::Try(jsbytecode *exitpc, MBasicBlock *successor) 1.3247 +{ 1.3248 + CFGState state; 1.3249 + state.state = TRY; 1.3250 + state.stopAt = exitpc; 1.3251 + state.try_.successor = successor; 1.3252 + return state; 1.3253 +} 1.3254 + 1.3255 +IonBuilder::ControlStatus 1.3256 +IonBuilder::processCondSwitchCase(CFGState &state) 1.3257 +{ 1.3258 + JS_ASSERT(state.state == CFGState::COND_SWITCH_CASE); 1.3259 + JS_ASSERT(!state.condswitch.breaks); 1.3260 + JS_ASSERT(current); 1.3261 + JS_ASSERT(JSOp(*pc) == JSOP_CASE); 1.3262 + FixedList<MBasicBlock *> &bodies = *state.condswitch.bodies; 1.3263 + jsbytecode *defaultTarget = state.condswitch.defaultTarget; 1.3264 + uint32_t &currentIdx = state.condswitch.currentIdx; 1.3265 + jsbytecode *lastTarget = currentIdx ? bodies[currentIdx - 1]->pc() : nullptr; 1.3266 + 1.3267 + // Fetch the following case in which we will continue. 1.3268 + jssrcnote *sn = info().getNote(gsn, pc); 1.3269 + ptrdiff_t off = js_GetSrcNoteOffset(sn, 0); 1.3270 + jsbytecode *casePc = off ? pc + off : GetNextPc(pc); 1.3271 + bool caseIsDefault = JSOp(*casePc) == JSOP_DEFAULT; 1.3272 + JS_ASSERT(JSOp(*casePc) == JSOP_CASE || caseIsDefault); 1.3273 + 1.3274 + // Allocate the block of the matching case. 1.3275 + bool bodyIsNew = false; 1.3276 + MBasicBlock *bodyBlock = nullptr; 1.3277 + jsbytecode *bodyTarget = pc + GetJumpOffset(pc); 1.3278 + if (lastTarget < bodyTarget) { 1.3279 + // Check if the default body is in the middle or aliases the current target. 1.3280 + if (lastTarget < defaultTarget && defaultTarget <= bodyTarget) { 1.3281 + JS_ASSERT(state.condswitch.defaultIdx == uint32_t(-1)); 1.3282 + state.condswitch.defaultIdx = currentIdx; 1.3283 + bodies[currentIdx] = nullptr; 1.3284 + // If the default body does not alias any case body, it will be allocated 1.3285 + // later and stored in the defaultIdx location. 1.3286 + if (defaultTarget < bodyTarget) 1.3287 + currentIdx++; 1.3288 + } 1.3289 + 1.3290 + bodyIsNew = true; 1.3291 + // Pop switch and case operands. 1.3292 + bodyBlock = newBlockPopN(current, bodyTarget, 2); 1.3293 + bodies[currentIdx++] = bodyBlock; 1.3294 + } else { 1.3295 + // This body aliases the previous one.
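+ // For example, fall-through cases share a single body: + // + // switch (v) { + // case a: + // case b: f(); break; + // } + // + // Both case conditions jump to the same target, so the second CASE + // reuses the block allocated for the first.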
1.3296 + JS_ASSERT(lastTarget == bodyTarget); 1.3297 + JS_ASSERT(currentIdx > 0); 1.3298 + bodyBlock = bodies[currentIdx - 1]; 1.3299 + } 1.3300 + 1.3301 + if (!bodyBlock) 1.3302 + return ControlStatus_Error; 1.3303 + 1.3304 + lastTarget = bodyTarget; 1.3305 + 1.3306 + // Allocate the block of the non-matching case. This can either be a normal 1.3307 + // case or the default case. 1.3308 + bool caseIsNew = false; 1.3309 + MBasicBlock *caseBlock = nullptr; 1.3310 + if (!caseIsDefault) { 1.3311 + caseIsNew = true; 1.3312 + // Pop the case operand. 1.3313 + caseBlock = newBlockPopN(current, GetNextPc(pc), 1); 1.3314 + } else { 1.3315 + // The non-matching case is the default case, which jumps directly to its 1.3316 + // body. Skip the creation of a default case block and directly create 1.3317 + // the default body if it does not alias any previous body. 1.3318 + 1.3319 + if (state.condswitch.defaultIdx == uint32_t(-1)) { 1.3320 + // The default target is the last target. 1.3321 + JS_ASSERT(lastTarget < defaultTarget); 1.3322 + state.condswitch.defaultIdx = currentIdx++; 1.3323 + caseIsNew = true; 1.3324 + } else if (bodies[state.condswitch.defaultIdx] == nullptr) { 1.3325 + // The default target is in the middle and it does not alias any 1.3326 + // case target. 1.3327 + JS_ASSERT(defaultTarget < lastTarget); 1.3328 + caseIsNew = true; 1.3329 + } else { 1.3330 + // The default target is in the middle and it aliases a case target. 1.3331 + JS_ASSERT(defaultTarget <= lastTarget); 1.3332 + caseBlock = bodies[state.condswitch.defaultIdx]; 1.3333 + } 1.3334 + 1.3335 + // Allocate and register the default body. 1.3336 + if (caseIsNew) { 1.3337 + // Pop the case & switch operands. 1.3338 + caseBlock = newBlockPopN(current, defaultTarget, 2); 1.3339 + bodies[state.condswitch.defaultIdx] = caseBlock; 1.3340 + } 1.3341 + } 1.3342 + 1.3343 + if (!caseBlock) 1.3344 + return ControlStatus_Error; 1.3345 + 1.3346 + // Terminate the last case condition block by emitting the code 1.3347 + // corresponding to JSOP_CASE bytecode. 1.3348 + if (bodyBlock != caseBlock) { 1.3349 + MDefinition *caseOperand = current->pop(); 1.3350 + MDefinition *switchOperand = current->peek(-1); 1.3351 + MCompare *cmpResult = MCompare::New(alloc(), switchOperand, caseOperand, JSOP_STRICTEQ); 1.3352 + cmpResult->infer(inspector, pc); 1.3353 + JS_ASSERT(!cmpResult->isEffectful()); 1.3354 + current->add(cmpResult); 1.3355 + current->end(MTest::New(alloc(), cmpResult, bodyBlock, caseBlock)); 1.3356 + 1.3357 + // Add last case as predecessor of the body if the body is aliasing 1.3358 + // the previous case body. 1.3359 + if (!bodyIsNew && !bodyBlock->addPredecessorPopN(alloc(), current, 1)) 1.3360 + return ControlStatus_Error; 1.3361 + 1.3362 + // Add last case as predecessor of the non-matching case if the 1.3363 + // non-matching case is an aliased default case. We need to pop the 1.3364 + // switch operand as we skip the default case block and use the default 1.3365 + // body block directly. 1.3366 + JS_ASSERT_IF(!caseIsNew, caseIsDefault); 1.3367 + if (!caseIsNew && !caseBlock->addPredecessorPopN(alloc(), current, 1)) 1.3368 + return ControlStatus_Error; 1.3369 + } else { 1.3370 + // The default case aliases the last case body.
1.3371 + JS_ASSERT(caseIsDefault); 1.3372 + current->pop(); // Case operand 1.3373 + current->pop(); // Switch operand 1.3374 + current->end(MGoto::New(alloc(), bodyBlock)); 1.3375 + if (!bodyIsNew && !bodyBlock->addPredecessor(alloc(), current)) 1.3376 + return ControlStatus_Error; 1.3377 + } 1.3378 + 1.3379 + if (caseIsDefault) { 1.3380 + // The last case condition is finished. Loop in processCondSwitchBody, 1.3381 + // with potential stops in processSwitchBreak. Check that the bodies 1.3382 + // fixed list is over-estimated by at most 1, and shrink it so that its 1.3383 + // length can be used as an upper bound while iterating bodies. 1.3384 + JS_ASSERT(currentIdx == bodies.length() || currentIdx + 1 == bodies.length()); 1.3385 + bodies.shrink(bodies.length() - currentIdx); 1.3386 + 1.3387 + // Handle break statements in processSwitchBreak while processing 1.3388 + // bodies. 1.3389 + ControlFlowInfo breakInfo(cfgStack_.length() - 1, state.condswitch.exitpc); 1.3390 + if (!switches_.append(breakInfo)) 1.3391 + return ControlStatus_Error; 1.3392 + 1.3393 + // Jump into the first body. 1.3394 + currentIdx = 0; 1.3395 + setCurrent(nullptr); 1.3396 + state.state = CFGState::COND_SWITCH_BODY; 1.3397 + return processCondSwitchBody(state); 1.3398 + } 1.3399 + 1.3400 + // Continue until the case condition. 1.3401 + if (!setCurrentAndSpecializePhis(caseBlock)) 1.3402 + return ControlStatus_Error; 1.3403 + pc = current->pc(); 1.3404 + state.stopAt = casePc; 1.3405 + return ControlStatus_Jumped; 1.3406 +} 1.3407 + 1.3408 +IonBuilder::ControlStatus 1.3409 +IonBuilder::processCondSwitchBody(CFGState &state) 1.3410 +{ 1.3411 + JS_ASSERT(state.state == CFGState::COND_SWITCH_BODY); 1.3412 + JS_ASSERT(pc <= state.condswitch.exitpc); 1.3413 + FixedList<MBasicBlock *> &bodies = *state.condswitch.bodies; 1.3414 + uint32_t &currentIdx = state.condswitch.currentIdx; 1.3415 + 1.3416 + JS_ASSERT(currentIdx <= bodies.length()); 1.3417 + if (currentIdx == bodies.length()) { 1.3418 + JS_ASSERT_IF(current, pc == state.condswitch.exitpc); 1.3419 + return processSwitchEnd(state.condswitch.breaks, state.condswitch.exitpc); 1.3420 + } 1.3421 + 1.3422 + // Get the next body. 1.3423 + MBasicBlock *nextBody = bodies[currentIdx++]; 1.3424 + JS_ASSERT_IF(current, pc == nextBody->pc()); 1.3425 + 1.3426 + // Fix the reverse post-order iteration. 1.3427 + graph().moveBlockToEnd(nextBody); 1.3428 + 1.3429 + // The last body continues into the new one. 1.3430 + if (current) { 1.3431 + current->end(MGoto::New(alloc(), nextBody)); 1.3432 + if (!nextBody->addPredecessor(alloc(), current)) 1.3433 + return ControlStatus_Error; 1.3434 + } 1.3435 + 1.3436 + // Continue in the next body. 1.3437 + if (!setCurrentAndSpecializePhis(nextBody)) 1.3438 + return ControlStatus_Error; 1.3439 + pc = current->pc(); 1.3440 + 1.3441 + if (currentIdx < bodies.length()) 1.3442 + state.stopAt = bodies[currentIdx]->pc(); 1.3443 + else 1.3444 + state.stopAt = state.condswitch.exitpc; 1.3445 + return ControlStatus_Jumped; 1.3446 +} 1.3447 + 1.3448 +bool 1.3449 +IonBuilder::jsop_andor(JSOp op) 1.3450 +{ 1.3451 + JS_ASSERT(op == JSOP_AND || op == JSOP_OR); 1.3452 + 1.3453 + jsbytecode *rhsStart = pc + js_CodeSpec[op].length; 1.3454 + jsbytecode *joinStart = pc + GetJumpOffset(pc); 1.3455 + JS_ASSERT(joinStart > pc); 1.3456 + 1.3457 + // We have to leave the LHS on the stack.
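// For instance, in |a || b| the value of |a| is the expression's result when
// it is truthy, so the test must not consume it; the fall-through path pops
// it before evaluating |b|.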
1.3458 + MDefinition *lhs = current->peek(-1); 1.3459 + 1.3460 + MBasicBlock *evalRhs = newBlock(current, rhsStart); 1.3461 + MBasicBlock *join = newBlock(current, joinStart); 1.3462 + if (!evalRhs || !join) 1.3463 + return false; 1.3464 + 1.3465 + MTest *test = (op == JSOP_AND) 1.3466 + ? MTest::New(alloc(), lhs, evalRhs, join) 1.3467 + : MTest::New(alloc(), lhs, join, evalRhs); 1.3468 + test->infer(); 1.3469 + current->end(test); 1.3470 + 1.3471 + if (!cfgStack_.append(CFGState::AndOr(joinStart, join))) 1.3472 + return false; 1.3473 + 1.3474 + return setCurrentAndSpecializePhis(evalRhs); 1.3475 +} 1.3476 + 1.3477 +bool 1.3478 +IonBuilder::jsop_dup2() 1.3479 +{ 1.3480 + uint32_t lhsSlot = current->stackDepth() - 2; 1.3481 + uint32_t rhsSlot = current->stackDepth() - 1; 1.3482 + current->pushSlot(lhsSlot); 1.3483 + current->pushSlot(rhsSlot); 1.3484 + return true; 1.3485 +} 1.3486 + 1.3487 +bool 1.3488 +IonBuilder::jsop_loophead(jsbytecode *pc) 1.3489 +{ 1.3490 + assertValidLoopHeadOp(pc); 1.3491 + 1.3492 + current->add(MInterruptCheck::New(alloc())); 1.3493 + insertRecompileCheck(); 1.3494 + 1.3495 + return true; 1.3496 +} 1.3497 + 1.3498 +bool 1.3499 +IonBuilder::jsop_ifeq(JSOp op) 1.3500 +{ 1.3501 + // IFEQ always has a forward offset. 1.3502 + jsbytecode *trueStart = pc + js_CodeSpec[op].length; 1.3503 + jsbytecode *falseStart = pc + GetJumpOffset(pc); 1.3504 + JS_ASSERT(falseStart > pc); 1.3505 + 1.3506 + // We only handle cases that emit source notes. 1.3507 + jssrcnote *sn = info().getNote(gsn, pc); 1.3508 + if (!sn) 1.3509 + return abort("expected sourcenote"); 1.3510 + 1.3511 + MDefinition *ins = current->pop(); 1.3512 + 1.3513 + // Create true and false branches. 1.3514 + MBasicBlock *ifTrue = newBlock(current, trueStart); 1.3515 + MBasicBlock *ifFalse = newBlock(current, falseStart); 1.3516 + if (!ifTrue || !ifFalse) 1.3517 + return false; 1.3518 + 1.3519 + MTest *test = MTest::New(alloc(), ins, ifTrue, ifFalse); 1.3520 + current->end(test); 1.3521 + 1.3522 + // The bytecode for if/ternary gets emitted either like this: 1.3523 + // 1.3524 + // IFEQ X ; src note (IF_ELSE, COND) points to the GOTO 1.3525 + // ... 1.3526 + // GOTO Z 1.3527 + // X: ... ; else/else if 1.3528 + // ... 1.3529 + // Z: ; join 1.3530 + // 1.3531 + // Or like this: 1.3532 + // 1.3533 + // IFEQ X ; src note (IF) has no offset 1.3534 + // ... 1.3535 + // Z: ... ; join 1.3536 + // 1.3537 + // We want to parse the bytecode as if we were parsing the AST, so for the 1.3538 + // IF_ELSE/COND cases, we use the source note and follow the GOTO. For the 1.3539 + // IF case, the IFEQ offset is the join point. 1.3540 + switch (SN_TYPE(sn)) { 1.3541 + case SRC_IF: 1.3542 + if (!cfgStack_.append(CFGState::If(falseStart, test))) 1.3543 + return false; 1.3544 + break; 1.3545 + 1.3546 + case SRC_IF_ELSE: 1.3547 + case SRC_COND: 1.3548 + { 1.3549 + // Infer the join point from the JSOP_GOTO[X] sitting here, then 1.3550 + // assert as much as we can that this is the right GOTO.
1.3551 + jsbytecode *trueEnd = pc + js_GetSrcNoteOffset(sn, 0); 1.3552 + JS_ASSERT(trueEnd > pc); 1.3553 + JS_ASSERT(trueEnd < falseStart); 1.3554 + JS_ASSERT(JSOp(*trueEnd) == JSOP_GOTO); 1.3555 + JS_ASSERT(!info().getNote(gsn, trueEnd)); 1.3556 + 1.3557 + jsbytecode *falseEnd = trueEnd + GetJumpOffset(trueEnd); 1.3558 + JS_ASSERT(falseEnd > trueEnd); 1.3559 + JS_ASSERT(falseEnd >= falseStart); 1.3560 + 1.3561 + if (!cfgStack_.append(CFGState::IfElse(trueEnd, falseEnd, test))) 1.3562 + return false; 1.3563 + break; 1.3564 + } 1.3565 + 1.3566 + default: 1.3567 + MOZ_ASSUME_UNREACHABLE("unexpected source note type"); 1.3568 + } 1.3569 + 1.3570 + // Switch to parsing the true branch. Note that no PC update is needed, 1.3571 + // it's the next instruction. 1.3572 + if (!setCurrentAndSpecializePhis(ifTrue)) 1.3573 + return false; 1.3574 + 1.3575 + // Filter the types in the true branch. 1.3576 + filterTypesAtTest(test); 1.3577 + 1.3578 + return true; 1.3579 +} 1.3580 + 1.3581 +bool 1.3582 +IonBuilder::jsop_try() 1.3583 +{ 1.3584 + JS_ASSERT(JSOp(*pc) == JSOP_TRY); 1.3585 + 1.3586 + if (!js_JitOptions.compileTryCatch) 1.3587 + return abort("Try-catch support disabled"); 1.3588 + 1.3589 + // Try-finally is not yet supported. 1.3590 + if (analysis().hasTryFinally()) 1.3591 + return abort("Has try-finally"); 1.3592 + 1.3593 + // Try-catch within inline frames is not yet supported. 1.3594 + JS_ASSERT(!isInlineBuilder()); 1.3595 + 1.3596 + // Try-catch during the arguments usage analysis is not yet supported. Code 1.3597 + // accessing the arguments within the 'catch' block is not accounted for. 1.3598 + if (info().executionMode() == ArgumentsUsageAnalysis) 1.3599 + return abort("Try-catch during arguments usage analysis"); 1.3600 + 1.3601 + graph().setHasTryBlock(); 1.3602 + 1.3603 + jssrcnote *sn = info().getNote(gsn, pc); 1.3604 + JS_ASSERT(SN_TYPE(sn) == SRC_TRY); 1.3605 + 1.3606 + // Get the pc of the last instruction in the try block. It's a JSOP_GOTO to 1.3607 + // jump over the catch block. 1.3608 + jsbytecode *endpc = pc + js_GetSrcNoteOffset(sn, 0); 1.3609 + JS_ASSERT(JSOp(*endpc) == JSOP_GOTO); 1.3610 + JS_ASSERT(GetJumpOffset(endpc) > 0); 1.3611 + 1.3612 + jsbytecode *afterTry = endpc + GetJumpOffset(endpc); 1.3613 + 1.3614 + // If control flow in the try body is terminated (by a return or throw 1.3615 + // statement), the code after the try-statement may still be reachable 1.3616 + // via the catch block (which we don't compile) and OSR can enter it. 1.3617 + // For example: 1.3618 + // 1.3619 + // try { 1.3620 + // throw 3; 1.3621 + // } catch(e) { } 1.3622 + // 1.3623 + // for (var i=0; i<1000; i++) {} 1.3624 + // 1.3625 + // To handle this, we create two blocks: one for the try block and one 1.3626 + // for the code following the try-catch statement. Both blocks are 1.3627 + // connected to the graph with an MTest instruction that always jumps to 1.3628 + // the try block. This ensures the successor block always has a predecessor 1.3629 + // and later passes will optimize this MTest to a no-op. 1.3630 + // 1.3631 + // If the code after the try block is unreachable (control flow in both the 1.3632 + // try and catch blocks is terminated), only create the try block, to avoid 1.3633 + // parsing unreachable code.
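// An illustrative example of the unreachable case: after
//
//   try { return f(); } catch (e) { throw e; }
//
// no code following the statement can be reached, so no successor block is
// created for it.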
1.3634 + 1.3635 + MBasicBlock *tryBlock = newBlock(current, GetNextPc(pc)); 1.3636 + if (!tryBlock) 1.3637 + return false; 1.3638 + 1.3639 + MBasicBlock *successor; 1.3640 + if (analysis().maybeInfo(afterTry)) { 1.3641 + successor = newBlock(current, afterTry); 1.3642 + if (!successor) 1.3643 + return false; 1.3644 + 1.3645 + // Add MTest(true, tryBlock, successorBlock). 1.3646 + MConstant *true_ = MConstant::New(alloc(), BooleanValue(true)); 1.3647 + current->add(true_); 1.3648 + current->end(MTest::New(alloc(), true_, tryBlock, successor)); 1.3649 + } else { 1.3650 + successor = nullptr; 1.3651 + current->end(MGoto::New(alloc(), tryBlock)); 1.3652 + } 1.3653 + 1.3654 + if (!cfgStack_.append(CFGState::Try(endpc, successor))) 1.3655 + return false; 1.3656 + 1.3657 + // The baseline compiler should not attempt to enter the catch block 1.3658 + // via OSR. 1.3659 + JS_ASSERT(info().osrPc() < endpc || info().osrPc() >= afterTry); 1.3660 + 1.3661 + // Start parsing the try block. 1.3662 + return setCurrentAndSpecializePhis(tryBlock); 1.3663 +} 1.3664 + 1.3665 +IonBuilder::ControlStatus 1.3666 +IonBuilder::processReturn(JSOp op) 1.3667 +{ 1.3668 + MDefinition *def; 1.3669 + switch (op) { 1.3670 + case JSOP_RETURN: 1.3671 + // Return the last instruction. 1.3672 + def = current->pop(); 1.3673 + break; 1.3674 + 1.3675 + case JSOP_RETRVAL: 1.3676 + // Return undefined eagerly if script doesn't use return value. 1.3677 + if (script()->noScriptRval()) { 1.3678 + MInstruction *ins = MConstant::New(alloc(), UndefinedValue()); 1.3679 + current->add(ins); 1.3680 + def = ins; 1.3681 + break; 1.3682 + } 1.3683 + 1.3684 + def = current->getSlot(info().returnValueSlot()); 1.3685 + break; 1.3686 + 1.3687 + default: 1.3688 + def = nullptr; 1.3689 + MOZ_ASSUME_UNREACHABLE("unknown return op"); 1.3690 + } 1.3691 + 1.3692 + if (instrumentedProfiling()) { 1.3693 + current->add(MProfilerStackOp::New(alloc(), script(), MProfilerStackOp::Exit, 1.3694 + inliningDepth_)); 1.3695 + } 1.3696 + MReturn *ret = MReturn::New(alloc(), def); 1.3697 + current->end(ret); 1.3698 + 1.3699 + if (!graph().addReturn(current)) 1.3700 + return ControlStatus_Error; 1.3701 + 1.3702 + // Make sure no one tries to use this block now. 1.3703 + setCurrent(nullptr); 1.3704 + return processControlEnd(); 1.3705 +} 1.3706 + 1.3707 +IonBuilder::ControlStatus 1.3708 +IonBuilder::processThrow() 1.3709 +{ 1.3710 + MDefinition *def = current->pop(); 1.3711 + 1.3712 + // MThrow is not marked as effectful. This means that when it throws and we 1.3713 + // are inside a try block, we could use an earlier resume point, and this 1.3714 + // resume point may not be up-to-date, for example: 1.3715 + // 1.3716 + // (function() { 1.3717 + // try { 1.3718 + // var x = 1; 1.3719 + // foo(); // resume point 1.3720 + // x = 2; 1.3721 + // throw foo; 1.3722 + // } catch(e) { 1.3723 + // print(x); 1.3724 + // } 1.3725 + // })(); 1.3726 + // 1.3727 + // If we use the resume point after the call, this will print 1 instead 1.3728 + // of 2. To fix this, we create a resume point right before the MThrow. 1.3729 + // 1.3730 + // Note that this is not a problem for instructions other than MThrow 1.3731 + // because they are either marked as effectful (have their own resume 1.3732 + // point) or cannot throw a catchable exception. 1.3733 + // 1.3734 + // We always install this resume point (instead of only when the function 1.3735 + // has a try block) in order to handle the Debugger onExceptionUnwind 1.3736 + // hook.
When we need to handle the hook, we bail out to baseline right 1.3737 + // after the throw and propagate the exception when debug mode is on. This 1.3738 + // is in contrast to the normal behavior of resuming directly in the 1.3739 + // associated catch block. 1.3740 + MNop *nop = MNop::New(alloc()); 1.3741 + current->add(nop); 1.3742 + 1.3743 + if (!resumeAfter(nop)) 1.3744 + return ControlStatus_Error; 1.3745 + 1.3746 + MThrow *ins = MThrow::New(alloc(), def); 1.3747 + current->end(ins); 1.3748 + 1.3749 + // Make sure no one tries to use this block now. 1.3750 + setCurrent(nullptr); 1.3751 + return processControlEnd(); 1.3752 +} 1.3753 + 1.3754 +bool 1.3755 +IonBuilder::pushConstant(const Value &v) 1.3756 +{ 1.3757 + current->push(constant(v)); 1.3758 + return true; 1.3759 +} 1.3760 + 1.3761 +bool 1.3762 +IonBuilder::jsop_bitnot() 1.3763 +{ 1.3764 + MDefinition *input = current->pop(); 1.3765 + MBitNot *ins = MBitNot::New(alloc(), input); 1.3766 + 1.3767 + current->add(ins); 1.3768 + ins->infer(); 1.3769 + 1.3770 + current->push(ins); 1.3771 + if (ins->isEffectful() && !resumeAfter(ins)) 1.3772 + return false; 1.3773 + return true; 1.3774 +} 1.3775 +bool 1.3776 +IonBuilder::jsop_bitop(JSOp op) 1.3777 +{ 1.3778 + // Pop inputs. 1.3779 + MDefinition *right = current->pop(); 1.3780 + MDefinition *left = current->pop(); 1.3781 + 1.3782 + MBinaryBitwiseInstruction *ins; 1.3783 + switch (op) { 1.3784 + case JSOP_BITAND: 1.3785 + ins = MBitAnd::New(alloc(), left, right); 1.3786 + break; 1.3787 + 1.3788 + case JSOP_BITOR: 1.3789 + ins = MBitOr::New(alloc(), left, right); 1.3790 + break; 1.3791 + 1.3792 + case JSOP_BITXOR: 1.3793 + ins = MBitXor::New(alloc(), left, right); 1.3794 + break; 1.3795 + 1.3796 + case JSOP_LSH: 1.3797 + ins = MLsh::New(alloc(), left, right); 1.3798 + break; 1.3799 + 1.3800 + case JSOP_RSH: 1.3801 + ins = MRsh::New(alloc(), left, right); 1.3802 + break; 1.3803 + 1.3804 + case JSOP_URSH: 1.3805 + ins = MUrsh::New(alloc(), left, right); 1.3806 + break; 1.3807 + 1.3808 + default: 1.3809 + MOZ_ASSUME_UNREACHABLE("unexpected bitop"); 1.3810 + } 1.3811 + 1.3812 + current->add(ins); 1.3813 + ins->infer(inspector, pc); 1.3814 + 1.3815 + current->push(ins); 1.3816 + if (ins->isEffectful() && !resumeAfter(ins)) 1.3817 + return false; 1.3818 + 1.3819 + return true; 1.3820 +} 1.3821 + 1.3822 +bool 1.3823 +IonBuilder::jsop_binary(JSOp op, MDefinition *left, MDefinition *right) 1.3824 +{ 1.3825 + // Do a string concatenation if adding two inputs that are numbers or 1.3826 + // strings and at least one is a string.
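// Illustrative examples: |"x" + 1| and |1.5 + "x"| take the MConcat path
// below, while |1 + 2| falls through to the MAdd case.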
1.3827 + if (op == JSOP_ADD && 1.3828 + ((left->type() == MIRType_String && 1.3829 + (right->type() == MIRType_String || 1.3830 + right->type() == MIRType_Int32 || 1.3831 + right->type() == MIRType_Double)) || 1.3832 + (left->type() == MIRType_Int32 && 1.3833 + right->type() == MIRType_String) || 1.3834 + (left->type() == MIRType_Double && 1.3835 + right->type() == MIRType_String))) 1.3836 + { 1.3837 + MConcat *ins = MConcat::New(alloc(), left, right); 1.3838 + current->add(ins); 1.3839 + current->push(ins); 1.3840 + return maybeInsertResume(); 1.3841 + } 1.3842 + 1.3843 + MBinaryArithInstruction *ins; 1.3844 + switch (op) { 1.3845 + case JSOP_ADD: 1.3846 + ins = MAdd::New(alloc(), left, right); 1.3847 + break; 1.3848 + 1.3849 + case JSOP_SUB: 1.3850 + ins = MSub::New(alloc(), left, right); 1.3851 + break; 1.3852 + 1.3853 + case JSOP_MUL: 1.3854 + ins = MMul::New(alloc(), left, right); 1.3855 + break; 1.3856 + 1.3857 + case JSOP_DIV: 1.3858 + ins = MDiv::New(alloc(), left, right); 1.3859 + break; 1.3860 + 1.3861 + case JSOP_MOD: 1.3862 + ins = MMod::New(alloc(), left, right); 1.3863 + break; 1.3864 + 1.3865 + default: 1.3866 + MOZ_ASSUME_UNREACHABLE("unexpected binary opcode"); 1.3867 + } 1.3868 + 1.3869 + current->add(ins); 1.3870 + ins->infer(alloc(), inspector, pc); 1.3871 + current->push(ins); 1.3872 + 1.3873 + if (ins->isEffectful()) 1.3874 + return resumeAfter(ins); 1.3875 + return maybeInsertResume(); 1.3876 +} 1.3877 + 1.3878 +bool 1.3879 +IonBuilder::jsop_binary(JSOp op) 1.3880 +{ 1.3881 + MDefinition *right = current->pop(); 1.3882 + MDefinition *left = current->pop(); 1.3883 + 1.3884 + return jsop_binary(op, left, right); 1.3885 +} 1.3886 + 1.3887 +bool 1.3888 +IonBuilder::jsop_pos() 1.3889 +{ 1.3890 + if (IsNumberType(current->peek(-1)->type())) { 1.3891 + // Already int32 or double. Set the operand as implicitly used so it 1.3892 + // doesn't get optimized out if it has no other uses, as we could bail 1.3893 + // out. 1.3894 + current->peek(-1)->setImplicitlyUsedUnchecked(); 1.3895 + return true; 1.3896 + } 1.3897 + 1.3898 + // Compile +x as x * 1. 1.3899 + MDefinition *value = current->pop(); 1.3900 + MConstant *one = MConstant::New(alloc(), Int32Value(1)); 1.3901 + current->add(one); 1.3902 + 1.3903 + return jsop_binary(JSOP_MUL, value, one); 1.3904 +} 1.3905 + 1.3906 +bool 1.3907 +IonBuilder::jsop_neg() 1.3908 +{ 1.3909 + // Since JSOP_NEG does not use a slot, we cannot push the MConstant. 1.3910 + // The MConstant is therefore passed to JSOP_MUL without slot traffic. 
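// In other words, |-x| is compiled as |-1 * x|, mirroring how jsop_pos above
// compiles |+x| as |x * 1|.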
1.3911 + MConstant *negator = MConstant::New(alloc(), Int32Value(-1)); 1.3912 + current->add(negator); 1.3913 + 1.3914 + MDefinition *right = current->pop(); 1.3915 + 1.3916 + if (!jsop_binary(JSOP_MUL, negator, right)) 1.3917 + return false; 1.3918 + return true; 1.3919 +} 1.3920 + 1.3921 +class AutoAccumulateReturns 1.3922 +{ 1.3923 + MIRGraph &graph_; 1.3924 + MIRGraphReturns *prev_; 1.3925 + 1.3926 + public: 1.3927 + AutoAccumulateReturns(MIRGraph &graph, MIRGraphReturns &returns) 1.3928 + : graph_(graph) 1.3929 + { 1.3930 + prev_ = graph_.returnAccumulator(); 1.3931 + graph_.setReturnAccumulator(&returns); 1.3932 + } 1.3933 + ~AutoAccumulateReturns() { 1.3934 + graph_.setReturnAccumulator(prev_); 1.3935 + } 1.3936 +}; 1.3937 + 1.3938 +bool 1.3939 +IonBuilder::inlineScriptedCall(CallInfo &callInfo, JSFunction *target) 1.3940 +{ 1.3941 + JS_ASSERT(target->hasScript()); 1.3942 + JS_ASSERT(IsIonInlinablePC(pc)); 1.3943 + 1.3944 + callInfo.setImplicitlyUsedUnchecked(); 1.3945 + 1.3946 + // Ensure sufficient space in the slots: needed for inlining from FUNAPPLY. 1.3947 + uint32_t depth = current->stackDepth() + callInfo.numFormals(); 1.3948 + if (depth > current->nslots()) { 1.3949 + if (!current->increaseSlots(depth - current->nslots())) 1.3950 + return false; 1.3951 + } 1.3952 + 1.3953 + // Create new |this| on the caller-side for inlined constructors. 1.3954 + if (callInfo.constructing()) { 1.3955 + MDefinition *thisDefn = createThis(target, callInfo.fun()); 1.3956 + if (!thisDefn) 1.3957 + return false; 1.3958 + callInfo.setThis(thisDefn); 1.3959 + } 1.3960 + 1.3961 + // Capture formals in the outer resume point. 1.3962 + callInfo.pushFormals(current); 1.3963 + 1.3964 + MResumePoint *outerResumePoint = 1.3965 + MResumePoint::New(alloc(), current, pc, callerResumePoint_, MResumePoint::Outer); 1.3966 + if (!outerResumePoint) 1.3967 + return false; 1.3968 + 1.3969 + // Pop formals again, except leave |fun| on stack for duration of call. 1.3970 + callInfo.popFormals(current); 1.3971 + current->push(callInfo.fun()); 1.3972 + 1.3973 + JSScript *calleeScript = target->nonLazyScript(); 1.3974 + BaselineInspector inspector(calleeScript); 1.3975 + 1.3976 + // Improve type information of |this| when not set. 1.3977 + if (callInfo.constructing() && 1.3978 + !callInfo.thisArg()->resultTypeSet() && 1.3979 + calleeScript->types) 1.3980 + { 1.3981 + types::StackTypeSet *types = types::TypeScript::ThisTypes(calleeScript); 1.3982 + if (!types->unknown()) { 1.3983 + types::TemporaryTypeSet *clonedTypes = types->clone(alloc_->lifoAlloc()); 1.3984 + if (!clonedTypes) 1.3985 + return oom(); 1.3986 + MTypeBarrier *barrier = MTypeBarrier::New(alloc(), callInfo.thisArg(), clonedTypes); 1.3987 + current->add(barrier); 1.3988 + callInfo.setThis(barrier); 1.3989 + } 1.3990 + } 1.3991 + 1.3992 + // Start inlining. 1.3993 + LifoAlloc *lifoAlloc = alloc_->lifoAlloc(); 1.3994 + CompileInfo *info = lifoAlloc->new_<CompileInfo>(calleeScript, target, 1.3995 + (jsbytecode *)nullptr, callInfo.constructing(), 1.3996 + this->info().executionMode(), 1.3997 + /* needsArgsObj = */ false); 1.3998 + if (!info) 1.3999 + return false; 1.4000 + 1.4001 + MIRGraphReturns returns(alloc()); 1.4002 + AutoAccumulateReturns aar(graph(), returns); 1.4003 + 1.4004 + // Build the graph. 
1.4005 + IonBuilder inlineBuilder(analysisContext, compartment, options, &alloc(), &graph(), constraints(), 1.4006 + &inspector, info, &optimizationInfo(), nullptr, inliningDepth_ + 1, 1.4007 + loopDepth_); 1.4008 + if (!inlineBuilder.buildInline(this, outerResumePoint, callInfo)) { 1.4009 + if (analysisContext && analysisContext->isExceptionPending()) { 1.4010 + IonSpew(IonSpew_Abort, "Inline builder raised exception."); 1.4011 + abortReason_ = AbortReason_Error; 1.4012 + return false; 1.4013 + } 1.4014 + 1.4015 + // Inlining the callee failed. Mark the callee as uninlineable only if 1.4016 + // the inlining was aborted for a non-exception reason. 1.4017 + if (inlineBuilder.abortReason_ == AbortReason_Disable) { 1.4018 + calleeScript->setUninlineable(); 1.4019 + abortReason_ = AbortReason_Inlining; 1.4020 + } else if (inlineBuilder.abortReason_ == AbortReason_Inlining) { 1.4021 + abortReason_ = AbortReason_Inlining; 1.4022 + } 1.4023 + 1.4024 + return false; 1.4025 + } 1.4026 + 1.4027 + // Create return block. 1.4028 + jsbytecode *postCall = GetNextPc(pc); 1.4029 + MBasicBlock *returnBlock = newBlock(nullptr, postCall); 1.4030 + if (!returnBlock) 1.4031 + return false; 1.4032 + returnBlock->setCallerResumePoint(callerResumePoint_); 1.4033 + 1.4034 + // When profiling add InlineExit instruction to indicate end of inlined function. 1.4035 + if (instrumentedProfiling()) 1.4036 + returnBlock->add(MProfilerStackOp::New(alloc(), nullptr, MProfilerStackOp::InlineExit)); 1.4037 + 1.4038 + // Inherit the slots from current and pop |fun|. 1.4039 + returnBlock->inheritSlots(current); 1.4040 + returnBlock->pop(); 1.4041 + 1.4042 + // Accumulate return values. 1.4043 + if (returns.empty()) { 1.4044 + // Inlining of functions that have no exit is not supported. 1.4045 + calleeScript->setUninlineable(); 1.4046 + abortReason_ = AbortReason_Inlining; 1.4047 + return false; 1.4048 + } 1.4049 + MDefinition *retvalDefn = patchInlinedReturns(callInfo, returns, returnBlock); 1.4050 + if (!retvalDefn) 1.4051 + return false; 1.4052 + returnBlock->push(retvalDefn); 1.4053 + 1.4054 + // Initialize entry slots now that the stack has been fixed up. 1.4055 + if (!returnBlock->initEntrySlots(alloc())) 1.4056 + return false; 1.4057 + 1.4058 + return setCurrentAndSpecializePhis(returnBlock); 1.4059 +} 1.4060 + 1.4061 +MDefinition * 1.4062 +IonBuilder::patchInlinedReturn(CallInfo &callInfo, MBasicBlock *exit, MBasicBlock *bottom) 1.4063 +{ 1.4064 + // Replaces the MReturn in the exit block with an MGoto. 1.4065 + MDefinition *rdef = exit->lastIns()->toReturn()->input(); 1.4066 + exit->discardLastIns(); 1.4067 + 1.4068 + // Constructors must be patched by the caller to always return an object. 1.4069 + if (callInfo.constructing()) { 1.4070 + if (rdef->type() == MIRType_Value) { 1.4071 + // Unknown return: dynamically detect objects. 1.4072 + MReturnFromCtor *filter = MReturnFromCtor::New(alloc(), rdef, callInfo.thisArg()); 1.4073 + exit->add(filter); 1.4074 + rdef = filter; 1.4075 + } else if (rdef->type() != MIRType_Object) { 1.4076 + // Known non-object return: force |this|. 1.4077 + rdef = callInfo.thisArg(); 1.4078 + } 1.4079 + } else if (callInfo.isSetter()) { 1.4080 + // Setters return their argument, not whatever value is returned. 
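// An illustrative example: for |o.x = v| backed by a scripted setter, the
// value of the assignment expression is |v| no matter what the setter body
// returns, so the inlined return value is patched to the argument.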
1.4081 + rdef = callInfo.getArg(0); 1.4082 + } 1.4083 + 1.4084 + MGoto *replacement = MGoto::New(alloc(), bottom); 1.4085 + exit->end(replacement); 1.4086 + if (!bottom->addPredecessorWithoutPhis(exit)) 1.4087 + return nullptr; 1.4088 + 1.4089 + return rdef; 1.4090 +} 1.4091 + 1.4092 +MDefinition * 1.4093 +IonBuilder::patchInlinedReturns(CallInfo &callInfo, MIRGraphReturns &returns, MBasicBlock *bottom) 1.4094 +{ 1.4095 + // Replaces MReturns with MGotos, returning the MDefinition 1.4096 + // representing the return value, or nullptr. 1.4097 + JS_ASSERT(returns.length() > 0); 1.4098 + 1.4099 + if (returns.length() == 1) 1.4100 + return patchInlinedReturn(callInfo, returns[0], bottom); 1.4101 + 1.4102 + // Accumulate multiple returns with a phi. 1.4103 + MPhi *phi = MPhi::New(alloc(), bottom->stackDepth()); 1.4104 + if (!phi->reserveLength(returns.length())) 1.4105 + return nullptr; 1.4106 + 1.4107 + for (size_t i = 0; i < returns.length(); i++) { 1.4108 + MDefinition *rdef = patchInlinedReturn(callInfo, returns[i], bottom); 1.4109 + if (!rdef) 1.4110 + return nullptr; 1.4111 + phi->addInput(rdef); 1.4112 + } 1.4113 + 1.4114 + bottom->addPhi(phi); 1.4115 + return phi; 1.4116 +} 1.4117 + 1.4118 +IonBuilder::InliningDecision 1.4119 +IonBuilder::makeInliningDecision(JSFunction *target, CallInfo &callInfo) 1.4120 +{ 1.4121 + // When there is no target, inlining is impossible. 1.4122 + if (target == nullptr) 1.4123 + return InliningDecision_DontInline; 1.4124 + 1.4125 + // Never inline during the arguments usage analysis. 1.4126 + if (info().executionMode() == ArgumentsUsageAnalysis) 1.4127 + return InliningDecision_DontInline; 1.4128 + 1.4129 + // Native functions provide their own detection in inlineNativeCall(). 1.4130 + if (target->isNative()) 1.4131 + return InliningDecision_Inline; 1.4132 + 1.4133 + // Determine whether inlining is possible at callee site 1.4134 + InliningDecision decision = canInlineTarget(target, callInfo); 1.4135 + if (decision != InliningDecision_Inline) 1.4136 + return decision; 1.4137 + 1.4138 + // Heuristics! 1.4139 + JSScript *targetScript = target->nonLazyScript(); 1.4140 + 1.4141 + // Skip heuristics if we have an explicit hint to inline. 1.4142 + if (!targetScript->shouldInline()) { 1.4143 + // Cap the inlining depth. 1.4144 + if (js_JitOptions.isSmallFunction(targetScript)) { 1.4145 + if (inliningDepth_ >= optimizationInfo().smallFunctionMaxInlineDepth()) 1.4146 + return DontInline(targetScript, "Vetoed: exceeding allowed inline depth"); 1.4147 + } else { 1.4148 + if (inliningDepth_ >= optimizationInfo().maxInlineDepth()) 1.4149 + return DontInline(targetScript, "Vetoed: exceeding allowed inline depth"); 1.4150 + 1.4151 + if (targetScript->hasLoops()) 1.4152 + return DontInline(targetScript, "Vetoed: big function that contains a loop"); 1.4153 + 1.4154 + // Caller must not be excessively large. 1.4155 + if (script()->length() >= optimizationInfo().inliningMaxCallerBytecodeLength()) 1.4156 + return DontInline(targetScript, "Vetoed: caller excessively large"); 1.4157 + } 1.4158 + 1.4159 + // Callee must not be excessively large. 1.4160 + // This heuristic also applies to the callsite as a whole. 1.4161 + if (targetScript->length() > optimizationInfo().inlineMaxTotalBytecodeLength()) 1.4162 + return DontInline(targetScript, "Vetoed: callee excessively large"); 1.4163 + 1.4164 + // Callee must have been called a few times to have somewhat stable 1.4165 + // type information, except for definite properties analysis, 1.4166 + // as the caller has not run yet. 
1.4167 + if (targetScript->getUseCount() < optimizationInfo().usesBeforeInlining() && 1.4168 + info().executionMode() != DefinitePropertiesAnalysis) 1.4169 + { 1.4170 + return DontInline(targetScript, "Vetoed: callee is insufficiently hot."); 1.4171 + } 1.4172 + } 1.4173 + 1.4174 + // TI calls ObjectStateChange to trigger invalidation of the caller. 1.4175 + types::TypeObjectKey *targetType = types::TypeObjectKey::get(target); 1.4176 + targetType->watchStateChangeForInlinedCall(constraints()); 1.4177 + 1.4178 + // We mustn't relazify functions that have been inlined, because there's 1.4179 + // no way to tell if it is safe to do so. 1.4180 + script()->setHasBeenInlined(); 1.4181 + 1.4182 + return InliningDecision_Inline; 1.4183 +} 1.4184 + 1.4185 +bool 1.4186 +IonBuilder::selectInliningTargets(ObjectVector &targets, CallInfo &callInfo, BoolVector &choiceSet, 1.4187 + uint32_t *numInlineable) 1.4188 +{ 1.4189 + *numInlineable = 0; 1.4190 + uint32_t totalSize = 0; 1.4191 + 1.4192 + // For each target, ask whether it may be inlined. 1.4193 + if (!choiceSet.reserve(targets.length())) 1.4194 + return false; 1.4195 + 1.4196 + for (size_t i = 0; i < targets.length(); i++) { 1.4197 + JSFunction *target = &targets[i]->as<JSFunction>(); 1.4198 + bool inlineable; 1.4199 + InliningDecision decision = makeInliningDecision(target, callInfo); 1.4200 + switch (decision) { 1.4201 + case InliningDecision_Error: 1.4202 + return false; 1.4203 + case InliningDecision_DontInline: 1.4204 + inlineable = false; 1.4205 + break; 1.4206 + case InliningDecision_Inline: 1.4207 + inlineable = true; 1.4208 + break; 1.4209 + default: 1.4210 + MOZ_ASSUME_UNREACHABLE("Unhandled InliningDecision value!"); 1.4211 + } 1.4212 + 1.4213 + // Enforce a maximum inlined bytecode limit at the callsite. 1.4214 + if (inlineable && target->isInterpreted()) { 1.4215 + totalSize += target->nonLazyScript()->length(); 1.4216 + if (totalSize > optimizationInfo().inlineMaxTotalBytecodeLength()) 1.4217 + inlineable = false; 1.4218 + } 1.4219 + 1.4220 + choiceSet.append(inlineable); 1.4221 + if (inlineable) 1.4222 + *numInlineable += 1; 1.4223 + } 1.4224 + 1.4225 + JS_ASSERT(choiceSet.length() == targets.length()); 1.4226 + return true; 1.4227 +} 1.4228 + 1.4229 +static bool 1.4230 +CanInlineGetPropertyCache(MGetPropertyCache *cache, MDefinition *thisDef) 1.4231 +{ 1.4232 + JS_ASSERT(cache->object()->type() == MIRType_Object); 1.4233 + if (cache->object() != thisDef) 1.4234 + return false; 1.4235 + 1.4236 + InlinePropertyTable *table = cache->propTable(); 1.4237 + if (!table) 1.4238 + return false; 1.4239 + if (table->numEntries() == 0) 1.4240 + return false; 1.4241 + return true; 1.4242 +} 1.4243 + 1.4244 +MGetPropertyCache * 1.4245 +IonBuilder::getInlineableGetPropertyCache(CallInfo &callInfo) 1.4246 +{ 1.4247 + if (callInfo.constructing()) 1.4248 + return nullptr; 1.4249 + 1.4250 + MDefinition *thisDef = callInfo.thisArg(); 1.4251 + if (thisDef->type() != MIRType_Object) 1.4252 + return nullptr; 1.4253 + 1.4254 + MDefinition *funcDef = callInfo.fun(); 1.4255 + if (funcDef->type() != MIRType_Object) 1.4256 + return nullptr; 1.4257 + 1.4258 + // MGetPropertyCache with no uses may be optimized away.
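// An illustrative example: for a call |o.m()|, the callee |o.m| is typically
// loaded by an MGetPropertyCache, possibly wrapped in an MTypeBarrier; if
// that load feeds nothing but this call, it can be moved to a fallback path.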
1.4259 + if (funcDef->isGetPropertyCache()) { 1.4260 + MGetPropertyCache *cache = funcDef->toGetPropertyCache(); 1.4261 + if (cache->hasUses()) 1.4262 + return nullptr; 1.4263 + if (!CanInlineGetPropertyCache(cache, thisDef)) 1.4264 + return nullptr; 1.4265 + return cache; 1.4266 + } 1.4267 + 1.4268 + // Optimize away the following common pattern: 1.4269 + // MTypeBarrier[MIRType_Object] <- MGetPropertyCache 1.4270 + if (funcDef->isTypeBarrier()) { 1.4271 + MTypeBarrier *barrier = funcDef->toTypeBarrier(); 1.4272 + if (barrier->hasUses()) 1.4273 + return nullptr; 1.4274 + if (barrier->type() != MIRType_Object) 1.4275 + return nullptr; 1.4276 + if (!barrier->input()->isGetPropertyCache()) 1.4277 + return nullptr; 1.4278 + 1.4279 + MGetPropertyCache *cache = barrier->input()->toGetPropertyCache(); 1.4280 + if (cache->hasUses() && !cache->hasOneUse()) 1.4281 + return nullptr; 1.4282 + if (!CanInlineGetPropertyCache(cache, thisDef)) 1.4283 + return nullptr; 1.4284 + return cache; 1.4285 + } 1.4286 + 1.4287 + return nullptr; 1.4288 +} 1.4289 + 1.4290 +IonBuilder::InliningStatus 1.4291 +IonBuilder::inlineSingleCall(CallInfo &callInfo, JSFunction *target) 1.4292 +{ 1.4293 + // Expects formals to be popped and wrapped. 1.4294 + if (target->isNative()) 1.4295 + return inlineNativeCall(callInfo, target); 1.4296 + 1.4297 + if (!inlineScriptedCall(callInfo, target)) 1.4298 + return InliningStatus_Error; 1.4299 + return InliningStatus_Inlined; 1.4300 +} 1.4301 + 1.4302 +IonBuilder::InliningStatus 1.4303 +IonBuilder::inlineCallsite(ObjectVector &targets, ObjectVector &originals, 1.4304 + bool lambda, CallInfo &callInfo) 1.4305 +{ 1.4306 + if (targets.empty()) 1.4307 + return InliningStatus_NotInlined; 1.4308 + 1.4309 + // Is the function provided by an MGetPropertyCache? 1.4310 + // If so, the cache may be movable to a fallback path, with a dispatch 1.4311 + // instruction guarding on the incoming TypeObject. 1.4312 + MGetPropertyCache *propCache = getInlineableGetPropertyCache(callInfo); 1.4313 + 1.4314 + // Inline single targets -- unless they derive from a cache, in which case 1.4315 + // avoiding the cache and guarding is still faster. 1.4316 + if (!propCache && targets.length() == 1) { 1.4317 + JSFunction *target = &targets[0]->as<JSFunction>(); 1.4318 + InliningDecision decision = makeInliningDecision(target, callInfo); 1.4319 + switch (decision) { 1.4320 + case InliningDecision_Error: 1.4321 + return InliningStatus_Error; 1.4322 + case InliningDecision_DontInline: 1.4323 + return InliningStatus_NotInlined; 1.4324 + case InliningDecision_Inline: 1.4325 + break; 1.4326 + } 1.4327 + 1.4328 + // Inlining will eliminate uses of the original callee, but it needs to 1.4329 + // be preserved in phis if we bail out. Mark the old callee definition as 1.4330 + // implicitly used to ensure this happens. 1.4331 + callInfo.fun()->setImplicitlyUsedUnchecked(); 1.4332 + 1.4333 + // If the callee is not going to be a lambda (which may vary across 1.4334 + // different invocations), then the callee definition can be replaced by a 1.4335 + // constant. 1.4336 + if (!lambda) { 1.4337 + // Replace the function with an MConstant. 1.4338 + MConstant *constFun = constant(ObjectValue(*target)); 1.4339 + callInfo.setFun(constFun); 1.4340 + } 1.4341 + 1.4342 + return inlineSingleCall(callInfo, target); 1.4343 + } 1.4344 + 1.4345 + // Choose a subset of the targets for polymorphic inlining.
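// An illustrative example: if a callsite |f(x)| has been observed calling
// either of two small scripted functions, both may be inlined behind a
// dispatch instruction guarding on the callee, while targets vetoed by the
// heuristics fall back to a generic call.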
1.4346 + BoolVector choiceSet(alloc()); 1.4347 + uint32_t numInlined; 1.4348 + if (!selectInliningTargets(targets, callInfo, choiceSet, &numInlined)) 1.4349 + return InliningStatus_Error; 1.4350 + if (numInlined == 0) 1.4351 + return InliningStatus_NotInlined; 1.4352 + 1.4353 + // Perform a polymorphic dispatch. 1.4354 + if (!inlineCalls(callInfo, targets, originals, choiceSet, propCache)) 1.4355 + return InliningStatus_Error; 1.4356 + 1.4357 + return InliningStatus_Inlined; 1.4358 +} 1.4359 + 1.4360 +bool 1.4361 +IonBuilder::inlineGenericFallback(JSFunction *target, CallInfo &callInfo, MBasicBlock *dispatchBlock, 1.4362 + bool clonedAtCallsite) 1.4363 +{ 1.4364 + // Generate a new block with all arguments on-stack. 1.4365 + MBasicBlock *fallbackBlock = newBlock(dispatchBlock, pc); 1.4366 + if (!fallbackBlock) 1.4367 + return false; 1.4368 + 1.4369 + // Create a new CallInfo to track modified state within this block. 1.4370 + CallInfo fallbackInfo(alloc(), callInfo.constructing()); 1.4371 + if (!fallbackInfo.init(callInfo)) 1.4372 + return false; 1.4373 + fallbackInfo.popFormals(fallbackBlock); 1.4374 + 1.4375 + // Generate an MCall, which uses stateful |current|. 1.4376 + if (!setCurrentAndSpecializePhis(fallbackBlock)) 1.4377 + return false; 1.4378 + if (!makeCall(target, fallbackInfo, clonedAtCallsite)) 1.4379 + return false; 1.4380 + 1.4381 + // Pass return block to caller as |current|. 1.4382 + return true; 1.4383 +} 1.4384 + 1.4385 +bool 1.4386 +IonBuilder::inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock, 1.4387 + MTypeObjectDispatch *dispatch, MGetPropertyCache *cache, 1.4388 + MBasicBlock **fallbackTarget) 1.4389 +{ 1.4390 + // Getting here implies the following: 1.4391 + // 1. The call function is an MGetPropertyCache, or an MGetPropertyCache 1.4392 + // followed by an MTypeBarrier. 1.4393 + JS_ASSERT(callInfo.fun()->isGetPropertyCache() || callInfo.fun()->isTypeBarrier()); 1.4394 + 1.4395 + // 2. The MGetPropertyCache has inlineable cases by guarding on the TypeObject. 1.4396 + JS_ASSERT(dispatch->numCases() > 0); 1.4397 + 1.4398 + // 3. The MGetPropertyCache (and, if applicable, MTypeBarrier) only 1.4399 + // have at most a single use. 1.4400 + JS_ASSERT_IF(callInfo.fun()->isGetPropertyCache(), !cache->hasUses()); 1.4401 + JS_ASSERT_IF(callInfo.fun()->isTypeBarrier(), cache->hasOneUse()); 1.4402 + 1.4403 + // This means that no resume points yet capture the MGetPropertyCache, 1.4404 + // so everything from the MGetPropertyCache up until the call is movable. 1.4405 + // We now move the MGetPropertyCache and friends into a fallback path. 1.4406 + 1.4407 + // Create a new CallInfo to track modified state within the fallback path. 1.4408 + CallInfo fallbackInfo(alloc(), callInfo.constructing()); 1.4409 + if (!fallbackInfo.init(callInfo)) 1.4410 + return false; 1.4411 + 1.4412 + // Capture stack prior to the call operation. This captures the function. 1.4413 + MResumePoint *preCallResumePoint = 1.4414 + MResumePoint::New(alloc(), dispatchBlock, pc, callerResumePoint_, MResumePoint::ResumeAt); 1.4415 + if (!preCallResumePoint) 1.4416 + return false; 1.4417 + 1.4418 + DebugOnly<size_t> preCallFuncIndex = preCallResumePoint->numOperands() - callInfo.numFormals(); 1.4419 + JS_ASSERT(preCallResumePoint->getOperand(preCallFuncIndex) == fallbackInfo.fun()); 1.4420 + 1.4421 + // In the dispatch block, replace the function's slot entry with Undefined. 
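// (Presumably this keeps the dispatch block's resume points from referencing
// the MGetPropertyCache that is about to be moved into the fallback path; on
// bailout the callee is recomputed there instead.)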
1.4422 + MConstant *undefined = MConstant::New(alloc(), UndefinedValue()); 1.4423 + dispatchBlock->add(undefined); 1.4424 + dispatchBlock->rewriteAtDepth(-int(callInfo.numFormals()), undefined); 1.4425 + 1.4426 + // Construct a block that does nothing but remove formals from the stack. 1.4427 + // This is effectively changing the entry resume point of the later fallback block. 1.4428 + MBasicBlock *prepBlock = newBlock(dispatchBlock, pc); 1.4429 + if (!prepBlock) 1.4430 + return false; 1.4431 + fallbackInfo.popFormals(prepBlock); 1.4432 + 1.4433 + // Construct a block into which the MGetPropertyCache can be moved. 1.4434 + // This is subtle: the pc and resume point are those of the MGetPropertyCache! 1.4435 + InlinePropertyTable *propTable = cache->propTable(); 1.4436 + JS_ASSERT(propTable->pc() != nullptr); 1.4437 + JS_ASSERT(propTable->priorResumePoint() != nullptr); 1.4438 + MBasicBlock *getPropBlock = newBlock(prepBlock, propTable->pc(), propTable->priorResumePoint()); 1.4439 + if (!getPropBlock) 1.4440 + return false; 1.4441 + 1.4442 + prepBlock->end(MGoto::New(alloc(), getPropBlock)); 1.4443 + 1.4444 + // Since the getPropBlock inherited the stack from right before the MGetPropertyCache, 1.4445 + // the target of the MGetPropertyCache is still on the stack. 1.4446 + DebugOnly<MDefinition *> checkObject = getPropBlock->pop(); 1.4447 + JS_ASSERT(checkObject == cache->object()); 1.4448 + 1.4449 + // Move the MGetPropertyCache and friends into the getPropBlock. 1.4450 + if (fallbackInfo.fun()->isGetPropertyCache()) { 1.4451 + JS_ASSERT(fallbackInfo.fun()->toGetPropertyCache() == cache); 1.4452 + getPropBlock->addFromElsewhere(cache); 1.4453 + getPropBlock->push(cache); 1.4454 + } else { 1.4455 + MTypeBarrier *barrier = callInfo.fun()->toTypeBarrier(); 1.4456 + JS_ASSERT(barrier->type() == MIRType_Object); 1.4457 + JS_ASSERT(barrier->input()->isGetPropertyCache()); 1.4458 + JS_ASSERT(barrier->input()->toGetPropertyCache() == cache); 1.4459 + 1.4460 + getPropBlock->addFromElsewhere(cache); 1.4461 + getPropBlock->addFromElsewhere(barrier); 1.4462 + getPropBlock->push(barrier); 1.4463 + } 1.4464 + 1.4465 + // Construct an end block with the correct resume point. 1.4466 + MBasicBlock *preCallBlock = newBlock(getPropBlock, pc, preCallResumePoint); 1.4467 + if (!preCallBlock) 1.4468 + return false; 1.4469 + getPropBlock->end(MGoto::New(alloc(), preCallBlock)); 1.4470 + 1.4471 + // Now inline the MCallGeneric, using preCallBlock as the dispatch point. 1.4472 + if (!inlineGenericFallback(nullptr, fallbackInfo, preCallBlock, false)) 1.4473 + return false; 1.4474 + 1.4475 + // inlineGenericFallback() set the return block as |current|. 1.4476 + preCallBlock->end(MGoto::New(alloc(), current)); 1.4477 + *fallbackTarget = prepBlock; 1.4478 + return true; 1.4479 +} 1.4480 + 1.4481 +bool 1.4482 +IonBuilder::inlineCalls(CallInfo &callInfo, ObjectVector &targets, 1.4483 + ObjectVector &originals, BoolVector &choiceSet, 1.4484 + MGetPropertyCache *maybeCache) 1.4485 +{ 1.4486 + // Only handle polymorphic inlining. 1.4487 + JS_ASSERT(IsIonInlinablePC(pc)); 1.4488 + JS_ASSERT(choiceSet.length() == targets.length()); 1.4489 + JS_ASSERT_IF(!maybeCache, targets.length() >= 2); 1.4490 + JS_ASSERT_IF(maybeCache, targets.length() >= 1); 1.4491 + 1.4492 + MBasicBlock *dispatchBlock = current; 1.4493 + callInfo.setImplicitlyUsedUnchecked(); 1.4494 + callInfo.pushFormals(dispatchBlock); 1.4495 + 1.4496 + // Patch any InlinePropertyTable to only contain functions that are inlineable. 
1.4497 + // 1.4498 + // Note that we trim using originals, as callsite clones are not user 1.4499 + // visible. We don't patch the entries inside the table with the cloned 1.4500 + // targets, as the entries should only be used for comparison. 1.4501 + // 1.4502 + // The InlinePropertyTable will also be patched at the end to exclude native functions 1.4503 + // that vetoed inlining. 1.4504 + if (maybeCache) { 1.4505 + InlinePropertyTable *propTable = maybeCache->propTable(); 1.4506 + propTable->trimToTargets(originals); 1.4507 + if (propTable->numEntries() == 0) 1.4508 + maybeCache = nullptr; 1.4509 + } 1.4510 + 1.4511 + // Generate a dispatch based on guard kind. 1.4512 + MDispatchInstruction *dispatch; 1.4513 + if (maybeCache) { 1.4514 + dispatch = MTypeObjectDispatch::New(alloc(), maybeCache->object(), maybeCache->propTable()); 1.4515 + callInfo.fun()->setImplicitlyUsedUnchecked(); 1.4516 + } else { 1.4517 + dispatch = MFunctionDispatch::New(alloc(), callInfo.fun()); 1.4518 + } 1.4519 + 1.4520 + // Generate a return block to host the rval-collecting MPhi. 1.4521 + jsbytecode *postCall = GetNextPc(pc); 1.4522 + MBasicBlock *returnBlock = newBlock(nullptr, postCall); 1.4523 + if (!returnBlock) 1.4524 + return false; 1.4525 + returnBlock->setCallerResumePoint(callerResumePoint_); 1.4526 + 1.4527 + // Set up stack, used to manually create a post-call resume point. 1.4528 + returnBlock->inheritSlots(dispatchBlock); 1.4529 + callInfo.popFormals(returnBlock); 1.4530 + 1.4531 + MPhi *retPhi = MPhi::New(alloc(), returnBlock->stackDepth()); 1.4532 + returnBlock->addPhi(retPhi); 1.4533 + returnBlock->push(retPhi); 1.4534 + 1.4535 + // Create a resume point from current stack state. 1.4536 + returnBlock->initEntrySlots(alloc()); 1.4537 + 1.4538 + // Reserve the capacity for the phi. 1.4539 + // Note: this is an upper bound. Unreachable targets and uninlineable natives are also counted. 1.4540 + uint32_t count = 1; // Possible fallback block. 1.4541 + for (uint32_t i = 0; i < targets.length(); i++) { 1.4542 + if (choiceSet[i]) 1.4543 + count++; 1.4544 + } 1.4545 + retPhi->reserveLength(count); 1.4546 + 1.4547 + // During inlining the 'this' value is assigned a type set which is 1.4548 + // specialized to the type objects which can generate that inlining target. 1.4549 + // After inlining the original type set is restored. 1.4550 + types::TemporaryTypeSet *cacheObjectTypeSet = 1.4551 + maybeCache ? maybeCache->object()->resultTypeSet() : nullptr; 1.4552 + 1.4553 + // Inline each of the inlineable targets. 1.4554 + JS_ASSERT(targets.length() == originals.length()); 1.4555 + for (uint32_t i = 0; i < targets.length(); i++) { 1.4556 + // When original != target, the target is a callsite clone. The 1.4557 + // original should be used for guards, and the target should be the 1.4558 + // actual function inlined. 1.4559 + JSFunction *original = &originals[i]->as<JSFunction>(); 1.4560 + JSFunction *target = &targets[i]->as<JSFunction>(); 1.4561 + 1.4562 + // Target must be inlineable. 1.4563 + if (!choiceSet[i]) 1.4564 + continue; 1.4565 + 1.4566 + // Target must be reachable by the MDispatchInstruction. 1.4567 + if (maybeCache && !maybeCache->propTable()->hasFunction(original)) { 1.4568 + choiceSet[i] = false; 1.4569 + continue; 1.4570 + } 1.4571 + 1.4572 + MBasicBlock *inlineBlock = newBlock(dispatchBlock, pc); 1.4573 + if (!inlineBlock) 1.4574 + return false; 1.4575 + 1.4576 + // Create a function MConstant to use in the entry ResumePoint.
1.4577 + MConstant *funcDef = MConstant::New(alloc(), ObjectValue(*target), constraints()); 1.4578 + funcDef->setImplicitlyUsedUnchecked(); 1.4579 + dispatchBlock->add(funcDef); 1.4580 + 1.4581 + // Use the MConstant in the inline resume point and on stack. 1.4582 + int funIndex = inlineBlock->entryResumePoint()->numOperands() - callInfo.numFormals(); 1.4583 + inlineBlock->entryResumePoint()->replaceOperand(funIndex, funcDef); 1.4584 + inlineBlock->rewriteSlot(funIndex, funcDef); 1.4585 + 1.4586 + // Create a new CallInfo to track modified state within the inline block. 1.4587 + CallInfo inlineInfo(alloc(), callInfo.constructing()); 1.4588 + if (!inlineInfo.init(callInfo)) 1.4589 + return false; 1.4590 + inlineInfo.popFormals(inlineBlock); 1.4591 + inlineInfo.setFun(funcDef); 1.4592 + 1.4593 + if (maybeCache) { 1.4594 + JS_ASSERT(callInfo.thisArg() == maybeCache->object()); 1.4595 + types::TemporaryTypeSet *targetThisTypes = 1.4596 + maybeCache->propTable()->buildTypeSetForFunction(original); 1.4597 + if (!targetThisTypes) 1.4598 + return false; 1.4599 + maybeCache->object()->setResultTypeSet(targetThisTypes); 1.4600 + } 1.4601 + 1.4602 + // Inline the call into the inlineBlock. 1.4603 + if (!setCurrentAndSpecializePhis(inlineBlock)) 1.4604 + return false; 1.4605 + InliningStatus status = inlineSingleCall(inlineInfo, target); 1.4606 + if (status == InliningStatus_Error) 1.4607 + return false; 1.4608 + 1.4609 + // Natives may veto inlining. 1.4610 + if (status == InliningStatus_NotInlined) { 1.4611 + JS_ASSERT(target->isNative()); 1.4612 + JS_ASSERT(current == inlineBlock); 1.4613 + inlineBlock->discardAllResumePoints(); 1.4614 + graph().removeBlock(inlineBlock); 1.4615 + choiceSet[i] = false; 1.4616 + continue; 1.4617 + } 1.4618 + 1.4619 + // inlineSingleCall() changed |current| to the inline return block. 1.4620 + MBasicBlock *inlineReturnBlock = current; 1.4621 + setCurrent(dispatchBlock); 1.4622 + 1.4623 + // Connect the inline path to the returnBlock. 1.4624 + // 1.4625 + // Note that guarding is on the original function pointer even 1.4626 + // if there is a clone, since cloning occurs at the callsite. 1.4627 + dispatch->addCase(original, inlineBlock); 1.4628 + 1.4629 + MDefinition *retVal = inlineReturnBlock->peek(-1); 1.4630 + retPhi->addInput(retVal); 1.4631 + inlineReturnBlock->end(MGoto::New(alloc(), returnBlock)); 1.4632 + if (!returnBlock->addPredecessorWithoutPhis(inlineReturnBlock)) 1.4633 + return false; 1.4634 + } 1.4635 + 1.4636 + // Patch the InlinePropertyTable to not dispatch to vetoed paths. 1.4637 + // 1.4638 + // Note that like above, we trim using originals instead of targets. 1.4639 + if (maybeCache) { 1.4640 + maybeCache->object()->setResultTypeSet(cacheObjectTypeSet); 1.4641 + 1.4642 + InlinePropertyTable *propTable = maybeCache->propTable(); 1.4643 + propTable->trimTo(originals, choiceSet); 1.4644 + 1.4645 + // If all paths were vetoed, output only a generic fallback path. 1.4646 + if (propTable->numEntries() == 0) { 1.4647 + JS_ASSERT(dispatch->numCases() == 0); 1.4648 + maybeCache = nullptr; 1.4649 + } 1.4650 + } 1.4651 + 1.4652 + // If necessary, generate a fallback path. 1.4653 + // MTypeObjectDispatch always uses a fallback path. 1.4654 + if (maybeCache || dispatch->numCases() < targets.length()) { 1.4655 + // Generate fallback blocks, and set |current| to the fallback return block. 
1.4656 + if (maybeCache) { 1.4657 + MBasicBlock *fallbackTarget; 1.4658 + if (!inlineTypeObjectFallback(callInfo, dispatchBlock, (MTypeObjectDispatch *)dispatch, 1.4659 + maybeCache, &fallbackTarget)) 1.4660 + { 1.4661 + return false; 1.4662 + } 1.4663 + dispatch->addFallback(fallbackTarget); 1.4664 + } else { 1.4665 + JSFunction *remaining = nullptr; 1.4666 + bool clonedAtCallsite = false; 1.4667 + 1.4668 + // If there is only 1 remaining case, we can annotate the fallback call 1.4669 + // with the target information. 1.4670 + if (dispatch->numCases() + 1 == originals.length()) { 1.4671 + for (uint32_t i = 0; i < originals.length(); i++) { 1.4672 + if (choiceSet[i]) 1.4673 + continue; 1.4674 + 1.4675 + remaining = &targets[i]->as<JSFunction>(); 1.4676 + clonedAtCallsite = targets[i] != originals[i]; 1.4677 + break; 1.4678 + } 1.4679 + } 1.4680 + 1.4681 + if (!inlineGenericFallback(remaining, callInfo, dispatchBlock, clonedAtCallsite)) 1.4682 + return false; 1.4683 + dispatch->addFallback(current); 1.4684 + } 1.4685 + 1.4686 + MBasicBlock *fallbackReturnBlock = current; 1.4687 + 1.4688 + // Connect fallback case to return infrastructure. 1.4689 + MDefinition *retVal = fallbackReturnBlock->peek(-1); 1.4690 + retPhi->addInput(retVal); 1.4691 + fallbackReturnBlock->end(MGoto::New(alloc(), returnBlock)); 1.4692 + if (!returnBlock->addPredecessorWithoutPhis(fallbackReturnBlock)) 1.4693 + return false; 1.4694 + } 1.4695 + 1.4696 + // Finally add the dispatch instruction. 1.4697 + // This must be done at the end so that add() may be called above. 1.4698 + dispatchBlock->end(dispatch); 1.4699 + 1.4700 + // Check the depth change: +1 for retval 1.4701 + JS_ASSERT(returnBlock->stackDepth() == dispatchBlock->stackDepth() - callInfo.numFormals() + 1); 1.4702 + 1.4703 + graph().moveBlockToEnd(returnBlock); 1.4704 + return setCurrentAndSpecializePhis(returnBlock); 1.4705 +} 1.4706 + 1.4707 +MInstruction * 1.4708 +IonBuilder::createDeclEnvObject(MDefinition *callee, MDefinition *scope) 1.4709 +{ 1.4710 + // Get a template CallObject that we'll use to generate inline object 1.4711 + // creation. 1.4712 + DeclEnvObject *templateObj = inspector->templateDeclEnvObject(); 1.4713 + 1.4714 + // One field is added to the function to handle its name. This cannot be a 1.4715 + // dynamic slot because there is still plenty of room on the DeclEnv object. 1.4716 + JS_ASSERT(!templateObj->hasDynamicSlots()); 1.4717 + 1.4718 + // Allocate the actual object. It is important that no intervening 1.4719 + // instructions could potentially bailout, thus leaking the dynamic slots 1.4720 + // pointer. 1.4721 + MInstruction *declEnvObj = MNewDeclEnvObject::New(alloc(), templateObj); 1.4722 + current->add(declEnvObj); 1.4723 + 1.4724 + // Initialize the object's reserved slots. No post barrier is needed here: 1.4725 + // the object will be allocated in the nursery if possible, and if the 1.4726 + // tenured heap is used instead, a minor collection will have been performed 1.4727 + // that moved scope/callee to the tenured heap. 1.4728 + current->add(MStoreFixedSlot::New(alloc(), declEnvObj, DeclEnvObject::enclosingScopeSlot(), scope)); 1.4729 + current->add(MStoreFixedSlot::New(alloc(), declEnvObj, DeclEnvObject::lambdaSlot(), callee)); 1.4730 + 1.4731 + return declEnvObj; 1.4732 +} 1.4733 + 1.4734 +MInstruction * 1.4735 +IonBuilder::createCallObject(MDefinition *callee, MDefinition *scope) 1.4736 +{ 1.4737 + // Get a template CallObject that we'll use to generate inline object 1.4738 + // creation. 
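// An illustrative example: in
//
//   function f(a) { return function () { return a; }; }
//
// the formal |a| is aliased by the inner closure, so the initialization code
// below stores it into the CallObject's slots.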
1.4739 + CallObject *templateObj = inspector->templateCallObject(); 1.4740 + 1.4741 + // If the CallObject needs dynamic slots, allocate those now. 1.4742 + MInstruction *slots; 1.4743 + if (templateObj->hasDynamicSlots()) { 1.4744 + size_t nslots = JSObject::dynamicSlotsCount(templateObj->numFixedSlots(), 1.4745 + templateObj->lastProperty()->slotSpan(templateObj->getClass()), 1.4746 + templateObj->getClass()); 1.4747 + slots = MNewSlots::New(alloc(), nslots); 1.4748 + } else { 1.4749 + slots = MConstant::New(alloc(), NullValue()); 1.4750 + } 1.4751 + current->add(slots); 1.4752 + 1.4753 + // Allocate the actual object. It is important that no intervening 1.4754 + // instructions could potentially bailout, thus leaking the dynamic slots 1.4755 + // pointer. Run-once scripts need a singleton type, so always do a VM call 1.4756 + // in such cases. 1.4757 + MUnaryInstruction *callObj; 1.4758 + if (script()->treatAsRunOnce()) 1.4759 + callObj = MNewRunOnceCallObject::New(alloc(), templateObj, slots); 1.4760 + else 1.4761 + callObj = MNewCallObject::New(alloc(), templateObj, slots); 1.4762 + current->add(callObj); 1.4763 + 1.4764 + // Initialize the object's reserved slots. No post barrier is needed here, 1.4765 + // for the same reason as in createDeclEnvObject. 1.4766 + current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::enclosingScopeSlot(), scope)); 1.4767 + current->add(MStoreFixedSlot::New(alloc(), callObj, CallObject::calleeSlot(), callee)); 1.4768 + 1.4769 + // Initialize argument slots. 1.4770 + for (AliasedFormalIter i(script()); i; i++) { 1.4771 + unsigned slot = i.scopeSlot(); 1.4772 + unsigned formal = i.frameIndex(); 1.4773 + MDefinition *param = current->getSlot(info().argSlotUnchecked(formal)); 1.4774 + if (slot >= templateObj->numFixedSlots()) 1.4775 + current->add(MStoreSlot::New(alloc(), slots, slot - templateObj->numFixedSlots(), param)); 1.4776 + else 1.4777 + current->add(MStoreFixedSlot::New(alloc(), callObj, slot, param)); 1.4778 + } 1.4779 + 1.4780 + return callObj; 1.4781 +} 1.4782 + 1.4783 +MDefinition * 1.4784 +IonBuilder::createThisScripted(MDefinition *callee) 1.4785 +{ 1.4786 + // Get callee.prototype. 1.4787 + // 1.4788 + // This instruction MUST be idempotent: since it does not correspond to an 1.4789 + // explicit operation in the bytecode, we cannot use resumeAfter(). 1.4790 + // Getters may not override |prototype| fetching, so this operation is indeed idempotent. 1.4791 + // - First try an idempotent property cache. 1.4792 + // - Upon failing idempotent property cache, we can't use a non-idempotent cache, 1.4793 + // therefore we fall back to CallGetProperty. 1.4794 + // 1.4795 + // Note: both CallGetProperty and GetPropertyCache can trigger a GC, 1.4796 + // and thus invalidation.
1.4797 + MInstruction *getProto; 1.4798 + if (!invalidatedIdempotentCache()) { 1.4799 + MGetPropertyCache *getPropCache = MGetPropertyCache::New(alloc(), callee, names().prototype, 1.4800 + /* monitored = */ false); 1.4801 + getPropCache->setIdempotent(); 1.4802 + getProto = getPropCache; 1.4803 + } else { 1.4804 + MCallGetProperty *callGetProp = MCallGetProperty::New(alloc(), callee, names().prototype, 1.4805 + /* callprop = */ false); 1.4806 + callGetProp->setIdempotent(); 1.4807 + getProto = callGetProp; 1.4808 + } 1.4809 + current->add(getProto); 1.4810 + 1.4811 + // Create |this| from the prototype. 1.4812 + MCreateThisWithProto *createThis = MCreateThisWithProto::New(alloc(), callee, getProto); 1.4813 + current->add(createThis); 1.4814 + 1.4815 + return createThis; 1.4816 +} 1.4817 + 1.4818 +JSObject * 1.4819 +IonBuilder::getSingletonPrototype(JSFunction *target) 1.4820 +{ 1.4821 + if (!target || !target->hasSingletonType()) 1.4822 + return nullptr; 1.4823 + types::TypeObjectKey *targetType = types::TypeObjectKey::get(target); 1.4824 + if (targetType->unknownProperties()) 1.4825 + return nullptr; 1.4826 + 1.4827 + jsid protoid = NameToId(names().prototype); 1.4828 + types::HeapTypeSetKey protoProperty = targetType->property(protoid); 1.4829 + 1.4830 + return protoProperty.singleton(constraints()); 1.4831 +} 1.4832 + 1.4833 +MDefinition * 1.4834 +IonBuilder::createThisScriptedSingleton(JSFunction *target, MDefinition *callee) 1.4835 +{ 1.4836 + // Get the singleton prototype (if it exists). 1.4837 + JSObject *proto = getSingletonPrototype(target); 1.4838 + if (!proto) 1.4839 + return nullptr; 1.4840 + 1.4841 + JSObject *templateObject = inspector->getTemplateObject(pc); 1.4842 + if (!templateObject || !templateObject->is<JSObject>()) 1.4843 + return nullptr; 1.4844 + if (!templateObject->hasTenuredProto() || templateObject->getProto() != proto) 1.4845 + return nullptr; 1.4846 + 1.4847 + if (!target->nonLazyScript()->types) 1.4848 + return nullptr; 1.4849 + if (!types::TypeScript::ThisTypes(target->nonLazyScript())->hasType(types::Type::ObjectType(templateObject))) 1.4850 + return nullptr; 1.4851 + 1.4852 + // For template objects with NewScript info, the appropriate allocation 1.4853 + // kind to use may change due to dynamic property adds. In these cases 1.4854 + // the calling Ion code will be invalidated, but any baseline template object 1.4855 + // may be stale. Update to the correct template object in this case. 1.4856 + types::TypeObject *templateType = templateObject->type(); 1.4857 + if (templateType->hasNewScript()) { 1.4858 + templateObject = templateType->newScript()->templateObject; 1.4859 + JS_ASSERT(templateObject->type() == templateType); 1.4860 + 1.4861 + // Trigger recompilation if the templateObject changes. 1.4862 + types::TypeObjectKey::get(templateType)->watchStateChangeForNewScriptTemplate(constraints()); 1.4863 + } 1.4864 + 1.4865 + // Generate an inline path to create a new |this| object with 1.4866 + // the given singleton prototype.
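// Roughly: for a |new C()| call where both C and C.prototype are known
// singletons, |this| can be allocated inline by cloning the template
// object, whose proto is already C.prototype (illustrative sketch of the
// fast path below).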
1.4867 + MCreateThisWithTemplate *createThis = 1.4868 + MCreateThisWithTemplate::New(alloc(), constraints(), templateObject, 1.4869 + templateObject->type()->initialHeap(constraints())); 1.4870 + current->add(createThis); 1.4871 + 1.4872 + return createThis; 1.4873 +} 1.4874 + 1.4875 +MDefinition * 1.4876 +IonBuilder::createThis(JSFunction *target, MDefinition *callee) 1.4877 +{ 1.4878 + // Create this for unknown target 1.4879 + if (!target) { 1.4880 + MCreateThis *createThis = MCreateThis::New(alloc(), callee); 1.4881 + current->add(createThis); 1.4882 + return createThis; 1.4883 + } 1.4884 + 1.4885 + // Native constructors build the new Object themselves. 1.4886 + if (target->isNative()) { 1.4887 + if (!target->isNativeConstructor()) 1.4888 + return nullptr; 1.4889 + 1.4890 + MConstant *magic = MConstant::New(alloc(), MagicValue(JS_IS_CONSTRUCTING)); 1.4891 + current->add(magic); 1.4892 + return magic; 1.4893 + } 1.4894 + 1.4895 + // Try baking in the prototype. 1.4896 + MDefinition *createThis = createThisScriptedSingleton(target, callee); 1.4897 + if (createThis) 1.4898 + return createThis; 1.4899 + 1.4900 + return createThisScripted(callee); 1.4901 +} 1.4902 + 1.4903 +bool 1.4904 +IonBuilder::jsop_funcall(uint32_t argc) 1.4905 +{ 1.4906 + // Stack for JSOP_FUNCALL: 1.4907 + // 1: arg0 1.4908 + // ... 1.4909 + // argc: argN 1.4910 + // argc+1: JSFunction*, the 'f' in |f.call()|, in |this| position. 1.4911 + // argc+2: The native 'call' function. 1.4912 + 1.4913 + int calleeDepth = -((int)argc + 2); 1.4914 + int funcDepth = -((int)argc + 1); 1.4915 + 1.4916 + // If |Function.prototype.call| may be overridden, don't optimize callsite. 1.4917 + types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); 1.4918 + JSFunction *native = getSingleCallTarget(calleeTypes); 1.4919 + if (!native || !native->isNative() || native->native() != &js_fun_call) { 1.4920 + CallInfo callInfo(alloc(), false); 1.4921 + if (!callInfo.init(current, argc)) 1.4922 + return false; 1.4923 + return makeCall(native, callInfo, false); 1.4924 + } 1.4925 + current->peek(calleeDepth)->setImplicitlyUsedUnchecked(); 1.4926 + 1.4927 + // Extract call target. 1.4928 + types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet(); 1.4929 + JSFunction *target = getSingleCallTarget(funTypes); 1.4930 + 1.4931 + // Shimmy the slots down to remove the native 'call' function. 1.4932 + current->shimmySlots(funcDepth - 1); 1.4933 + 1.4934 + bool zeroArguments = (argc == 0); 1.4935 + 1.4936 + // If no |this| argument was provided, explicitly pass Undefined. 1.4937 + // Pushing is safe here, since one stack slot has been removed. 1.4938 + if (zeroArguments) { 1.4939 + pushConstant(UndefinedValue()); 1.4940 + } else { 1.4941 + // |this| becomes implicit in the call. 1.4942 + argc -= 1; 1.4943 + } 1.4944 + 1.4945 + CallInfo callInfo(alloc(), false); 1.4946 + if (!callInfo.init(current, argc)) 1.4947 + return false; 1.4948 + 1.4949 + // Try to inline the call. 1.4950 + if (!zeroArguments) { 1.4951 + InliningDecision decision = makeInliningDecision(target, callInfo); 1.4952 + switch (decision) { 1.4953 + case InliningDecision_Error: 1.4954 + return false; 1.4955 + case InliningDecision_DontInline: 1.4956 + break; 1.4957 + case InliningDecision_Inline: 1.4958 + if (target->isInterpreted()) 1.4959 + return inlineScriptedCall(callInfo, target); 1.4960 + break; 1.4961 + } 1.4962 + } 1.4963 + 1.4964 + // Call without inlining. 
1.4965 + return makeCall(target, callInfo, false); 1.4966 +} 1.4967 + 1.4968 +bool 1.4969 +IonBuilder::jsop_funapply(uint32_t argc) 1.4970 +{ 1.4971 + int calleeDepth = -((int)argc + 2); 1.4972 + 1.4973 + types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); 1.4974 + JSFunction *native = getSingleCallTarget(calleeTypes); 1.4975 + if (argc != 2) { 1.4976 + CallInfo callInfo(alloc(), false); 1.4977 + if (!callInfo.init(current, argc)) 1.4978 + return false; 1.4979 + return makeCall(native, callInfo, false); 1.4980 + } 1.4981 + 1.4982 + // Disable compilation if the second argument to |apply| cannot be guaranteed 1.4983 + // to be either definitely |arguments| or definitely not |arguments|. 1.4984 + MDefinition *argument = current->peek(-1); 1.4985 + if (script()->argumentsHasVarBinding() && 1.4986 + argument->mightBeType(MIRType_MagicOptimizedArguments) && 1.4987 + argument->type() != MIRType_MagicOptimizedArguments) 1.4988 + { 1.4989 + return abort("fun.apply with MaybeArguments"); 1.4990 + } 1.4991 + 1.4992 + // Fall back to a regular call if arg 2 is not definitely |arguments|. 1.4993 + if (argument->type() != MIRType_MagicOptimizedArguments) { 1.4994 + CallInfo callInfo(alloc(), false); 1.4995 + if (!callInfo.init(current, argc)) 1.4996 + return false; 1.4997 + return makeCall(native, callInfo, false); 1.4998 + } 1.4999 + 1.5000 + if (!native || 1.5001 + !native->isNative() || 1.5002 + native->native() != js_fun_apply) 1.5003 + { 1.5004 + return abort("fun.apply speculation failed"); 1.5005 + } 1.5006 + 1.5007 + current->peek(calleeDepth)->setImplicitlyUsedUnchecked(); 1.5008 + 1.5009 + // Use the funapply path that definitely uses |arguments|. 1.5010 + return jsop_funapplyarguments(argc); 1.5011 +} 1.5012 + 1.5013 +bool 1.5014 +IonBuilder::jsop_funapplyarguments(uint32_t argc) 1.5015 +{ 1.5016 + // Stack for JSOP_FUNAPPLY: 1.5017 + // 1: Vp 1.5018 + // 2: This 1.5019 + // argc+1: JSFunction*, the 'f' in |f.apply()|, in |this| position. 1.5020 + // argc+2: The native 'apply' function. 1.5021 + 1.5022 + int funcDepth = -((int)argc + 1); 1.5023 + 1.5024 + // Extract call target. 1.5025 + types::TemporaryTypeSet *funTypes = current->peek(funcDepth)->resultTypeSet(); 1.5026 + JSFunction *target = getSingleCallTarget(funTypes); 1.5027 + 1.5028 + // When this script isn't inlined, use MApplyArgs to copy the arguments 1.5029 + // from the stack and call the function. 1.5030 + if (inliningDepth_ == 0 && info().executionMode() != DefinitePropertiesAnalysis) { 1.5031 + // The array argument corresponds to the arguments object. As the JIT 1.5032 + // is implicitly reading the arguments object in the next instruction, 1.5033 + // we need to prevent the deletion of the arguments object from resume 1.5034 + // points, so that Baseline will behave correctly after a bailout. 1.5035 + MDefinition *vp = current->pop(); 1.5036 + vp->setImplicitlyUsedUnchecked(); 1.5037 + 1.5038 + MDefinition *argThis = current->pop(); 1.5039 + 1.5040 + // Unwrap the (JSFunction *) parameter. 1.5041 + MDefinition *argFunc = current->pop(); 1.5042 + 1.5043 + // Pop apply function.
1.5044 + current->pop(); 1.5045 + 1.5046 + MArgumentsLength *numArgs = MArgumentsLength::New(alloc()); 1.5047 + current->add(numArgs); 1.5048 + 1.5049 + MApplyArgs *apply = MApplyArgs::New(alloc(), target, argFunc, numArgs, argThis); 1.5050 + current->add(apply); 1.5051 + current->push(apply); 1.5052 + if (!resumeAfter(apply)) 1.5053 + return false; 1.5054 + 1.5055 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.5056 + return pushTypeBarrier(apply, types, true); 1.5057 + } 1.5058 + 1.5059 + // When inlining we have the arguments the function gets called with 1.5060 + // and can optimize even more, by just calling the function with the args. 1.5061 + // We also try this path when doing the definite properties analysis, as we 1.5062 + // can inline the apply() target and don't care about the actual arguments 1.5063 + // that were passed in. 1.5064 + 1.5065 + CallInfo callInfo(alloc(), false); 1.5066 + 1.5067 + // Vp 1.5068 + MDefinition *vp = current->pop(); 1.5069 + vp->setImplicitlyUsedUnchecked(); 1.5070 + 1.5071 + // Arguments 1.5072 + MDefinitionVector args(alloc()); 1.5073 + if (inliningDepth_) { 1.5074 + if (!args.appendAll(inlineCallInfo_->argv())) 1.5075 + return false; 1.5076 + } 1.5077 + callInfo.setArgs(&args); 1.5078 + 1.5079 + // This 1.5080 + MDefinition *argThis = current->pop(); 1.5081 + callInfo.setThis(argThis); 1.5082 + 1.5083 + // Pop function parameter. 1.5084 + MDefinition *argFunc = current->pop(); 1.5085 + callInfo.setFun(argFunc); 1.5086 + 1.5087 + // Pop apply function. 1.5088 + current->pop(); 1.5089 + 1.5090 + // Try to inline the call. 1.5091 + InliningDecision decision = makeInliningDecision(target, callInfo); 1.5092 + switch (decision) { 1.5093 + case InliningDecision_Error: 1.5094 + return false; 1.5095 + case InliningDecision_DontInline: 1.5096 + break; 1.5097 + case InliningDecision_Inline: 1.5098 + if (target->isInterpreted()) 1.5099 + return inlineScriptedCall(callInfo, target); 1.5100 + } 1.5101 + 1.5102 + return makeCall(target, callInfo, false); 1.5103 +} 1.5104 + 1.5105 +bool 1.5106 +IonBuilder::jsop_call(uint32_t argc, bool constructing) 1.5107 +{ 1.5108 + // If this call has never executed, try to seed the observed type set 1.5109 + // based on how the call result is used. 1.5110 + types::TemporaryTypeSet *observed = bytecodeTypes(pc); 1.5111 + if (observed->empty()) { 1.5112 + if (BytecodeFlowsToBitop(pc)) { 1.5113 + observed->addType(types::Type::Int32Type(), alloc_->lifoAlloc()); 1.5114 + } else if (*GetNextPc(pc) == JSOP_POS) { 1.5115 + // Note: this is lame, overspecialized on the code patterns used 1.5116 + // by asm.js and should be replaced by a more general mechanism. 1.5117 + // See bug 870847. 1.5118 + observed->addType(types::Type::DoubleType(), alloc_->lifoAlloc()); 1.5119 + } 1.5120 + } 1.5121 + 1.5122 + int calleeDepth = -((int)argc + 2); 1.5123 + 1.5124 + // Acquire the known call target if it exists. 1.5125 + ObjectVector originals(alloc()); 1.5126 + bool gotLambda = false; 1.5127 + types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); 1.5128 + if (calleeTypes) { 1.5129 + if (!getPolyCallTargets(calleeTypes, constructing, originals, 4, &gotLambda)) 1.5130 + return false; 1.5131 + } 1.5132 + JS_ASSERT_IF(gotLambda, originals.length() <= 1); 1.5133 + 1.5134 + // If any call targets need to be cloned, look for existing clones to use. 1.5135 + // Keep track of the originals as we need to case on them for poly inline.
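// (Callsite cloning, roughly: a should-clone-at-callsite function gets a
// separate clone per call site so type information does not mix between
// sites; |originals| keeps the uncloned targets because the dispatch logic
// keys on them. Illustrative summary.)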
1.5136 + bool hasClones = false; 1.5137 + ObjectVector targets(alloc()); 1.5138 + for (uint32_t i = 0; i < originals.length(); i++) { 1.5139 + JSFunction *fun = &originals[i]->as<JSFunction>(); 1.5140 + if (fun->hasScript() && fun->nonLazyScript()->shouldCloneAtCallsite()) { 1.5141 + if (JSFunction *clone = ExistingCloneFunctionAtCallsite(compartment->callsiteClones(), fun, script(), pc)) { 1.5142 + fun = clone; 1.5143 + hasClones = true; 1.5144 + } 1.5145 + } 1.5146 + if (!targets.append(fun)) 1.5147 + return false; 1.5148 + } 1.5149 + 1.5150 + CallInfo callInfo(alloc(), constructing); 1.5151 + if (!callInfo.init(current, argc)) 1.5152 + return false; 1.5153 + 1.5154 + // Try inlining 1.5155 + InliningStatus status = inlineCallsite(targets, originals, gotLambda, callInfo); 1.5156 + if (status == InliningStatus_Inlined) 1.5157 + return true; 1.5158 + if (status == InliningStatus_Error) 1.5159 + return false; 1.5160 + 1.5161 + // Not inlining; just make the call. 1.5162 + JSFunction *target = nullptr; 1.5163 + if (targets.length() == 1) 1.5164 + target = &targets[0]->as<JSFunction>(); 1.5165 + 1.5166 + return makeCall(target, callInfo, hasClones); 1.5167 +} 1.5168 + 1.5169 +MDefinition * 1.5170 +IonBuilder::makeCallsiteClone(JSFunction *target, MDefinition *fun) 1.5171 +{ 1.5172 + // Bake in the clone eagerly if we have a known target. We have arrived here 1.5173 + // because TI told us that the known target is a should-clone-at-callsite 1.5174 + // function, which means that target already is the clone. Ensure 1.5175 + // that the old definition remains in resume points. 1.5176 + if (target) { 1.5177 + fun->setImplicitlyUsedUnchecked(); 1.5178 + return constant(ObjectValue(*target)); 1.5179 + } 1.5180 + 1.5181 + // Add a callsite clone IC if we have multiple targets. Note that we 1.5182 + // should have checked already that at least some targets are marked as 1.5183 + // should-clone-at-callsite.
1.5184 + MCallsiteCloneCache *clone = MCallsiteCloneCache::New(alloc(), fun, pc); 1.5185 + current->add(clone); 1.5186 + return clone; 1.5187 +} 1.5188 + 1.5189 +bool 1.5190 +IonBuilder::testShouldDOMCall(types::TypeSet *inTypes, 1.5191 + JSFunction *func, JSJitInfo::OpType opType) 1.5192 +{ 1.5193 + if (!func->isNative() || !func->jitInfo()) 1.5194 + return false; 1.5195 + 1.5196 + // If all the DOM objects flowing through are legal with this 1.5197 + // property, we can bake in a call to the bottom half of the DOM 1.5198 + // accessor 1.5199 + DOMInstanceClassMatchesProto instanceChecker = 1.5200 + compartment->runtime()->DOMcallbacks()->instanceClassMatchesProto; 1.5201 + 1.5202 + const JSJitInfo *jinfo = func->jitInfo(); 1.5203 + if (jinfo->type() != opType) 1.5204 + return false; 1.5205 + 1.5206 + for (unsigned i = 0; i < inTypes->getObjectCount(); i++) { 1.5207 + types::TypeObjectKey *curType = inTypes->getObject(i); 1.5208 + if (!curType) 1.5209 + continue; 1.5210 + 1.5211 + if (!curType->hasTenuredProto()) 1.5212 + return false; 1.5213 + JSObject *proto = curType->proto().toObjectOrNull(); 1.5214 + if (!instanceChecker(proto, jinfo->protoID, jinfo->depth)) 1.5215 + return false; 1.5216 + } 1.5217 + 1.5218 + return true; 1.5219 +} 1.5220 + 1.5221 +static bool 1.5222 +ArgumentTypesMatch(MDefinition *def, types::StackTypeSet *calleeTypes) 1.5223 +{ 1.5224 + if (def->resultTypeSet()) { 1.5225 + JS_ASSERT(def->type() == MIRType_Value || def->mightBeType(def->type())); 1.5226 + return def->resultTypeSet()->isSubset(calleeTypes); 1.5227 + } 1.5228 + 1.5229 + if (def->type() == MIRType_Value) 1.5230 + return false; 1.5231 + 1.5232 + if (def->type() == MIRType_Object) 1.5233 + return calleeTypes->unknownObject(); 1.5234 + 1.5235 + return calleeTypes->mightBeMIRType(def->type()); 1.5236 +} 1.5237 + 1.5238 +bool 1.5239 +IonBuilder::testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo) 1.5240 +{ 1.5241 + // If we have a known target, check if the caller arg types are a subset of the callee's. 1.5242 + // Since typesets only accumulate and can't shrink, we then don't need to check 1.5243 + // the arguments anymore. 1.5244 + if (!target->hasScript()) 1.5245 + return true; 1.5246 + 1.5247 + JSScript *targetScript = target->nonLazyScript(); 1.5248 + 1.5249 + if (!targetScript->types) 1.5250 + return true; 1.5251 + 1.5252 + if (!ArgumentTypesMatch(callInfo.thisArg(), types::TypeScript::ThisTypes(targetScript))) 1.5253 + return true; 1.5254 + uint32_t expected_args = Min<uint32_t>(callInfo.argc(), target->nargs()); 1.5255 + for (size_t i = 0; i < expected_args; i++) { 1.5256 + if (!ArgumentTypesMatch(callInfo.getArg(i), types::TypeScript::ArgTypes(targetScript, i))) 1.5257 + return true; 1.5258 + } 1.5259 + for (size_t i = callInfo.argc(); i < target->nargs(); i++) { 1.5260 + if (!types::TypeScript::ArgTypes(targetScript, i)->mightBeMIRType(MIRType_Undefined)) 1.5261 + return true; 1.5262 + } 1.5263 + 1.5264 + return false; 1.5265 +} 1.5266 + 1.5267 +MCall * 1.5268 +IonBuilder::makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite) 1.5269 +{ 1.5270 + // This function may be called with a mutated stack. 1.5271 + // Querying TI for popped types is invalid. 1.5272 + 1.5273 + uint32_t targetArgs = callInfo.argc(); 1.5274 + 1.5275 + // Collect the number of missing arguments, provided that the target is 1.5276 + // scripted. Native functions are passed an explicit 'argc' parameter.
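// E.g. for a scripted |function f(a, b, c)| called as |f(1)|, targetArgs
// becomes 3 and the two missing arguments are padded with |undefined|
// below (illustrative).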
1.5277 + if (target && !target->isNative()) 1.5278 + targetArgs = Max<uint32_t>(target->nargs(), callInfo.argc()); 1.5279 + 1.5280 + bool isDOMCall = false; 1.5281 + if (target && !callInfo.constructing()) { 1.5282 + // We know we have a single call target. Check whether the "this" types 1.5283 + // are DOM types and our function a DOM function, and if so flag the 1.5284 + // MCall accordingly. 1.5285 + types::TemporaryTypeSet *thisTypes = callInfo.thisArg()->resultTypeSet(); 1.5286 + if (thisTypes && 1.5287 + thisTypes->getKnownMIRType() == MIRType_Object && 1.5288 + thisTypes->isDOMClass() && 1.5289 + testShouldDOMCall(thisTypes, target, JSJitInfo::Method)) 1.5290 + { 1.5291 + isDOMCall = true; 1.5292 + } 1.5293 + } 1.5294 + 1.5295 + MCall *call = MCall::New(alloc(), target, targetArgs + 1, callInfo.argc(), 1.5296 + callInfo.constructing(), isDOMCall); 1.5297 + if (!call) 1.5298 + return nullptr; 1.5299 + 1.5300 + // Explicitly pad any missing arguments with |undefined|. 1.5301 + // This permits skipping the argumentsRectifier. 1.5302 + for (int i = targetArgs; i > (int)callInfo.argc(); i--) { 1.5303 + JS_ASSERT_IF(target, !target->isNative()); 1.5304 + MConstant *undef = constant(UndefinedValue()); 1.5305 + call->addArg(i, undef); 1.5306 + } 1.5307 + 1.5308 + // Add explicit arguments. 1.5309 + // Skip addArg(0) because it is reserved for this 1.5310 + for (int32_t i = callInfo.argc() - 1; i >= 0; i--) 1.5311 + call->addArg(i + 1, callInfo.getArg(i)); 1.5312 + 1.5313 + // Now that we've told it about all the args, compute whether it's movable 1.5314 + call->computeMovable(); 1.5315 + 1.5316 + // Inline the constructor on the caller-side. 1.5317 + if (callInfo.constructing()) { 1.5318 + MDefinition *create = createThis(target, callInfo.fun()); 1.5319 + if (!create) { 1.5320 + abort("Failure inlining constructor for call."); 1.5321 + return nullptr; 1.5322 + } 1.5323 + 1.5324 + callInfo.thisArg()->setImplicitlyUsedUnchecked(); 1.5325 + callInfo.setThis(create); 1.5326 + } 1.5327 + 1.5328 + // Pass |this| and function. 1.5329 + MDefinition *thisArg = callInfo.thisArg(); 1.5330 + call->addArg(0, thisArg); 1.5331 + 1.5332 + // Add a callsite clone IC for multiple targets which all should be 1.5333 + // callsite cloned, or bake in the clone for a single target. 1.5334 + if (cloneAtCallsite) { 1.5335 + MDefinition *fun = makeCallsiteClone(target, callInfo.fun()); 1.5336 + callInfo.setFun(fun); 1.5337 + } 1.5338 + 1.5339 + if (target && !testNeedsArgumentCheck(target, callInfo)) 1.5340 + call->disableArgCheck(); 1.5341 + 1.5342 + call->initFunction(callInfo.fun()); 1.5343 + 1.5344 + current->add(call); 1.5345 + return call; 1.5346 +} 1.5347 + 1.5348 +static bool 1.5349 +DOMCallNeedsBarrier(const JSJitInfo* jitinfo, types::TemporaryTypeSet *types) 1.5350 +{ 1.5351 + // If the return type of our DOM native is in "types" already, we don't 1.5352 + // actually need a barrier. 1.5353 + if (jitinfo->returnType() == JSVAL_TYPE_UNKNOWN) 1.5354 + return true; 1.5355 + 1.5356 + // JSVAL_TYPE_OBJECT doesn't tell us much; we still have to barrier on the 1.5357 + // actual type of the object. 1.5358 + if (jitinfo->returnType() == JSVAL_TYPE_OBJECT) 1.5359 + return true; 1.5360 + 1.5361 + // No need for a barrier if we're already expecting the type we'll produce. 
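// E.g. a DOM accessor whose jitinfo return type is JSVAL_TYPE_INT32,
// flowing into a site whose observed set is already Int32, needs no
// barrier (illustrative).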
1.5362 + return MIRTypeFromValueType(jitinfo->returnType()) != types->getKnownMIRType(); 1.5363 +} 1.5364 + 1.5365 +bool 1.5366 +IonBuilder::makeCall(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite) 1.5367 +{ 1.5368 + // Constructor calls to non-constructors should throw. We don't want to use 1.5369 + // CallKnown in this case. 1.5370 + JS_ASSERT_IF(callInfo.constructing() && target, 1.5371 + target->isInterpretedConstructor() || target->isNativeConstructor()); 1.5372 + 1.5373 + MCall *call = makeCallHelper(target, callInfo, cloneAtCallsite); 1.5374 + if (!call) 1.5375 + return false; 1.5376 + 1.5377 + current->push(call); 1.5378 + if (call->isEffectful() && !resumeAfter(call)) 1.5379 + return false; 1.5380 + 1.5381 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.5382 + 1.5383 + if (call->isCallDOMNative()) 1.5384 + return pushDOMTypeBarrier(call, types, call->getSingleTarget()); 1.5385 + 1.5386 + return pushTypeBarrier(call, types, true); 1.5387 +} 1.5388 + 1.5389 +bool 1.5390 +IonBuilder::jsop_eval(uint32_t argc) 1.5391 +{ 1.5392 + int calleeDepth = -((int)argc + 2); 1.5393 + types::TemporaryTypeSet *calleeTypes = current->peek(calleeDepth)->resultTypeSet(); 1.5394 + 1.5395 + // Emit a normal call if the eval has never executed. This keeps us from 1.5396 + // disabling compilation for the script when testing with --ion-eager. 1.5397 + if (calleeTypes && calleeTypes->empty()) 1.5398 + return jsop_call(argc, /* constructing = */ false); 1.5399 + 1.5400 + JSFunction *singleton = getSingleCallTarget(calleeTypes); 1.5401 + if (!singleton) 1.5402 + return abort("No singleton callee for eval()"); 1.5403 + 1.5404 + if (script()->global().valueIsEval(ObjectValue(*singleton))) { 1.5405 + if (argc != 1) 1.5406 + return abort("Direct eval with more than one argument"); 1.5407 + 1.5408 + if (!info().funMaybeLazy()) 1.5409 + return abort("Direct eval in global code"); 1.5410 + 1.5411 + // The 'this' value for the outer and eval scripts must be the 1.5412 + // same. This is not guaranteed if a primitive string/number/etc. 1.5413 + // is passed through to the eval invoke as the primitive may be 1.5414 + // boxed into different objects if accessed via 'this'. 1.5415 + MIRType type = thisTypes->getKnownMIRType(); 1.5416 + if (type != MIRType_Object && type != MIRType_Null && type != MIRType_Undefined) 1.5417 + return abort("Direct eval from script with maybe-primitive 'this'"); 1.5418 + 1.5419 + CallInfo callInfo(alloc(), /* constructing = */ false); 1.5420 + if (!callInfo.init(current, argc)) 1.5421 + return false; 1.5422 + callInfo.setImplicitlyUsedUnchecked(); 1.5423 + 1.5424 + callInfo.fun()->setImplicitlyUsedUnchecked(); 1.5425 + 1.5426 + MDefinition *scopeChain = current->scopeChain(); 1.5427 + MDefinition *string = callInfo.getArg(0); 1.5428 + 1.5429 + // Direct eval acts as identity on non-string types according to 1.5430 + // ES5 15.1.2.1 step 1. 1.5431 + if (!string->mightBeType(MIRType_String)) { 1.5432 + current->push(string); 1.5433 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.5434 + return pushTypeBarrier(string, types, true); 1.5435 + } 1.5436 + 1.5437 + current->pushSlot(info().thisSlot()); 1.5438 + MDefinition *thisValue = current->pop(); 1.5439 + 1.5440 + // Try to pattern match 'eval(v + "()")'. In this case v is likely a 1.5441 + // name on the scope chain and the eval is performing a call on that 1.5442 + // value. Use a dynamic scope chain lookup rather than a full eval. 
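// E.g. with |var v = "foo"; eval(v + "()");| the eval reduces to looking
// up "foo" on the scope chain and calling the result (illustrative).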
1.5443 + if (string->isConcat() && 1.5444 + string->getOperand(1)->isConstant() && 1.5445 + string->getOperand(1)->toConstant()->value().isString()) 1.5446 + { 1.5447 + JSAtom *atom = &string->getOperand(1)->toConstant()->value().toString()->asAtom(); 1.5448 + 1.5449 + if (StringEqualsAscii(atom, "()")) { 1.5450 + MDefinition *name = string->getOperand(0); 1.5451 + MInstruction *dynamicName = MGetDynamicName::New(alloc(), scopeChain, name); 1.5452 + current->add(dynamicName); 1.5453 + 1.5454 + current->push(dynamicName); 1.5455 + current->push(thisValue); 1.5456 + 1.5457 + CallInfo evalCallInfo(alloc(), /* constructing = */ false); 1.5458 + if (!evalCallInfo.init(current, /* argc = */ 0)) 1.5459 + return false; 1.5460 + 1.5461 + return makeCall(nullptr, evalCallInfo, false); 1.5462 + } 1.5463 + } 1.5464 + 1.5465 + MInstruction *filterArguments = MFilterArgumentsOrEval::New(alloc(), string); 1.5466 + current->add(filterArguments); 1.5467 + 1.5468 + MInstruction *ins = MCallDirectEval::New(alloc(), scopeChain, string, thisValue, pc); 1.5469 + current->add(ins); 1.5470 + current->push(ins); 1.5471 + 1.5472 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.5473 + return resumeAfter(ins) && pushTypeBarrier(ins, types, true); 1.5474 + } 1.5475 + 1.5476 + return jsop_call(argc, /* constructing = */ false); 1.5477 +} 1.5478 + 1.5479 +bool 1.5480 +IonBuilder::jsop_compare(JSOp op) 1.5481 +{ 1.5482 + MDefinition *right = current->pop(); 1.5483 + MDefinition *left = current->pop(); 1.5484 + 1.5485 + MCompare *ins = MCompare::New(alloc(), left, right, op); 1.5486 + current->add(ins); 1.5487 + current->push(ins); 1.5488 + 1.5489 + ins->infer(inspector, pc); 1.5490 + 1.5491 + if (ins->isEffectful() && !resumeAfter(ins)) 1.5492 + return false; 1.5493 + return true; 1.5494 +} 1.5495 + 1.5496 +bool 1.5497 +IonBuilder::jsop_newarray(uint32_t count) 1.5498 +{ 1.5499 + JS_ASSERT(script()->compileAndGo()); 1.5500 + 1.5501 + JSObject *templateObject = inspector->getTemplateObject(pc); 1.5502 + if (!templateObject) 1.5503 + return abort("No template object for NEWARRAY"); 1.5504 + 1.5505 + JS_ASSERT(templateObject->is<ArrayObject>()); 1.5506 + if (templateObject->type()->unknownProperties()) { 1.5507 + // We will get confused in jsop_initelem_array if we can't find the 1.5508 + // type object being initialized. 1.5509 + return abort("New array has unknown properties"); 1.5510 + } 1.5511 + 1.5512 + MNewArray *ins = MNewArray::New(alloc(), constraints(), count, templateObject, 1.5513 + templateObject->type()->initialHeap(constraints()), 1.5514 + MNewArray::NewArray_Allocating); 1.5515 + current->add(ins); 1.5516 + current->push(ins); 1.5517 + 1.5518 + types::TemporaryTypeSet::DoubleConversion conversion = 1.5519 + ins->resultTypeSet()->convertDoubleElements(constraints()); 1.5520 + 1.5521 + if (conversion == types::TemporaryTypeSet::AlwaysConvertToDoubles) 1.5522 + templateObject->setShouldConvertDoubleElements(); 1.5523 + else 1.5524 + templateObject->clearShouldConvertDoubleElements(); 1.5525 + return true; 1.5526 +} 1.5527 + 1.5528 +bool 1.5529 +IonBuilder::jsop_newobject() 1.5530 +{ 1.5531 + // Don't bake in the TypeObject for non-CNG scripts. 
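// (CNG = compile-and-go: the script is tied to a single global, which is
// what makes it safe to bake objects such as the template object below
// directly into the jitcode.)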
1.5532 + JS_ASSERT(script()->compileAndGo()); 1.5533 + 1.5534 + JSObject *templateObject = inspector->getTemplateObject(pc); 1.5535 + if (!templateObject) 1.5536 + return abort("No template object for NEWOBJECT"); 1.5537 + 1.5538 + JS_ASSERT(templateObject->is<JSObject>()); 1.5539 + MNewObject *ins = MNewObject::New(alloc(), constraints(), templateObject, 1.5540 + templateObject->hasSingletonType() 1.5541 + ? gc::TenuredHeap 1.5542 + : templateObject->type()->initialHeap(constraints()), 1.5543 + /* templateObjectIsClassPrototype = */ false); 1.5544 + 1.5545 + current->add(ins); 1.5546 + current->push(ins); 1.5547 + 1.5548 + return resumeAfter(ins); 1.5549 +} 1.5550 + 1.5551 +bool 1.5552 +IonBuilder::jsop_initelem() 1.5553 +{ 1.5554 + MDefinition *value = current->pop(); 1.5555 + MDefinition *id = current->pop(); 1.5556 + MDefinition *obj = current->peek(-1); 1.5557 + 1.5558 + MInitElem *initElem = MInitElem::New(alloc(), obj, id, value); 1.5559 + current->add(initElem); 1.5560 + 1.5561 + return resumeAfter(initElem); 1.5562 +} 1.5563 + 1.5564 +bool 1.5565 +IonBuilder::jsop_initelem_array() 1.5566 +{ 1.5567 + MDefinition *value = current->pop(); 1.5568 + MDefinition *obj = current->peek(-1); 1.5569 + 1.5570 + // Make sure that arrays have the type being written to them by the 1.5571 + // initializer, and that arrays are marked as non-packed when writing holes 1.5572 + // to them during initialization. 1.5573 + bool needStub = false; 1.5574 + types::TypeObjectKey *initializer = obj->resultTypeSet()->getObject(0); 1.5575 + if (value->type() == MIRType_MagicHole) { 1.5576 + if (!initializer->hasFlags(constraints(), types::OBJECT_FLAG_NON_PACKED)) 1.5577 + needStub = true; 1.5578 + } else if (!initializer->unknownProperties()) { 1.5579 + types::HeapTypeSetKey elemTypes = initializer->property(JSID_VOID); 1.5580 + if (!TypeSetIncludes(elemTypes.maybeTypes(), value->type(), value->resultTypeSet())) { 1.5581 + elemTypes.freeze(constraints()); 1.5582 + needStub = true; 1.5583 + } 1.5584 + } 1.5585 + 1.5586 + if (NeedsPostBarrier(info(), value)) 1.5587 + current->add(MPostWriteBarrier::New(alloc(), obj, value)); 1.5588 + 1.5589 + if (needStub) { 1.5590 + MCallInitElementArray *store = MCallInitElementArray::New(alloc(), obj, GET_UINT24(pc), value); 1.5591 + current->add(store); 1.5592 + return resumeAfter(store); 1.5593 + } 1.5594 + 1.5595 + MConstant *id = MConstant::New(alloc(), Int32Value(GET_UINT24(pc))); 1.5596 + current->add(id); 1.5597 + 1.5598 + // Get the elements vector. 1.5599 + MElements *elements = MElements::New(alloc(), obj); 1.5600 + current->add(elements); 1.5601 + 1.5602 + JSObject *templateObject = obj->toNewArray()->templateObject(); 1.5603 + 1.5604 + if (templateObject->shouldConvertDoubleElements()) { 1.5605 + MInstruction *valueDouble = MToDouble::New(alloc(), value); 1.5606 + current->add(valueDouble); 1.5607 + value = valueDouble; 1.5608 + } 1.5609 + 1.5610 + // Store the value. 1.5611 + MStoreElement *store = MStoreElement::New(alloc(), elements, id, value, /* needsHoleCheck = */ false); 1.5612 + current->add(store); 1.5613 + 1.5614 + // Update the initialized length. (The template object for this array has 1.5615 + // the array's ultimate length, so the length field is already correct: no 1.5616 + // updating needed.)
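// E.g. for the literal |[1, 2, 3]| the array is allocated with length 3
// up front, and each JSOP_INITELEM_ARRAY store bumps the initialized
// length 1 -> 2 -> 3 (illustrative).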
1.5617 + MSetInitializedLength *initLength = MSetInitializedLength::New(alloc(), elements, id); 1.5618 + current->add(initLength); 1.5619 + 1.5620 + if (!resumeAfter(initLength)) 1.5621 + return false; 1.5622 + 1.5623 + return true; 1.5624 +} 1.5625 + 1.5626 +bool 1.5627 +IonBuilder::jsop_mutateproto() 1.5628 +{ 1.5629 + MDefinition *value = current->pop(); 1.5630 + MDefinition *obj = current->peek(-1); 1.5631 + 1.5632 + MMutateProto *mutate = MMutateProto::New(alloc(), obj, value); 1.5633 + current->add(mutate); 1.5634 + return resumeAfter(mutate); 1.5635 +} 1.5636 + 1.5637 +bool 1.5638 +IonBuilder::jsop_initprop(PropertyName *name) 1.5639 +{ 1.5640 + MDefinition *value = current->pop(); 1.5641 + MDefinition *obj = current->peek(-1); 1.5642 + 1.5643 + JSObject *templateObject = obj->toNewObject()->templateObject(); 1.5644 + 1.5645 + Shape *shape = templateObject->lastProperty()->searchLinear(NameToId(name)); 1.5646 + 1.5647 + if (!shape) { 1.5648 + // JSOP_NEWINIT becomes an MNewObject without preconfigured properties. 1.5649 + MInitProp *init = MInitProp::New(alloc(), obj, name, value); 1.5650 + current->add(init); 1.5651 + return resumeAfter(init); 1.5652 + } 1.5653 + 1.5654 + if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, 1.5655 + &obj, name, &value, /* canModify = */ true)) 1.5656 + { 1.5657 + // JSOP_NEWINIT becomes an MNewObject without preconfigured properties. 1.5658 + MInitProp *init = MInitProp::New(alloc(), obj, name, value); 1.5659 + current->add(init); 1.5660 + return resumeAfter(init); 1.5661 + } 1.5662 + 1.5663 + if (NeedsPostBarrier(info(), value)) 1.5664 + current->add(MPostWriteBarrier::New(alloc(), obj, value)); 1.5665 + 1.5666 + bool needsBarrier = true; 1.5667 + if (obj->resultTypeSet() && 1.5668 + !obj->resultTypeSet()->propertyNeedsBarrier(constraints(), NameToId(name))) 1.5669 + { 1.5670 + needsBarrier = false; 1.5671 + } 1.5672 + 1.5673 + // In parallel execution, we never require write barriers. See 1.5674 + // forkjoin.cpp for more information. 
1.5675 + if (info().executionMode() == ParallelExecution) 1.5676 + needsBarrier = false; 1.5677 + 1.5678 + if (templateObject->isFixedSlot(shape->slot())) { 1.5679 + MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, shape->slot(), value); 1.5680 + if (needsBarrier) 1.5681 + store->setNeedsBarrier(); 1.5682 + 1.5683 + current->add(store); 1.5684 + return resumeAfter(store); 1.5685 + } 1.5686 + 1.5687 + MSlots *slots = MSlots::New(alloc(), obj); 1.5688 + current->add(slots); 1.5689 + 1.5690 + uint32_t slot = templateObject->dynamicSlotIndex(shape->slot()); 1.5691 + MStoreSlot *store = MStoreSlot::New(alloc(), slots, slot, value); 1.5692 + if (needsBarrier) 1.5693 + store->setNeedsBarrier(); 1.5694 + 1.5695 + current->add(store); 1.5696 + return resumeAfter(store); 1.5697 +} 1.5698 + 1.5699 +bool 1.5700 +IonBuilder::jsop_initprop_getter_setter(PropertyName *name) 1.5701 +{ 1.5702 + MDefinition *value = current->pop(); 1.5703 + MDefinition *obj = current->peek(-1); 1.5704 + 1.5705 + MInitPropGetterSetter *init = MInitPropGetterSetter::New(alloc(), obj, name, value); 1.5706 + current->add(init); 1.5707 + return resumeAfter(init); 1.5708 +} 1.5709 + 1.5710 +bool 1.5711 +IonBuilder::jsop_initelem_getter_setter() 1.5712 +{ 1.5713 + MDefinition *value = current->pop(); 1.5714 + MDefinition *id = current->pop(); 1.5715 + MDefinition *obj = current->peek(-1); 1.5716 + 1.5717 + MInitElemGetterSetter *init = MInitElemGetterSetter::New(alloc(), obj, id, value); 1.5718 + current->add(init); 1.5719 + return resumeAfter(init); 1.5720 +} 1.5721 + 1.5722 +MBasicBlock * 1.5723 +IonBuilder::addBlock(MBasicBlock *block, uint32_t loopDepth) 1.5724 +{ 1.5725 + if (!block) 1.5726 + return nullptr; 1.5727 + graph().addBlock(block); 1.5728 + block->setLoopDepth(loopDepth); 1.5729 + return block; 1.5730 +} 1.5731 + 1.5732 +MBasicBlock * 1.5733 +IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc) 1.5734 +{ 1.5735 + MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(), 1.5736 + predecessor, pc, MBasicBlock::NORMAL); 1.5737 + return addBlock(block, loopDepth_); 1.5738 +} 1.5739 + 1.5740 +MBasicBlock * 1.5741 +IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, MResumePoint *priorResumePoint) 1.5742 +{ 1.5743 + MBasicBlock *block = MBasicBlock::NewWithResumePoint(graph(), info(), predecessor, pc, 1.5744 + priorResumePoint); 1.5745 + return addBlock(block, loopDepth_); 1.5746 +} 1.5747 + 1.5748 +MBasicBlock * 1.5749 +IonBuilder::newBlockPopN(MBasicBlock *predecessor, jsbytecode *pc, uint32_t popped) 1.5750 +{ 1.5751 + MBasicBlock *block = MBasicBlock::NewPopN(graph(), info(), predecessor, pc, MBasicBlock::NORMAL, popped); 1.5752 + return addBlock(block, loopDepth_); 1.5753 +} 1.5754 + 1.5755 +MBasicBlock * 1.5756 +IonBuilder::newBlockAfter(MBasicBlock *at, MBasicBlock *predecessor, jsbytecode *pc) 1.5757 +{ 1.5758 + MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(), 1.5759 + predecessor, pc, MBasicBlock::NORMAL); 1.5760 + if (!block) 1.5761 + return nullptr; 1.5762 + graph().insertBlockAfter(at, block); 1.5763 + return block; 1.5764 +} 1.5765 + 1.5766 +MBasicBlock * 1.5767 +IonBuilder::newBlock(MBasicBlock *predecessor, jsbytecode *pc, uint32_t loopDepth) 1.5768 +{ 1.5769 + MBasicBlock *block = MBasicBlock::New(graph(), &analysis(), info(), 1.5770 + predecessor, pc, MBasicBlock::NORMAL); 1.5771 + return addBlock(block, loopDepth); 1.5772 +} 1.5773 + 1.5774 +MBasicBlock * 1.5775 +IonBuilder::newOsrPreheader(MBasicBlock *predecessor, jsbytecode *loopEntry) 
1.5776 +{ 1.5777 + JS_ASSERT(LoopEntryCanIonOsr(loopEntry)); 1.5778 + JS_ASSERT(loopEntry == info().osrPc()); 1.5779 + 1.5780 + // Create two blocks: one for the OSR entry with no predecessors, one for 1.5781 + // the preheader, which has the OSR entry block as a predecessor. The 1.5782 + // OSR block is always the second block (with id 1). 1.5783 + MBasicBlock *osrBlock = newBlockAfter(*graph().begin(), loopEntry); 1.5784 + MBasicBlock *preheader = newBlock(predecessor, loopEntry); 1.5785 + if (!osrBlock || !preheader) 1.5786 + return nullptr; 1.5787 + 1.5788 + MOsrEntry *entry = MOsrEntry::New(alloc()); 1.5789 + osrBlock->add(entry); 1.5790 + 1.5791 + // Initialize |scopeChain|. 1.5792 + { 1.5793 + uint32_t slot = info().scopeChainSlot(); 1.5794 + 1.5795 + MInstruction *scopev; 1.5796 + if (analysis().usesScopeChain()) { 1.5797 + scopev = MOsrScopeChain::New(alloc(), entry); 1.5798 + } else { 1.5799 + // Use an undefined value if the script does not need its scope 1.5800 + // chain, to match the type that is already being tracked for the 1.5801 + // slot. 1.5802 + scopev = MConstant::New(alloc(), UndefinedValue()); 1.5803 + } 1.5804 + 1.5805 + osrBlock->add(scopev); 1.5806 + osrBlock->initSlot(slot, scopev); 1.5807 + } 1.5808 + // Initialize |return value| 1.5809 + { 1.5810 + MInstruction *returnValue; 1.5811 + if (!script()->noScriptRval()) 1.5812 + returnValue = MOsrReturnValue::New(alloc(), entry); 1.5813 + else 1.5814 + returnValue = MConstant::New(alloc(), UndefinedValue()); 1.5815 + osrBlock->add(returnValue); 1.5816 + osrBlock->initSlot(info().returnValueSlot(), returnValue); 1.5817 + } 1.5818 + 1.5819 + // Initialize arguments object. 1.5820 + bool needsArgsObj = info().needsArgsObj(); 1.5821 + MInstruction *argsObj = nullptr; 1.5822 + if (info().hasArguments()) { 1.5823 + if (needsArgsObj) 1.5824 + argsObj = MOsrArgumentsObject::New(alloc(), entry); 1.5825 + else 1.5826 + argsObj = MConstant::New(alloc(), UndefinedValue()); 1.5827 + osrBlock->add(argsObj); 1.5828 + osrBlock->initSlot(info().argsObjSlot(), argsObj); 1.5829 + } 1.5830 + 1.5831 + if (info().funMaybeLazy()) { 1.5832 + // Initialize |this| parameter. 1.5833 + MParameter *thisv = MParameter::New(alloc(), MParameter::THIS_SLOT, nullptr); 1.5834 + osrBlock->add(thisv); 1.5835 + osrBlock->initSlot(info().thisSlot(), thisv); 1.5836 + 1.5837 + // Initialize arguments. 1.5838 + for (uint32_t i = 0; i < info().nargs(); i++) { 1.5839 + uint32_t slot = needsArgsObj ? info().argSlotUnchecked(i) : info().argSlot(i); 1.5840 + 1.5841 + // Only grab arguments from the arguments object if the arguments object 1.5842 + // aliases formals. If the argsobj does not alias formals, then the 1.5843 + // formals may have been assigned to during interpretation, and that change 1.5844 + // will not be reflected in the argsobj. 1.5845 + if (needsArgsObj && info().argsObjAliasesFormals()) { 1.5846 + JS_ASSERT(argsObj && argsObj->isOsrArgumentsObject()); 1.5847 + // If this is an aliased formal, then the arguments object 1.5848 + // contains a hole at this index. Any references to this 1.5849 + // variable in the jitcode will come from JSOP_*ALIASEDVAR 1.5850 + // opcodes, so the slot itself can be set to undefined. If 1.5851 + // it's not aliased, it must be retrieved from the arguments 1.5852 + // object. 
1.5853 + MInstruction *osrv; 1.5854 + if (script()->formalIsAliased(i)) 1.5855 + osrv = MConstant::New(alloc(), UndefinedValue()); 1.5856 + else 1.5857 + osrv = MGetArgumentsObjectArg::New(alloc(), argsObj, i); 1.5858 + 1.5859 + osrBlock->add(osrv); 1.5860 + osrBlock->initSlot(slot, osrv); 1.5861 + } else { 1.5862 + MParameter *arg = MParameter::New(alloc(), i, nullptr); 1.5863 + osrBlock->add(arg); 1.5864 + osrBlock->initSlot(slot, arg); 1.5865 + } 1.5866 + } 1.5867 + } 1.5868 + 1.5869 + // Initialize locals. 1.5870 + for (uint32_t i = 0; i < info().nlocals(); i++) { 1.5871 + uint32_t slot = info().localSlot(i); 1.5872 + ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(i); 1.5873 + 1.5874 + MOsrValue *osrv = MOsrValue::New(alloc(), entry, offset); 1.5875 + osrBlock->add(osrv); 1.5876 + osrBlock->initSlot(slot, osrv); 1.5877 + } 1.5878 + 1.5879 + // Initialize stack. 1.5880 + uint32_t numStackSlots = preheader->stackDepth() - info().firstStackSlot(); 1.5881 + for (uint32_t i = 0; i < numStackSlots; i++) { 1.5882 + uint32_t slot = info().stackSlot(i); 1.5883 + ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(info().nlocals() + i); 1.5884 + 1.5885 + MOsrValue *osrv = MOsrValue::New(alloc(), entry, offset); 1.5886 + osrBlock->add(osrv); 1.5887 + osrBlock->initSlot(slot, osrv); 1.5888 + } 1.5889 + 1.5890 + // Create an MStart to hold the first valid MResumePoint. 1.5891 + MStart *start = MStart::New(alloc(), MStart::StartType_Osr); 1.5892 + osrBlock->add(start); 1.5893 + graph().setOsrStart(start); 1.5894 + 1.5895 + // MOsrValue instructions are infallible, so the first MResumePoint must 1.5896 + // occur after they execute, at the point of the MStart. 1.5897 + if (!resumeAt(start, loopEntry)) 1.5898 + return nullptr; 1.5899 + 1.5900 + // Link the same MResumePoint from the MStart to each MOsrValue. 1.5901 + // This causes logic in ShouldSpecializeInput() to not replace Uses with 1.5902 + // Unboxes in the MResumePoint, so that the MStart always sees Values. 1.5903 + osrBlock->linkOsrValues(start); 1.5904 + 1.5905 + // Clone the types of the other predecessor of the pre-header into the OSR 1.5906 + // block, so that the pre-header phis won't discard the specialized types 1.5907 + // of the predecessor. 1.5908 + JS_ASSERT(predecessor->stackDepth() == osrBlock->stackDepth()); 1.5909 + JS_ASSERT(info().scopeChainSlot() == 0); 1.5910 + 1.5911 + // Treat the OSR values as having the same type as the existing values 1.5912 + // coming in to the loop. These will be fixed up with appropriate 1.5913 + // unboxing and type barriers in finishLoop, once the possible types 1.5914 + // at the loop header are known. 1.5915 + for (uint32_t i = info().startArgSlot(); i < osrBlock->stackDepth(); i++) { 1.5916 + MDefinition *existing = current->getSlot(i); 1.5917 + MDefinition *def = osrBlock->getSlot(i); 1.5918 + JS_ASSERT_IF(!needsArgsObj || !info().isSlotAliasedAtOsr(i), def->type() == MIRType_Value); 1.5919 + 1.5920 + // Aliased slots are never accessed, since they need to go through 1.5921 + // the callobject. No need to type them here. 1.5922 + if (info().isSlotAliasedAtOsr(i)) 1.5923 + continue; 1.5924 + 1.5925 + def->setResultType(existing->type()); 1.5926 + def->setResultTypeSet(existing->resultTypeSet()); 1.5927 + } 1.5928 + 1.5929 + // Finish the osrBlock.
1.5930 + osrBlock->end(MGoto::New(alloc(), preheader)); 1.5931 + if (!preheader->addPredecessor(alloc(), osrBlock)) 1.5932 + return nullptr; 1.5933 + graph().setOsrBlock(osrBlock); 1.5934 + 1.5935 + // Wrap |this| with a guaranteed use, to prevent instruction elimination. 1.5936 + // Prevent |this| from being DCE'd: necessary for constructors. 1.5937 + if (info().funMaybeLazy()) 1.5938 + preheader->getSlot(info().thisSlot())->setGuard(); 1.5939 + 1.5940 + return preheader; 1.5941 +} 1.5942 + 1.5943 +MBasicBlock * 1.5944 +IonBuilder::newPendingLoopHeader(MBasicBlock *predecessor, jsbytecode *pc, bool osr, bool canOsr, 1.5945 + unsigned stackPhiCount) 1.5946 +{ 1.5947 + loopDepth_++; 1.5948 + // If this site can OSR, all values on the expression stack are part of the loop. 1.5949 + if (canOsr) 1.5950 + stackPhiCount = predecessor->stackDepth() - info().firstStackSlot(); 1.5951 + MBasicBlock *block = MBasicBlock::NewPendingLoopHeader(graph(), info(), predecessor, pc, 1.5952 + stackPhiCount); 1.5953 + if (!addBlock(block, loopDepth_)) 1.5954 + return nullptr; 1.5955 + 1.5956 + if (osr) { 1.5957 + // Incorporate type information from the OSR frame into the loop 1.5958 + // header. The OSR frame may have unexpected types due to type changes 1.5959 + // within the loop body or due to incomplete profiling information, 1.5960 + // in which case this may avoid restarts of loop analysis or bailouts 1.5961 + // during the OSR itself. 1.5962 + 1.5963 + // Unbox the MOsrValue if it is known to be unboxable. 1.5964 + for (uint32_t i = info().startArgSlot(); i < block->stackDepth(); i++) { 1.5965 + 1.5966 + // The values of aliased args and slots are in the callobject, so we 1.5967 + // can't use the value from the baseline frame. 1.5968 + if (info().isSlotAliasedAtOsr(i)) 1.5969 + continue; 1.5970 + 1.5971 + // Don't bother with expression stack values. The stack should be 1.5972 + // empty except for let variables (not Ion-compiled) or iterators. 1.5973 + if (i >= info().firstStackSlot()) 1.5974 + continue; 1.5975 + 1.5976 + MPhi *phi = block->getSlot(i)->toPhi(); 1.5977 + 1.5978 + // Get the type from the baseline frame. 1.5979 + types::Type existingType = types::Type::UndefinedType(); 1.5980 + uint32_t arg = i - info().firstArgSlot(); 1.5981 + uint32_t var = i - info().firstLocalSlot(); 1.5982 + if (info().funMaybeLazy() && i == info().thisSlot()) 1.5983 + existingType = baselineFrame_->thisType; 1.5984 + else if (arg < info().nargs()) 1.5985 + existingType = baselineFrame_->argTypes[arg]; 1.5986 + else 1.5987 + existingType = baselineFrame_->varTypes[var]; 1.5988 + 1.5989 + // Extract typeset from value. 1.5990 + types::TemporaryTypeSet *typeSet = 1.5991 + alloc_->lifoAlloc()->new_<types::TemporaryTypeSet>(existingType); 1.5992 + if (!typeSet) 1.5993 + return nullptr; 1.5994 + MIRType type = typeSet->getKnownMIRType(); 1.5995 + if (!phi->addBackedgeType(type, typeSet)) 1.5996 + return nullptr; 1.5997 + } 1.5998 + } 1.5999 + 1.6000 + return block; 1.6001 +} 1.6002 + 1.6003 +// A resume point is a mapping of stack slots to MDefinitions. It is used to 1.6004 +// capture the environment such that if a guard fails, and IonMonkey needs 1.6005 +// to exit back to the interpreter, the interpreter state can be 1.6006 +// reconstructed. 1.6007 +// 1.6008 +// We capture stack state at critical points: 1.6009 +// * (1) At the beginning of every basic block. 1.6010 +// * (2) After every effectful operation.
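// (Here an operation is "effectful" if the interpreter must not repeat it
// after a bailout, e.g. a call or a property store.)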
1.6011 +// 1.6012 +// As long as these two properties are maintained, instructions can 1.6013 +// be moved, hoisted, or eliminated without problems, and ops without side 1.6014 +// effects do not need to worry about capturing state at precisely the 1.6015 +// right point in time. 1.6016 +// 1.6017 +// Effectful instructions, of course, need to capture state after completion, 1.6018 +// where the interpreter will not attempt to repeat the operation. For this, 1.6019 +// ResumeAfter must be used. The state is attached directly to the effectful 1.6020 +// instruction to ensure that no intermediate instructions could be injected 1.6021 +// in between by a future analysis pass. 1.6022 +// 1.6023 +// During LIR construction, if an instruction can bail back to the interpreter, 1.6024 +// we create an LSnapshot, which uses the last known resume point to request 1.6025 +// register/stack assignments for every live value. 1.6026 +bool 1.6027 +IonBuilder::resume(MInstruction *ins, jsbytecode *pc, MResumePoint::Mode mode) 1.6028 +{ 1.6029 + JS_ASSERT(ins->isEffectful() || !ins->isMovable()); 1.6030 + 1.6031 + MResumePoint *resumePoint = MResumePoint::New(alloc(), ins->block(), pc, callerResumePoint_, 1.6032 + mode); 1.6033 + if (!resumePoint) 1.6034 + return false; 1.6035 + ins->setResumePoint(resumePoint); 1.6036 + resumePoint->setInstruction(ins); 1.6037 + return true; 1.6038 +} 1.6039 + 1.6040 +bool 1.6041 +IonBuilder::resumeAt(MInstruction *ins, jsbytecode *pc) 1.6042 +{ 1.6043 + return resume(ins, pc, MResumePoint::ResumeAt); 1.6044 +} 1.6045 + 1.6046 +bool 1.6047 +IonBuilder::resumeAfter(MInstruction *ins) 1.6048 +{ 1.6049 + return resume(ins, pc, MResumePoint::ResumeAfter); 1.6050 +} 1.6051 + 1.6052 +bool 1.6053 +IonBuilder::maybeInsertResume() 1.6054 +{ 1.6055 + // Create a resume point at the current position, without an existing 1.6056 + // effectful instruction. This resume point is not necessary for correct 1.6057 + // behavior (see above), but is added to avoid holding any values from the 1.6058 + // previous resume point which are now dead. This shortens the live ranges 1.6059 + // of such values and improves register allocation. 1.6060 + // 1.6061 + // This optimization is not performed outside of loop bodies, where good 1.6062 + // register allocation is not as critical, in order to avoid creating 1.6063 + // excessive resume points. 1.6064 + 1.6065 + if (loopDepth_ == 0) 1.6066 + return true; 1.6067 + 1.6068 + MNop *ins = MNop::New(alloc()); 1.6069 + current->add(ins); 1.6070 + 1.6071 + return resumeAfter(ins); 1.6072 +} 1.6073 + 1.6074 +static bool 1.6075 +ClassHasEffectlessLookup(const Class *clasp, PropertyName *name) 1.6076 +{ 1.6077 + return clasp->isNative() && !clasp->ops.lookupGeneric; 1.6078 +} 1.6079 + 1.6080 +static bool 1.6081 +ClassHasResolveHook(CompileCompartment *comp, const Class *clasp, PropertyName *name) 1.6082 +{ 1.6083 + // While arrays do not have resolve hooks, the types of their |length| 1.6084 + // properties are not reflected in type information, so pretend there is a 1.6085 + // resolve hook for this property. 1.6086 + if (clasp == &ArrayObject::class_) 1.6087 + return name == comp->runtime()->names().length; 1.6088 + 1.6089 + if (clasp->resolve == JS_ResolveStub) 1.6090 + return false; 1.6091 + 1.6092 + if (clasp->resolve == (JSResolveOp)str_resolve) { 1.6093 + // str_resolve only resolves integers, not names.
1.6094 + return false; 1.6095 + } 1.6096 + 1.6097 + if (clasp->resolve == (JSResolveOp)fun_resolve) 1.6098 + return FunctionHasResolveHook(comp->runtime()->names(), name); 1.6099 + 1.6100 + return true; 1.6101 +} 1.6102 + 1.6103 +void 1.6104 +IonBuilder::insertRecompileCheck() 1.6105 +{ 1.6106 + // PJS doesn't recompile and doesn't need recompile checks. 1.6107 + if (info().executionMode() != SequentialExecution) 1.6108 + return; 1.6109 + 1.6110 + // No need for recompile checks if this is the highest optimization level. 1.6111 + OptimizationLevel curLevel = optimizationInfo().level(); 1.6112 + if (js_IonOptimizations.isLastLevel(curLevel)) 1.6113 + return; 1.6114 + 1.6115 + // Add recompile check. 1.6116 + 1.6117 + // Get the topmost builder. The topmost script will get recompiled when 1.6118 + // usecount is high enough to justify a higher optimization level. 1.6119 + IonBuilder *topBuilder = this; 1.6120 + while (topBuilder->callerBuilder_) 1.6121 + topBuilder = topBuilder->callerBuilder_; 1.6122 + 1.6123 + // Add recompile check to recompile when the usecount reaches the usecount 1.6124 + // of the next optimization level. 1.6125 + OptimizationLevel nextLevel = js_IonOptimizations.nextLevel(curLevel); 1.6126 + const OptimizationInfo *info = js_IonOptimizations.get(nextLevel); 1.6127 + uint32_t useCount = info->usesBeforeCompile(topBuilder->script()); 1.6128 + current->add(MRecompileCheck::New(alloc(), topBuilder->script(), useCount)); 1.6129 +} 1.6130 + 1.6131 +JSObject * 1.6132 +IonBuilder::testSingletonProperty(JSObject *obj, PropertyName *name) 1.6133 +{ 1.6134 + // We would like to completely no-op property/global accesses which can 1.6135 + // produce only a particular JSObject. When indicating the access result is 1.6136 + // definitely an object, type inference does not account for the 1.6137 + // possibility that the property is entirely missing from the input object 1.6138 + // and its prototypes (if this happens, a semantic trigger would be hit and 1.6139 + // the pushed types updated, even if there is no type barrier). 1.6140 + // 1.6141 + // If the access definitely goes through obj, either directly or on the 1.6142 + // prototype chain, and the object has singleton type, then the type 1.6143 + // information for that property reflects the value that will definitely be 1.6144 + // read on accesses to the object. If the property is later deleted or 1.6145 + // reconfigured as a getter/setter then the type information for the 1.6146 + // property will change and trigger invalidation. 
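// For example, a GETPROP of |Math.abs| where |Math| has singleton type can
// be folded to the singleton function object itself; deleting or redefining
// Math.abs afterwards changes the property's type information and triggers
// invalidation (illustrative).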
1.6147 + 1.6148 + while (obj) { 1.6149 + if (!ClassHasEffectlessLookup(obj->getClass(), name)) 1.6150 + return nullptr; 1.6151 + 1.6152 + types::TypeObjectKey *objType = types::TypeObjectKey::get(obj); 1.6153 + if (analysisContext) 1.6154 + objType->ensureTrackedProperty(analysisContext, NameToId(name)); 1.6155 + 1.6156 + if (objType->unknownProperties()) 1.6157 + return nullptr; 1.6158 + 1.6159 + types::HeapTypeSetKey property = objType->property(NameToId(name)); 1.6160 + if (property.isOwnProperty(constraints())) { 1.6161 + if (obj->hasSingletonType()) 1.6162 + return property.singleton(constraints()); 1.6163 + return nullptr; 1.6164 + } 1.6165 + 1.6166 + if (ClassHasResolveHook(compartment, obj->getClass(), name)) 1.6167 + return nullptr; 1.6168 + 1.6169 + if (!obj->hasTenuredProto()) 1.6170 + return nullptr; 1.6171 + obj = obj->getProto(); 1.6172 + } 1.6173 + 1.6174 + return nullptr; 1.6175 +} 1.6176 + 1.6177 +bool 1.6178 +IonBuilder::testSingletonPropertyTypes(MDefinition *obj, JSObject *singleton, PropertyName *name, 1.6179 + bool *testObject, bool *testString) 1.6180 +{ 1.6181 + // As for TestSingletonProperty, but the input is any value in a type set 1.6182 + // rather than a specific object. If testObject is set then the constant 1.6183 + // result can only be used after ensuring the input is an object. 1.6184 + 1.6185 + *testObject = false; 1.6186 + *testString = false; 1.6187 + 1.6188 + types::TemporaryTypeSet *types = obj->resultTypeSet(); 1.6189 + if (types && types->unknownObject()) 1.6190 + return false; 1.6191 + 1.6192 + JSObject *objectSingleton = types ? types->getSingleton() : nullptr; 1.6193 + if (objectSingleton) 1.6194 + return testSingletonProperty(objectSingleton, name) == singleton; 1.6195 + 1.6196 + JSProtoKey key; 1.6197 + switch (obj->type()) { 1.6198 + case MIRType_String: 1.6199 + key = JSProto_String; 1.6200 + break; 1.6201 + 1.6202 + case MIRType_Int32: 1.6203 + case MIRType_Double: 1.6204 + key = JSProto_Number; 1.6205 + break; 1.6206 + 1.6207 + case MIRType_Boolean: 1.6208 + key = JSProto_Boolean; 1.6209 + break; 1.6210 + 1.6211 + case MIRType_Object: 1.6212 + case MIRType_Value: { 1.6213 + if (!types) 1.6214 + return false; 1.6215 + 1.6216 + if (types->hasType(types::Type::StringType())) { 1.6217 + key = JSProto_String; 1.6218 + *testString = true; 1.6219 + break; 1.6220 + } 1.6221 + 1.6222 + if (!types->maybeObject()) 1.6223 + return false; 1.6224 + 1.6225 + // For property accesses which may be on many objects, we just need to 1.6226 + // find a prototype common to all the objects; if that prototype 1.6227 + // has the singleton property, the access will not be on a missing property. 1.6228 + for (unsigned i = 0; i < types->getObjectCount(); i++) { 1.6229 + types::TypeObjectKey *object = types->getObject(i); 1.6230 + if (!object) 1.6231 + continue; 1.6232 + if (analysisContext) 1.6233 + object->ensureTrackedProperty(analysisContext, NameToId(name)); 1.6234 + 1.6235 + const Class *clasp = object->clasp(); 1.6236 + if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) 1.6237 + return false; 1.6238 + if (object->unknownProperties()) 1.6239 + return false; 1.6240 + types::HeapTypeSetKey property = object->property(NameToId(name)); 1.6241 + if (property.isOwnProperty(constraints())) 1.6242 + return false; 1.6243 + 1.6244 + if (!object->hasTenuredProto()) 1.6245 + return false; 1.6246 + if (JSObject *proto = object->proto().toObjectOrNull()) { 1.6247 + // Test this type. 
1.6248 + if (testSingletonProperty(proto, name) != singleton) 1.6249 + return false; 1.6250 + } else { 1.6251 + // Can't be on the prototype chain with no prototypes... 1.6252 + return false; 1.6253 + } 1.6254 + } 1.6255 + // If this is not a known object, a test will be needed. 1.6256 + *testObject = (obj->type() != MIRType_Object); 1.6257 + return true; 1.6258 + } 1.6259 + default: 1.6260 + return false; 1.6261 + } 1.6262 + 1.6263 + JSObject *proto = GetBuiltinPrototypePure(&script()->global(), key); 1.6264 + if (proto) 1.6265 + return testSingletonProperty(proto, name) == singleton; 1.6266 + 1.6267 + return false; 1.6268 +} 1.6269 + 1.6270 +// Given an observed type set, annotates the IR as much as possible: 1.6271 +// (1) If no type information is provided, the value on the top of the stack is 1.6272 +// left in place. 1.6273 +// (2) If a single type definitely exists, and no type barrier is needed, 1.6274 +// then an infallible unbox instruction replaces the value on the top of 1.6275 +// the stack. 1.6276 +// (3) If a type barrier is needed, but has an unknown type set, leave the 1.6277 +// value at the top of the stack. 1.6278 +// (4) If a type barrier is needed, and has a single type, an unbox 1.6279 +// instruction replaces the top of the stack. 1.6280 +// (5) Lastly, a type barrier instruction replaces the top of the stack. 1.6281 +bool 1.6282 +IonBuilder::pushTypeBarrier(MDefinition *def, types::TemporaryTypeSet *observed, bool needsBarrier) 1.6283 +{ 1.6284 + // Barriers are never needed for instructions whose result will not be used. 1.6285 + if (BytecodeIsPopped(pc)) 1.6286 + return true; 1.6287 + 1.6288 + // If the instruction has no side effects, we'll resume the entire operation. 1.6289 + // The actual type barrier will occur in the interpreter. If the 1.6290 + // instruction is effectful, even if it has a singleton type, there 1.6291 + // must be a resume point capturing the original def, and resuming 1.6292 + // to that point will explicitly monitor the new type. 1.6293 + 1.6294 + if (!needsBarrier) { 1.6295 + MDefinition *replace = ensureDefiniteType(def, observed->getKnownMIRType()); 1.6296 + if (replace != def) { 1.6297 + current->pop(); 1.6298 + current->push(replace); 1.6299 + } 1.6300 + replace->setResultTypeSet(observed); 1.6301 + return true; 1.6302 + } 1.6303 + 1.6304 + if (observed->unknown()) 1.6305 + return true; 1.6306 + 1.6307 + current->pop(); 1.6308 + 1.6309 + MInstruction *barrier = MTypeBarrier::New(alloc(), def, observed); 1.6310 + current->add(barrier); 1.6311 + 1.6312 + if (barrier->type() == MIRType_Undefined) 1.6313 + return pushConstant(UndefinedValue()); 1.6314 + if (barrier->type() == MIRType_Null) 1.6315 + return pushConstant(NullValue()); 1.6316 + 1.6317 + current->push(barrier); 1.6318 + return true; 1.6319 +} 1.6320 + 1.6321 +bool 1.6322 +IonBuilder::pushDOMTypeBarrier(MInstruction *ins, types::TemporaryTypeSet *observed, JSFunction* func) 1.6323 +{ 1.6324 + JS_ASSERT(func && func->isNative() && func->jitInfo()); 1.6325 + 1.6326 + const JSJitInfo *jitinfo = func->jitInfo(); 1.6327 + bool barrier = DOMCallNeedsBarrier(jitinfo, observed); 1.6328 + // Need to be a bit careful: if jitinfo->returnType is JSVAL_TYPE_DOUBLE but 1.6329 + // types->getKnownMIRType() is MIRType_Int32, then don't unconditionally 1.6330 + // unbox as a double. Instead, go ahead and barrier on having an int type, 1.6331 + // since we know we need a barrier anyway due to the type mismatch. 
This is 1.6332 + // the only situation in which TI actually has more information about the 1.6333 + // JSValueType than codegen can, short of jitinfo->returnType just being 1.6334 + // JSVAL_TYPE_UNKNOWN. 1.6335 + MDefinition* replace = ins; 1.6336 + if (jitinfo->returnType() != JSVAL_TYPE_DOUBLE || 1.6337 + observed->getKnownMIRType() != MIRType_Int32) { 1.6338 + replace = ensureDefiniteType(ins, MIRTypeFromValueType(jitinfo->returnType())); 1.6339 + if (replace != ins) { 1.6340 + current->pop(); 1.6341 + current->push(replace); 1.6342 + } 1.6343 + } else { 1.6344 + JS_ASSERT(barrier); 1.6345 + } 1.6346 + 1.6347 + return pushTypeBarrier(replace, observed, barrier); 1.6348 +} 1.6349 + 1.6350 +MDefinition * 1.6351 +IonBuilder::ensureDefiniteType(MDefinition *def, MIRType definiteType) 1.6352 +{ 1.6353 + MInstruction *replace; 1.6354 + switch (definiteType) { 1.6355 + case MIRType_Undefined: 1.6356 + def->setImplicitlyUsedUnchecked(); 1.6357 + replace = MConstant::New(alloc(), UndefinedValue()); 1.6358 + break; 1.6359 + 1.6360 + case MIRType_Null: 1.6361 + def->setImplicitlyUsedUnchecked(); 1.6362 + replace = MConstant::New(alloc(), NullValue()); 1.6363 + break; 1.6364 + 1.6365 + case MIRType_Value: 1.6366 + return def; 1.6367 + 1.6368 + default: { 1.6369 + if (def->type() != MIRType_Value) { 1.6370 + JS_ASSERT(def->type() == definiteType); 1.6371 + return def; 1.6372 + } 1.6373 + replace = MUnbox::New(alloc(), def, definiteType, MUnbox::Infallible); 1.6374 + break; 1.6375 + } 1.6376 + } 1.6377 + 1.6378 + current->add(replace); 1.6379 + return replace; 1.6380 +} 1.6381 + 1.6382 +MDefinition * 1.6383 +IonBuilder::ensureDefiniteTypeSet(MDefinition *def, types::TemporaryTypeSet *types) 1.6384 +{ 1.6385 + // We cannot arbitrarily add a typeset to a definition. It can be shared 1.6386 + // in another path. So we always need to create a new MIR. 1.6387 + 1.6388 + // Use ensureDefiniteType to do unboxing. If that happened the type can 1.6389 + // be added on the newly created unbox operation. 1.6390 + MDefinition *replace = ensureDefiniteType(def, types->getKnownMIRType()); 1.6391 + if (replace != def) { 1.6392 + replace->setResultTypeSet(types); 1.6393 + return replace; 1.6394 + } 1.6395 + 1.6396 + // Create a NOP mir instruction to filter the typeset. 1.6397 + MFilterTypeSet *filter = MFilterTypeSet::New(alloc(), def, types); 1.6398 + current->add(filter); 1.6399 + return filter; 1.6400 +} 1.6401 + 1.6402 +static size_t 1.6403 +NumFixedSlots(JSObject *object) 1.6404 +{ 1.6405 + // Note: we can't use object->numFixedSlots() here, as this will read the 1.6406 + // shape and can race with the main thread if we are building off thread. 1.6407 + // The allocation kind and object class (which goes through the type) can 1.6408 + // be read freely, however. 1.6409 + gc::AllocKind kind = object->tenuredGetAllocKind(); 1.6410 + return gc::GetGCKindSlots(kind, object->getClass()); 1.6411 +} 1.6412 + 1.6413 +bool 1.6414 +IonBuilder::getStaticName(JSObject *staticObject, PropertyName *name, bool *psucceeded) 1.6415 +{ 1.6416 + jsid id = NameToId(name); 1.6417 + 1.6418 + JS_ASSERT(staticObject->is<GlobalObject>() || staticObject->is<CallObject>()); 1.6419 + JS_ASSERT(staticObject->hasSingletonType()); 1.6420 + 1.6421 + *psucceeded = true; 1.6422 + 1.6423 + if (staticObject->is<GlobalObject>()) { 1.6424 + // Optimize undefined, NaN, and Infinity. 
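 + // For example, a global read of |undefined| becomes an MConstant rather than + // a slot load: these three global bindings are non-writable and + // non-configurable, so the baked-in value can never go stale.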
1.6425 + if (name == names().undefined) 1.6426 + return pushConstant(UndefinedValue()); 1.6427 + if (name == names().NaN) 1.6428 + return pushConstant(compartment->runtime()->NaNValue()); 1.6429 + if (name == names().Infinity) 1.6430 + return pushConstant(compartment->runtime()->positiveInfinityValue()); 1.6431 + } 1.6432 + 1.6433 + types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject); 1.6434 + if (analysisContext) 1.6435 + staticType->ensureTrackedProperty(analysisContext, NameToId(name)); 1.6436 + 1.6437 + if (staticType->unknownProperties()) { 1.6438 + *psucceeded = false; 1.6439 + return true; 1.6440 + } 1.6441 + 1.6442 + types::HeapTypeSetKey property = staticType->property(id); 1.6443 + if (!property.maybeTypes() || 1.6444 + !property.maybeTypes()->definiteProperty() || 1.6445 + property.nonData(constraints())) 1.6446 + { 1.6447 + // The property has been reconfigured as non-configurable, non-enumerable 1.6448 + // or non-writable. 1.6449 + *psucceeded = false; 1.6450 + return true; 1.6451 + } 1.6452 + 1.6453 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6454 + bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), staticType, 1.6455 + name, types, /* updateObserved = */ true); 1.6456 + 1.6457 + JSObject *singleton = types->getSingleton(); 1.6458 + 1.6459 + MIRType knownType = types->getKnownMIRType(); 1.6460 + if (!barrier) { 1.6461 + if (singleton) { 1.6462 + // Try to inline a known constant value. 1.6463 + if (testSingletonProperty(staticObject, name) == singleton) 1.6464 + return pushConstant(ObjectValue(*singleton)); 1.6465 + } 1.6466 + if (knownType == MIRType_Undefined) 1.6467 + return pushConstant(UndefinedValue()); 1.6468 + if (knownType == MIRType_Null) 1.6469 + return pushConstant(NullValue()); 1.6470 + } 1.6471 + 1.6472 + MInstruction *obj = constant(ObjectValue(*staticObject)); 1.6473 + 1.6474 + MIRType rvalType = types->getKnownMIRType(); 1.6475 + if (barrier) 1.6476 + rvalType = MIRType_Value; 1.6477 + 1.6478 + return loadSlot(obj, property.maybeTypes()->definiteSlot(), NumFixedSlots(staticObject), 1.6479 + rvalType, barrier, types); 1.6480 +} 1.6481 + 1.6482 +// Whether 'types' includes all possible values represented by input/inputTypes. 1.6483 +bool 1.6484 +jit::TypeSetIncludes(types::TypeSet *types, MIRType input, types::TypeSet *inputTypes) 1.6485 +{ 1.6486 + if (!types) 1.6487 + return inputTypes && inputTypes->empty(); 1.6488 + 1.6489 + switch (input) { 1.6490 + case MIRType_Undefined: 1.6491 + case MIRType_Null: 1.6492 + case MIRType_Boolean: 1.6493 + case MIRType_Int32: 1.6494 + case MIRType_Double: 1.6495 + case MIRType_Float32: 1.6496 + case MIRType_String: 1.6497 + case MIRType_MagicOptimizedArguments: 1.6498 + return types->hasType(types::Type::PrimitiveType(ValueTypeFromMIRType(input))); 1.6499 + 1.6500 + case MIRType_Object: 1.6501 + return types->unknownObject() || (inputTypes && inputTypes->isSubset(types)); 1.6502 + 1.6503 + case MIRType_Value: 1.6504 + return types->unknown() || (inputTypes && inputTypes->isSubset(types)); 1.6505 + 1.6506 + default: 1.6507 + MOZ_ASSUME_UNREACHABLE("Bad input type"); 1.6508 + } 1.6509 +} 1.6510 + 1.6511 +// Whether a write of the given value may need a post-write barrier for GC purposes. 
1.6512 +bool 1.6513 +jit::NeedsPostBarrier(CompileInfo &info, MDefinition *value) 1.6514 +{ 1.6515 + return info.executionMode() != ParallelExecution && value->mightBeType(MIRType_Object); 1.6516 +} 1.6517 + 1.6518 +bool 1.6519 +IonBuilder::setStaticName(JSObject *staticObject, PropertyName *name) 1.6520 +{ 1.6521 + jsid id = NameToId(name); 1.6522 + 1.6523 + JS_ASSERT(staticObject->is<GlobalObject>() || staticObject->is<CallObject>()); 1.6524 + 1.6525 + MDefinition *value = current->peek(-1); 1.6526 + 1.6527 + types::TypeObjectKey *staticType = types::TypeObjectKey::get(staticObject); 1.6528 + if (staticType->unknownProperties()) 1.6529 + return jsop_setprop(name); 1.6530 + 1.6531 + types::HeapTypeSetKey property = staticType->property(id); 1.6532 + if (!property.maybeTypes() || 1.6533 + !property.maybeTypes()->definiteProperty() || 1.6534 + property.nonData(constraints()) || 1.6535 + property.nonWritable(constraints())) 1.6536 + { 1.6537 + // The property has been reconfigured as non-configurable, non-enumerable 1.6538 + // or non-writable. 1.6539 + return jsop_setprop(name); 1.6540 + } 1.6541 + 1.6542 + if (!TypeSetIncludes(property.maybeTypes(), value->type(), value->resultTypeSet())) 1.6543 + return jsop_setprop(name); 1.6544 + 1.6545 + current->pop(); 1.6546 + 1.6547 + // Pop the bound object from the stack. 1.6548 + MDefinition *obj = current->pop(); 1.6549 + JS_ASSERT(&obj->toConstant()->value().toObject() == staticObject); 1.6550 + 1.6551 + if (NeedsPostBarrier(info(), value)) 1.6552 + current->add(MPostWriteBarrier::New(alloc(), obj, value)); 1.6553 + 1.6554 + // If the property has a known type, we may be able to optimize typed stores by not 1.6555 + // storing the type tag. 1.6556 + MIRType slotType = MIRType_None; 1.6557 + MIRType knownType = property.knownMIRType(constraints()); 1.6558 + if (knownType != MIRType_Value) 1.6559 + slotType = knownType; 1.6560 + 1.6561 + bool needsBarrier = property.needsBarrier(constraints()); 1.6562 + return storeSlot(obj, property.maybeTypes()->definiteSlot(), NumFixedSlots(staticObject), 1.6563 + value, needsBarrier, slotType); 1.6564 +} 1.6565 + 1.6566 +bool 1.6567 +IonBuilder::jsop_getgname(PropertyName *name) 1.6568 +{ 1.6569 + JSObject *obj = &script()->global(); 1.6570 + bool succeeded; 1.6571 + if (!getStaticName(obj, name, &succeeded)) 1.6572 + return false; 1.6573 + if (succeeded) 1.6574 + return true; 1.6575 + 1.6576 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6577 + // Spoof the stack to call into the getProp path. 1.6578 + // First, make sure there's room. 1.6579 + if (!current->ensureHasSlots(1)) 1.6580 + return false; 1.6581 + pushConstant(ObjectValue(*obj)); 1.6582 + if (!getPropTryCommonGetter(&succeeded, name, types)) 1.6583 + return false; 1.6584 + if (succeeded) 1.6585 + return true; 1.6586 + 1.6587 + // Clean up the pushed global object if we were not successful.
1.6588 + current->pop(); 1.6589 + return jsop_getname(name); 1.6590 +} 1.6591 + 1.6592 +bool 1.6593 +IonBuilder::jsop_getname(PropertyName *name) 1.6594 +{ 1.6595 + MDefinition *object; 1.6596 + if (js_CodeSpec[*pc].format & JOF_GNAME) { 1.6597 + MInstruction *global = constant(ObjectValue(script()->global())); 1.6598 + object = global; 1.6599 + } else { 1.6600 + current->push(current->scopeChain()); 1.6601 + object = current->pop(); 1.6602 + } 1.6603 + 1.6604 + MGetNameCache *ins; 1.6605 + if (JSOp(*GetNextPc(pc)) == JSOP_TYPEOF) 1.6606 + ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAMETYPEOF); 1.6607 + else 1.6608 + ins = MGetNameCache::New(alloc(), object, name, MGetNameCache::NAME); 1.6609 + 1.6610 + current->add(ins); 1.6611 + current->push(ins); 1.6612 + 1.6613 + if (!resumeAfter(ins)) 1.6614 + return false; 1.6615 + 1.6616 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6617 + return pushTypeBarrier(ins, types, true); 1.6618 +} 1.6619 + 1.6620 +bool 1.6621 +IonBuilder::jsop_intrinsic(PropertyName *name) 1.6622 +{ 1.6623 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6624 + 1.6625 + // If we haven't executed this opcode yet, we need to get the intrinsic 1.6626 + // value and monitor the result. 1.6627 + if (types->empty()) { 1.6628 + MCallGetIntrinsicValue *ins = MCallGetIntrinsicValue::New(alloc(), name); 1.6629 + 1.6630 + current->add(ins); 1.6631 + current->push(ins); 1.6632 + 1.6633 + if (!resumeAfter(ins)) 1.6634 + return false; 1.6635 + 1.6636 + return pushTypeBarrier(ins, types, true); 1.6637 + } 1.6638 + 1.6639 + // Bake in the intrinsic. Make sure that TI agrees with us on the type. 1.6640 + Value vp; 1.6641 + JS_ALWAYS_TRUE(script()->global().maybeGetIntrinsicValue(name, &vp)); 1.6642 + JS_ASSERT(types->hasType(types::GetValueType(vp))); 1.6643 + 1.6644 + pushConstant(vp); 1.6645 + return true; 1.6646 +} 1.6647 + 1.6648 +bool 1.6649 +IonBuilder::jsop_bindname(PropertyName *name) 1.6650 +{ 1.6651 + JS_ASSERT(analysis().usesScopeChain()); 1.6652 + 1.6653 + MDefinition *scopeChain = current->scopeChain(); 1.6654 + MBindNameCache *ins = MBindNameCache::New(alloc(), scopeChain, name, script(), pc); 1.6655 + 1.6656 + current->add(ins); 1.6657 + current->push(ins); 1.6658 + 1.6659 + return resumeAfter(ins); 1.6660 +} 1.6661 + 1.6662 +static MIRType 1.6663 +GetElemKnownType(bool needsHoleCheck, types::TemporaryTypeSet *types) 1.6664 +{ 1.6665 + MIRType knownType = types->getKnownMIRType(); 1.6666 + 1.6667 + // Null and undefined have no payload so they can't be specialized. 1.6668 + // Since folding null/undefined while building SSA is not safe (see the 1.6669 + // comment in IsPhiObservable), we just add an untyped load instruction 1.6670 + // and rely on pushTypeBarrier and DCE to replace it with a null/undefined 1.6671 + // constant. 1.6672 + if (knownType == MIRType_Undefined || knownType == MIRType_Null) 1.6673 + knownType = MIRType_Value; 1.6674 + 1.6675 + // Different architectures may want typed element reads which require 1.6676 + // hole checks to be done as either value or typed reads. 1.6677 + if (needsHoleCheck && !LIRGenerator::allowTypedElementHoleCheck()) 1.6678 + knownType = MIRType_Value; 1.6679 + 1.6680 + return knownType; 1.6681 +} 1.6682 + 1.6683 +bool 1.6684 +IonBuilder::jsop_getelem() 1.6685 +{ 1.6686 + MDefinition *index = current->pop(); 1.6687 + MDefinition *obj = current->pop(); 1.6688 + 1.6689 + // Always use a call if we are performing analysis and not actually 1.6690 + // emitting code, to simplify later analysis. 
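 + // JSOP_GETELEM covers reads like |obj[key]|, |arr[i]| and |str[i]|; the + // getElemTry* helpers below attempt progressively more specialized fast + // paths before falling back to a generic MCallGetElement.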
1.6691 + if (info().executionModeIsAnalysis()) { 1.6692 + MInstruction *ins = MCallGetElement::New(alloc(), obj, index); 1.6693 + 1.6694 + current->add(ins); 1.6695 + current->push(ins); 1.6696 + 1.6697 + if (!resumeAfter(ins)) 1.6698 + return false; 1.6699 + 1.6700 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6701 + return pushTypeBarrier(ins, types, true); 1.6702 + } 1.6703 + 1.6704 + bool emitted = false; 1.6705 + 1.6706 + if (!getElemTryTypedObject(&emitted, obj, index) || emitted) 1.6707 + return emitted; 1.6708 + 1.6709 + if (!getElemTryDense(&emitted, obj, index) || emitted) 1.6710 + return emitted; 1.6711 + 1.6712 + if (!getElemTryTypedStatic(&emitted, obj, index) || emitted) 1.6713 + return emitted; 1.6714 + 1.6715 + if (!getElemTryTypedArray(&emitted, obj, index) || emitted) 1.6716 + return emitted; 1.6717 + 1.6718 + if (!getElemTryString(&emitted, obj, index) || emitted) 1.6719 + return emitted; 1.6720 + 1.6721 + if (!getElemTryArguments(&emitted, obj, index) || emitted) 1.6722 + return emitted; 1.6723 + 1.6724 + if (!getElemTryArgumentsInlined(&emitted, obj, index) || emitted) 1.6725 + return emitted; 1.6726 + 1.6727 + if (script()->argumentsHasVarBinding() && obj->mightBeType(MIRType_MagicOptimizedArguments)) 1.6728 + return abort("Type is not definitely lazy arguments."); 1.6729 + 1.6730 + if (!getElemTryCache(&emitted, obj, index) || emitted) 1.6731 + return emitted; 1.6732 + 1.6733 + // Emit call. 1.6734 + MInstruction *ins = MCallGetElement::New(alloc(), obj, index); 1.6735 + 1.6736 + current->add(ins); 1.6737 + current->push(ins); 1.6738 + 1.6739 + if (!resumeAfter(ins)) 1.6740 + return false; 1.6741 + 1.6742 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.6743 + return pushTypeBarrier(ins, types, true); 1.6744 +} 1.6745 + 1.6746 +bool 1.6747 +IonBuilder::getElemTryTypedObject(bool *emitted, MDefinition *obj, MDefinition *index) 1.6748 +{ 1.6749 + JS_ASSERT(*emitted == false); 1.6750 + 1.6751 + TypeDescrSet objDescrs; 1.6752 + if (!lookupTypeDescrSet(obj, &objDescrs)) 1.6753 + return false; 1.6754 + 1.6755 + if (!objDescrs.allOfArrayKind()) 1.6756 + return true; 1.6757 + 1.6758 + TypeDescrSet elemDescrs; 1.6759 + if (!objDescrs.arrayElementType(*this, &elemDescrs)) 1.6760 + return false; 1.6761 + if (elemDescrs.empty()) 1.6762 + return true; 1.6763 + 1.6764 + JS_ASSERT(TypeDescr::isSized(elemDescrs.kind())); 1.6765 + 1.6766 + int32_t elemSize; 1.6767 + if (!elemDescrs.allHaveSameSize(&elemSize)) 1.6768 + return true; 1.6769 + 1.6770 + switch (elemDescrs.kind()) { 1.6771 + case TypeDescr::X4: 1.6772 + // FIXME (bug 894105): load into a MIRType_float32x4 etc 1.6773 + return true; 1.6774 + 1.6775 + case TypeDescr::Struct: 1.6776 + case TypeDescr::SizedArray: 1.6777 + return getElemTryComplexElemOfTypedObject(emitted, 1.6778 + obj, 1.6779 + index, 1.6780 + objDescrs, 1.6781 + elemDescrs, 1.6782 + elemSize); 1.6783 + case TypeDescr::Scalar: 1.6784 + return getElemTryScalarElemOfTypedObject(emitted, 1.6785 + obj, 1.6786 + index, 1.6787 + objDescrs, 1.6788 + elemDescrs, 1.6789 + elemSize); 1.6790 + 1.6791 + case TypeDescr::Reference: 1.6792 + return true; 1.6793 + 1.6794 + case TypeDescr::UnsizedArray: 1.6795 + MOZ_ASSUME_UNREACHABLE("Unsized arrays cannot be element types"); 1.6796 + } 1.6797 + 1.6798 + MOZ_ASSUME_UNREACHABLE("Bad kind"); 1.6799 +} 1.6800 + 1.6801 +static MIRType 1.6802 +MIRTypeForTypedArrayRead(ScalarTypeDescr::Type arrayType, 1.6803 + bool observedDouble); 1.6804 + 1.6805 +bool 1.6806 +IonBuilder::checkTypedObjectIndexInBounds(int32_t 
elemSize, 1.6807 + MDefinition *obj, 1.6808 + MDefinition *index, 1.6809 + TypeDescrSet objDescrs, 1.6810 + MDefinition **indexAsByteOffset, 1.6811 + bool *canBeNeutered) 1.6812 +{ 1.6813 + // Ensure index is an integer. 1.6814 + MInstruction *idInt32 = MToInt32::New(alloc(), index); 1.6815 + current->add(idInt32); 1.6816 + 1.6817 + // If we know the length statically from the type, just embed it. 1.6818 + // Otherwise, load it from the appropriate reserved slot on the 1.6819 + // typed object. We know it's an int32, so we can convert from 1.6820 + // Value to int32 using truncation. 1.6821 + int32_t lenOfAll; 1.6822 + MDefinition *length; 1.6823 + if (objDescrs.hasKnownArrayLength(&lenOfAll)) { 1.6824 + length = constantInt(lenOfAll); 1.6825 + 1.6826 + // If we are not loading the length from the object itself, 1.6827 + // then we still need to check if the object was neutered. 1.6828 + *canBeNeutered = true; 1.6829 + } else { 1.6830 + MInstruction *lengthValue = MLoadFixedSlot::New(alloc(), obj, JS_TYPEDOBJ_SLOT_LENGTH); 1.6831 + current->add(lengthValue); 1.6832 + 1.6833 + MInstruction *length32 = MTruncateToInt32::New(alloc(), lengthValue); 1.6834 + current->add(length32); 1.6835 + 1.6836 + length = length32; 1.6837 + 1.6838 + // If we are loading the length from the object itself, 1.6839 + // then we do not need an extra neuter check, because the length 1.6840 + // will have been set to 0 when the object was neutered. 1.6841 + *canBeNeutered = false; 1.6842 + } 1.6843 + 1.6844 + index = addBoundsCheck(idInt32, length); 1.6845 + 1.6846 + // Since we passed the bounds check, it is impossible for the 1.6847 + // result of multiplication to overflow; so enable imul path. 1.6848 + MMul *mul = MMul::New(alloc(), index, constantInt(elemSize), 1.6849 + MIRType_Int32, MMul::Integer); 1.6850 + current->add(mul); 1.6851 + 1.6852 + *indexAsByteOffset = mul; 1.6853 + return true; 1.6854 +} 1.6855 + 1.6856 +bool 1.6857 +IonBuilder::getElemTryScalarElemOfTypedObject(bool *emitted, 1.6858 + MDefinition *obj, 1.6859 + MDefinition *index, 1.6860 + TypeDescrSet objDescrs, 1.6861 + TypeDescrSet elemDescrs, 1.6862 + int32_t elemSize) 1.6863 +{ 1.6864 + JS_ASSERT(objDescrs.allOfArrayKind()); 1.6865 + 1.6866 + // Must always be loading the same scalar type 1.6867 + ScalarTypeDescr::Type elemType; 1.6868 + if (!elemDescrs.scalarType(&elemType)) 1.6869 + return true; 1.6870 + JS_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType)); 1.6871 + 1.6872 + bool canBeNeutered; 1.6873 + MDefinition *indexAsByteOffset; 1.6874 + if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objDescrs, 1.6875 + &indexAsByteOffset, &canBeNeutered)) 1.6876 + { 1.6877 + return false; 1.6878 + } 1.6879 + 1.6880 + return pushScalarLoadFromTypedObject(emitted, obj, indexAsByteOffset, elemType, canBeNeutered); 1.6881 +} 1.6882 + 1.6883 +bool 1.6884 +IonBuilder::pushScalarLoadFromTypedObject(bool *emitted, 1.6885 + MDefinition *obj, 1.6886 + MDefinition *offset, 1.6887 + ScalarTypeDescr::Type elemType, 1.6888 + bool canBeNeutered) 1.6889 +{ 1.6890 + int32_t size = ScalarTypeDescr::size(elemType); 1.6891 + JS_ASSERT(size == ScalarTypeDescr::alignment(elemType)); 1.6892 + 1.6893 + // Find location within the owner object. 1.6894 + MDefinition *elements, *scaledOffset; 1.6895 + loadTypedObjectElements(obj, offset, size, canBeNeutered, 1.6896 + &elements, &scaledOffset); 1.6897 + 1.6898 + // Load the element. 
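 + // loadTypedObjectElements has rescaled the byte offset into an element-sized + // index, so the read below can reuse the ordinary typed array load machinery.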
1.6899 + MLoadTypedArrayElement *load = MLoadTypedArrayElement::New(alloc(), elements, scaledOffset, elemType); 1.6900 + current->add(load); 1.6901 + current->push(load); 1.6902 + 1.6903 + // If we are reading in-bounds elements, we can use knowledge about 1.6904 + // the array type to determine the result type, even if the opcode has 1.6905 + // never executed. The known pushed type is only used to distinguish 1.6906 + // uint32 reads that may produce either doubles or integers. 1.6907 + types::TemporaryTypeSet *resultTypes = bytecodeTypes(pc); 1.6908 + bool allowDouble = resultTypes->hasType(types::Type::DoubleType()); 1.6909 + 1.6910 + // Note: knownType is not necessarily in resultTypes; e.g. if we 1.6911 + // have only observed integers coming out of float array. 1.6912 + MIRType knownType = MIRTypeForTypedArrayRead(elemType, allowDouble); 1.6913 + 1.6914 + // Note: we can ignore the type barrier here, we know the type must 1.6915 + // be valid and unbarriered. Also, need not set resultTypeSet, 1.6916 + // because knownType is scalar and a resultTypeSet would provide 1.6917 + // no useful additional info. 1.6918 + load->setResultType(knownType); 1.6919 + 1.6920 + *emitted = true; 1.6921 + return true; 1.6922 +} 1.6923 + 1.6924 +bool 1.6925 +IonBuilder::getElemTryComplexElemOfTypedObject(bool *emitted, 1.6926 + MDefinition *obj, 1.6927 + MDefinition *index, 1.6928 + TypeDescrSet objDescrs, 1.6929 + TypeDescrSet elemDescrs, 1.6930 + int32_t elemSize) 1.6931 +{ 1.6932 + JS_ASSERT(objDescrs.allOfArrayKind()); 1.6933 + 1.6934 + MDefinition *type = loadTypedObjectType(obj); 1.6935 + MDefinition *elemTypeObj = typeObjectForElementFromArrayStructType(type); 1.6936 + 1.6937 + bool canBeNeutered; 1.6938 + MDefinition *indexAsByteOffset; 1.6939 + if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objDescrs, 1.6940 + &indexAsByteOffset, &canBeNeutered)) 1.6941 + { 1.6942 + return false; 1.6943 + } 1.6944 + 1.6945 + return pushDerivedTypedObject(emitted, obj, indexAsByteOffset, 1.6946 + elemDescrs, elemTypeObj, canBeNeutered); 1.6947 +} 1.6948 + 1.6949 +bool 1.6950 +IonBuilder::pushDerivedTypedObject(bool *emitted, 1.6951 + MDefinition *obj, 1.6952 + MDefinition *offset, 1.6953 + TypeDescrSet derivedTypeDescrs, 1.6954 + MDefinition *derivedTypeObj, 1.6955 + bool canBeNeutered) 1.6956 +{ 1.6957 + // Find location within the owner object. 1.6958 + MDefinition *owner, *ownerOffset; 1.6959 + loadTypedObjectData(obj, offset, canBeNeutered, &owner, &ownerOffset); 1.6960 + 1.6961 + // Create the derived typed object. 1.6962 + MInstruction *derivedTypedObj = MNewDerivedTypedObject::New(alloc(), 1.6963 + derivedTypeDescrs, 1.6964 + derivedTypeObj, 1.6965 + owner, 1.6966 + ownerOffset); 1.6967 + current->add(derivedTypedObj); 1.6968 + current->push(derivedTypedObj); 1.6969 + 1.6970 + // Determine (if possible) the class/proto that `derivedTypedObj` 1.6971 + // will have. For derived typed objects, the class (transparent vs 1.6972 + // opaque) will be the same as the incoming object from which the 1.6973 + // derived typed object is, well, derived. The prototype will be 1.6974 + // determined based on the type descriptor (and is immutable). 1.6975 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.6976 + const Class *expectedClass = objTypes ? 
objTypes->getKnownClass() : nullptr; 1.6977 + JSObject *expectedProto = derivedTypeDescrs.knownPrototype(); 1.6978 + JS_ASSERT_IF(expectedClass, IsTypedObjectClass(expectedClass)); 1.6979 + 1.6980 + // Determine (if possible) the class/proto that the observed type set 1.6981 + // describes. 1.6982 + types::TemporaryTypeSet *observedTypes = bytecodeTypes(pc); 1.6983 + const Class *observedClass = observedTypes->getKnownClass(); 1.6984 + JSObject *observedProto = observedTypes->getCommonPrototype(); 1.6985 + 1.6986 + // If expectedClass/expectedProto are both non-null (and hence 1.6987 + // known), we can predict precisely what TI type object 1.6988 + // derivedTypedObj will have. Therefore, if we observe that this 1.6989 + // TI type object is already contained in the set of 1.6990 + // observedTypes, we can skip the barrier. 1.6991 + // 1.6992 + // Barriers still wind up being needed in some relatively 1.6993 + // rare cases: 1.6994 + // 1.6995 + // - if multiple kinds of typed objects flow into this point, 1.6996 + // in which case we will not be able to predict expectedClass 1.6997 + // nor expectedProto. 1.6998 + // 1.6999 + // - if the code has never executed, in which case the set of 1.7000 + // observed types will be incomplete. 1.7001 + // 1.7002 + // Barriers are particularly expensive here because they prevent 1.7003 + // us from optimizing the MNewDerivedTypedObject away. 1.7004 + if (observedClass && observedProto && observedClass == expectedClass && 1.7005 + observedProto == expectedProto) 1.7006 + { 1.7007 + derivedTypedObj->setResultTypeSet(observedTypes); 1.7008 + } else { 1.7009 + if (!pushTypeBarrier(derivedTypedObj, observedTypes, true)) 1.7010 + return false; 1.7011 + } 1.7012 + 1.7013 + *emitted = true; 1.7014 + return true; 1.7015 +} 1.7016 + 1.7017 +bool 1.7018 +IonBuilder::getElemTryDense(bool *emitted, MDefinition *obj, MDefinition *index) 1.7019 +{ 1.7020 + JS_ASSERT(*emitted == false); 1.7021 + 1.7022 + if (!ElementAccessIsDenseNative(obj, index)) 1.7023 + return true; 1.7024 + 1.7025 + // Don't generate a fast path if there have been bounds check failures 1.7026 + // and this access might be on a sparse property. 1.7027 + if (ElementAccessHasExtraIndexedProperty(constraints(), obj) && failedBoundsCheck_) 1.7028 + return true; 1.7029 + 1.7030 + // Don't generate a fast path if this pc has seen negative indexes accessed, 1.7031 + // which will not appear to be extra indexed properties. 1.7032 + if (inspector->hasSeenNegativeIndexGetElement(pc)) 1.7033 + return true; 1.7034 + 1.7035 + // Emit dense getelem variant. 
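 + // For example, |a[i]| where every observed |a| is a dense native array + // compiles down to a bounds-checked load from the elements vector.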
1.7036 + if (!jsop_getelem_dense(obj, index)) 1.7037 + return false; 1.7038 + 1.7039 + *emitted = true; 1.7040 + return true; 1.7041 +} 1.7042 + 1.7043 +bool 1.7044 +IonBuilder::getElemTryTypedStatic(bool *emitted, MDefinition *obj, MDefinition *index) 1.7045 +{ 1.7046 + JS_ASSERT(*emitted == false); 1.7047 + 1.7048 + ScalarTypeDescr::Type arrayType; 1.7049 + if (!ElementAccessIsTypedArray(obj, index, &arrayType)) 1.7050 + return true; 1.7051 + 1.7052 + if (!LIRGenerator::allowStaticTypedArrayAccesses()) 1.7053 + return true; 1.7054 + 1.7055 + if (ElementAccessHasExtraIndexedProperty(constraints(), obj)) 1.7056 + return true; 1.7057 + 1.7058 + if (!obj->resultTypeSet()) 1.7059 + return true; 1.7060 + 1.7061 + JSObject *tarrObj = obj->resultTypeSet()->getSingleton(); 1.7062 + if (!tarrObj) 1.7063 + return true; 1.7064 + 1.7065 + TypedArrayObject *tarr = &tarrObj->as<TypedArrayObject>(); 1.7066 + 1.7067 + types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); 1.7068 + if (tarrType->unknownProperties()) 1.7069 + return true; 1.7070 + 1.7071 + // LoadTypedArrayElementStatic currently treats uint32 arrays as int32. 1.7072 + ArrayBufferView::ViewType viewType = (ArrayBufferView::ViewType) tarr->type(); 1.7073 + if (viewType == ArrayBufferView::TYPE_UINT32) 1.7074 + return true; 1.7075 + 1.7076 + MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType); 1.7077 + if (!ptr) 1.7078 + return true; 1.7079 + 1.7080 + // Emit LoadTypedArrayElementStatic. 1.7081 + tarrType->watchStateChangeForTypedArrayData(constraints()); 1.7082 + 1.7083 + obj->setImplicitlyUsedUnchecked(); 1.7084 + index->setImplicitlyUsedUnchecked(); 1.7085 + 1.7086 + MLoadTypedArrayElementStatic *load = MLoadTypedArrayElementStatic::New(alloc(), tarr, ptr); 1.7087 + current->add(load); 1.7088 + current->push(load); 1.7089 + 1.7090 + // The load is infallible if an undefined result will be coerced to the 1.7091 + // appropriate numeric type if the read is out of bounds. The truncation 1.7092 + // analysis picks up some of these cases, but is incomplete with respect 1.7093 + // to others. For now, sniff the bytecode for simple patterns following 1.7094 + // the load which guarantee a truncation or numeric conversion. 1.7095 + if (viewType == ArrayBufferView::TYPE_FLOAT32 || viewType == ArrayBufferView::TYPE_FLOAT64) { 1.7096 + jsbytecode *next = pc + JSOP_GETELEM_LENGTH; 1.7097 + if (*next == JSOP_POS) 1.7098 + load->setInfallible(); 1.7099 + } else { 1.7100 + jsbytecode *next = pc + JSOP_GETELEM_LENGTH; 1.7101 + if (*next == JSOP_ZERO && *(next + JSOP_ZERO_LENGTH) == JSOP_BITOR) 1.7102 + load->setInfallible(); 1.7103 + } 1.7104 + 1.7105 + *emitted = true; 1.7106 + return true; 1.7107 +} 1.7108 + 1.7109 +bool 1.7110 +IonBuilder::getElemTryTypedArray(bool *emitted, MDefinition *obj, MDefinition *index) 1.7111 +{ 1.7112 + JS_ASSERT(*emitted == false); 1.7113 + 1.7114 + ScalarTypeDescr::Type arrayType; 1.7115 + if (!ElementAccessIsTypedArray(obj, index, &arrayType)) 1.7116 + return true; 1.7117 + 1.7118 + // Emit typed getelem variant. 
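 + // For example, |i32[k]| on an Int32Array becomes an unboxed + // MLoadTypedArrayElement instead of a generic property access.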
1.7119 + if (!jsop_getelem_typed(obj, index, arrayType)) 1.7120 + return false; 1.7121 + 1.7122 + *emitted = true; 1.7123 + return true; 1.7124 +} 1.7125 + 1.7126 +bool 1.7127 +IonBuilder::getElemTryString(bool *emitted, MDefinition *obj, MDefinition *index) 1.7128 +{ 1.7129 + JS_ASSERT(*emitted == false); 1.7130 + 1.7131 + if (obj->type() != MIRType_String || !IsNumberType(index->type())) 1.7132 + return true; 1.7133 + 1.7134 + // If the index is expected to be out-of-bounds, don't optimize to avoid 1.7135 + // frequent bailouts. 1.7136 + if (bytecodeTypes(pc)->hasType(types::Type::UndefinedType())) 1.7137 + return true; 1.7138 + 1.7139 + // Emit fast path for string[index]. 1.7140 + MInstruction *idInt32 = MToInt32::New(alloc(), index); 1.7141 + current->add(idInt32); 1.7142 + index = idInt32; 1.7143 + 1.7144 + MStringLength *length = MStringLength::New(alloc(), obj); 1.7145 + current->add(length); 1.7146 + 1.7147 + index = addBoundsCheck(index, length); 1.7148 + 1.7149 + MCharCodeAt *charCode = MCharCodeAt::New(alloc(), obj, index); 1.7150 + current->add(charCode); 1.7151 + 1.7152 + MFromCharCode *result = MFromCharCode::New(alloc(), charCode); 1.7153 + current->add(result); 1.7154 + current->push(result); 1.7155 + 1.7156 + *emitted = true; 1.7157 + return true; 1.7158 +} 1.7159 + 1.7160 +bool 1.7161 +IonBuilder::getElemTryArguments(bool *emitted, MDefinition *obj, MDefinition *index) 1.7162 +{ 1.7163 + JS_ASSERT(*emitted == false); 1.7164 + 1.7165 + if (inliningDepth_ > 0) 1.7166 + return true; 1.7167 + 1.7168 + if (obj->type() != MIRType_MagicOptimizedArguments) 1.7169 + return true; 1.7170 + 1.7171 + // Emit GetFrameArgument. 1.7172 + 1.7173 + JS_ASSERT(!info().argsObjAliasesFormals()); 1.7174 + 1.7175 + // Type Inference has guaranteed this is an optimized arguments object. 1.7176 + obj->setImplicitlyUsedUnchecked(); 1.7177 + 1.7178 + // Ensure that we are not looking above the number of actual arguments. 1.7179 + MArgumentsLength *length = MArgumentsLength::New(alloc()); 1.7180 + current->add(length); 1.7181 + 1.7182 + // Ensure index is an integer. 1.7183 + MInstruction *idInt32 = MToInt32::New(alloc(), index); 1.7184 + current->add(idInt32); 1.7185 + index = idInt32; 1.7186 + 1.7187 + // Bail out if we read more than the number of actual arguments. 1.7188 + index = addBoundsCheck(index, length); 1.7189 + 1.7190 + // Load the argument from the actual arguments. 1.7191 + MGetFrameArgument *load = MGetFrameArgument::New(alloc(), index, analysis_.hasSetArg()); 1.7192 + current->add(load); 1.7193 + current->push(load); 1.7194 + 1.7195 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.7196 + if (!pushTypeBarrier(load, types, true)) 1.7197 + return false; 1.7198 + 1.7199 + *emitted = true; 1.7200 + return true; 1.7201 +} 1.7202 + 1.7203 +bool 1.7204 +IonBuilder::getElemTryArgumentsInlined(bool *emitted, MDefinition *obj, MDefinition *index) 1.7205 +{ 1.7206 + JS_ASSERT(*emitted == false); 1.7207 + 1.7208 + if (inliningDepth_ == 0) 1.7209 + return true; 1.7210 + 1.7211 + if (obj->type() != MIRType_MagicOptimizedArguments) 1.7212 + return true; 1.7213 + 1.7214 + // Emit inlined arguments.
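 + // Within an inlined frame no real arguments object exists; a constant index + // is resolved directly against the caller's CallInfo below.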
1.7215 + obj->setImplicitlyUsedUnchecked(); 1.7216 + 1.7217 + JS_ASSERT(!info().argsObjAliasesFormals()); 1.7218 + 1.7219 + // When the id is constant, we can just return the corresponding inlined argument. 1.7220 + if (index->isConstant() && index->toConstant()->value().isInt32()) { 1.7221 + JS_ASSERT(inliningDepth_ > 0); 1.7222 + 1.7223 + int32_t id = index->toConstant()->value().toInt32(); 1.7224 + index->setImplicitlyUsedUnchecked(); 1.7225 + 1.7226 + if (id < (int32_t)inlineCallInfo_->argc() && id >= 0) 1.7227 + current->push(inlineCallInfo_->getArg(id)); 1.7228 + else 1.7229 + pushConstant(UndefinedValue()); 1.7230 + 1.7231 + *emitted = true; 1.7232 + return true; 1.7233 + } 1.7234 + 1.7235 + // Inlined access with a non-constant index is not supported yet. 1.7236 + return abort("NYI inlined not constant get argument element"); 1.7237 +} 1.7238 + 1.7239 +bool 1.7240 +IonBuilder::getElemTryCache(bool *emitted, MDefinition *obj, MDefinition *index) 1.7241 +{ 1.7242 + JS_ASSERT(*emitted == false); 1.7243 + 1.7244 + // Make sure we have at least an object. 1.7245 + if (!obj->mightBeType(MIRType_Object)) 1.7246 + return true; 1.7247 + 1.7248 + // Don't cache for strings. 1.7249 + if (obj->mightBeType(MIRType_String)) 1.7250 + return true; 1.7251 + 1.7252 + // The index should be an integer or a string. 1.7253 + if (!index->mightBeType(MIRType_Int32) && !index->mightBeType(MIRType_String)) 1.7254 + return true; 1.7255 + 1.7256 + // Turn off caching if the element is int32 and we've seen non-native objects as the target 1.7257 + // of this getelem. 1.7258 + bool nonNativeGetElement = inspector->hasSeenNonNativeGetElement(pc); 1.7259 + if (index->mightBeType(MIRType_Int32) && nonNativeGetElement) 1.7260 + return true; 1.7261 + 1.7262 + // Emit GetElementCache. 1.7263 + 1.7264 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.7265 + bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj, nullptr, types); 1.7266 + 1.7267 + // Always add a barrier if the index might be a string, so that the cache 1.7268 + // can attach stubs for particular properties. 1.7269 + if (index->mightBeType(MIRType_String)) 1.7270 + barrier = true; 1.7271 + 1.7272 + // See note about always needing a barrier in jsop_getprop. 1.7273 + if (needsToMonitorMissingProperties(types)) 1.7274 + barrier = true; 1.7275 + 1.7276 + MInstruction *ins = MGetElementCache::New(alloc(), obj, index, barrier); 1.7277 + 1.7278 + current->add(ins); 1.7279 + current->push(ins); 1.7280 + 1.7281 + if (!resumeAfter(ins)) 1.7282 + return false; 1.7283 + 1.7284 + // Spice up type information. 1.7285 + if (index->type() == MIRType_Int32 && !barrier) { 1.7286 + bool needHoleCheck = !ElementAccessIsPacked(constraints(), obj); 1.7287 + MIRType knownType = GetElemKnownType(needHoleCheck, types); 1.7288 + 1.7289 + if (knownType != MIRType_Value && knownType != MIRType_Double) 1.7290 + ins->setResultType(knownType); 1.7291 + } 1.7292 + 1.7293 + if (!pushTypeBarrier(ins, types, barrier)) 1.7294 + return false; 1.7295 + 1.7296 + *emitted = true; 1.7297 + return true; 1.7298 +} 1.7299 + 1.7300 +bool 1.7301 +IonBuilder::jsop_getelem_dense(MDefinition *obj, MDefinition *index) 1.7302 +{ 1.7303 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.7304 + 1.7305 + if (JSOp(*pc) == JSOP_CALLELEM && !index->mightBeType(MIRType_String)) { 1.7306 + // Indexed call on an element of an array. Populate the observed types 1.7307 + // with any objects that could be in the array, to avoid extraneous 1.7308 + // type barriers.
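 + // For example, in |handlers[i](msg)| the element fetched by JSOP_CALLELEM is + // immediately invoked, so seeding the observed set with the array's possible + // element objects spares the callee a pointless barrier.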
1.7309 + AddObjectsForPropertyRead(obj, nullptr, types); 1.7310 + } 1.7311 + 1.7312 + bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj, nullptr, types); 1.7313 + bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj); 1.7314 + 1.7315 + // Reads which are on holes in the object do not have to bail out if 1.7316 + // undefined values have been observed at this access site and the access 1.7317 + // cannot hit another indexed property on the object or its prototypes. 1.7318 + bool readOutOfBounds = 1.7319 + types->hasType(types::Type::UndefinedType()) && 1.7320 + !ElementAccessHasExtraIndexedProperty(constraints(), obj); 1.7321 + 1.7322 + MIRType knownType = MIRType_Value; 1.7323 + if (!barrier) 1.7324 + knownType = GetElemKnownType(needsHoleCheck, types); 1.7325 + 1.7326 + // Ensure index is an integer. 1.7327 + MInstruction *idInt32 = MToInt32::New(alloc(), index); 1.7328 + current->add(idInt32); 1.7329 + index = idInt32; 1.7330 + 1.7331 + // Get the elements vector. 1.7332 + MInstruction *elements = MElements::New(alloc(), obj); 1.7333 + current->add(elements); 1.7334 + 1.7335 + // Note: to help GVN, use the original MElements instruction and not 1.7336 + // MConvertElementsToDoubles as operand. This is fine because converting 1.7337 + // elements to double does not change the initialized length. 1.7338 + MInitializedLength *initLength = MInitializedLength::New(alloc(), elements); 1.7339 + current->add(initLength); 1.7340 + 1.7341 + // If we can load the element as a definite double, make sure to check that 1.7342 + // the array has been converted to homogeneous doubles first. 1.7343 + // 1.7344 + // NB: We disable this optimization in parallel execution mode 1.7345 + // because it is inherently not threadsafe (how do you convert the 1.7346 + // array atomically when there might be concurrent readers?). 1.7347 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.7348 + ExecutionMode executionMode = info().executionMode(); 1.7349 + bool loadDouble = 1.7350 + executionMode == SequentialExecution && 1.7351 + !barrier && 1.7352 + loopDepth_ && 1.7353 + !readOutOfBounds && 1.7354 + !needsHoleCheck && 1.7355 + knownType == MIRType_Double && 1.7356 + objTypes && 1.7357 + objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles; 1.7358 + if (loadDouble) 1.7359 + elements = addConvertElementsToDoubles(elements); 1.7360 + 1.7361 + MInstruction *load; 1.7362 + 1.7363 + if (!readOutOfBounds) { 1.7364 + // This load should not return undefined, so likely we're reading 1.7365 + // in-bounds elements, and the array is packed or its holes are not 1.7366 + // read. This is the best case: we can separate the bounds check for 1.7367 + // hoisting. 1.7368 + index = addBoundsCheck(index, initLength); 1.7369 + 1.7370 + load = MLoadElement::New(alloc(), elements, index, needsHoleCheck, loadDouble); 1.7371 + current->add(load); 1.7372 + } else { 1.7373 + // This load may return undefined, so assume that we *can* read holes, 1.7374 + // or that we can read out-of-bounds accesses. In this case, the bounds 1.7375 + // check is part of the opcode. 1.7376 + load = MLoadElementHole::New(alloc(), elements, index, initLength, needsHoleCheck); 1.7377 + current->add(load); 1.7378 + 1.7379 + // If maybeUndefined was true, the typeset must have undefined, and 1.7380 + // then either additional types or a barrier. This means we should 1.7381 + // never have a typed version of LoadElementHole.
1.7382 + JS_ASSERT(knownType == MIRType_Value); 1.7383 + } 1.7384 + 1.7385 + // If the array is being converted to doubles, but we've observed 1.7386 + // just int, substitute a type set of int+double into the observed 1.7387 + // type set. The reason for this is that, in the 1.7388 + // interpreter+baseline, such arrays may consist of mixed 1.7389 + // ints/doubles, but when we enter ion code, we will be coercing 1.7390 + // all inputs to doubles. Therefore, the type barrier checking for 1.7391 + // just int is highly likely (*almost* guaranteed) to fail sooner 1.7392 + // or later. Essentially, by eagerly coercing to double, ion is 1.7393 + // making the observed types outdated. To compensate for this, we 1.7394 + // substitute a broader observed type set consisting of both ints 1.7395 + // and doubles. There is perhaps a tradeoff here, so we limit this 1.7396 + // optimization to parallel code, where it is needed to prevent 1.7397 + // perpetual bailouts in some extreme cases. (Bug 977853) 1.7398 + // 1.7399 + // NB: we have not added a MConvertElementsToDoubles MIR, so we 1.7400 + // cannot *assume* the result is a double. 1.7401 + if (executionMode == ParallelExecution && 1.7402 + barrier && 1.7403 + types->getKnownMIRType() == MIRType_Int32 && 1.7404 + objTypes && 1.7405 + objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles) 1.7406 + { 1.7407 + // Note: double implies int32 as well for typesets 1.7408 + types = alloc_->lifoAlloc()->new_<types::TemporaryTypeSet>(types::Type::DoubleType()); 1.7409 + if (!types) 1.7410 + return false; 1.7411 + 1.7412 + barrier = false; // Don't need a barrier anymore 1.7413 + } 1.7414 + 1.7415 + if (knownType != MIRType_Value) 1.7416 + load->setResultType(knownType); 1.7417 + 1.7418 + current->push(load); 1.7419 + return pushTypeBarrier(load, types, barrier); 1.7420 +} 1.7421 + 1.7422 +void 1.7423 +IonBuilder::addTypedArrayLengthAndData(MDefinition *obj, 1.7424 + BoundsChecking checking, 1.7425 + MDefinition **index, 1.7426 + MInstruction **length, MInstruction **elements) 1.7427 +{ 1.7428 + MOZ_ASSERT((index != nullptr) == (elements != nullptr)); 1.7429 + 1.7430 + if (obj->isConstant() && obj->toConstant()->value().isObject()) { 1.7431 + TypedArrayObject *tarr = &obj->toConstant()->value().toObject().as<TypedArrayObject>(); 1.7432 + void *data = tarr->viewData(); 1.7433 + // Bug 979449 - Optimistically embed the elements and use TI to 1.7434 + // invalidate if we move them. 1.7435 + if (!gc::IsInsideNursery(tarr->runtimeFromMainThread(), data)) { 1.7436 + // The 'data' pointer can change in rare circumstances 1.7437 + // (ArrayBufferObject::changeContents). 
1.7438 + types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); 1.7439 + if (!tarrType->unknownProperties()) { 1.7440 + tarrType->watchStateChangeForTypedArrayData(constraints()); 1.7441 + 1.7442 + obj->setImplicitlyUsedUnchecked(); 1.7443 + 1.7444 + int32_t len = SafeCast<int32_t>(tarr->length()); 1.7445 + *length = MConstant::New(alloc(), Int32Value(len)); 1.7446 + current->add(*length); 1.7447 + 1.7448 + if (index) { 1.7449 + if (checking == DoBoundsCheck) 1.7450 + *index = addBoundsCheck(*index, *length); 1.7451 + 1.7452 + *elements = MConstantElements::New(alloc(), data); 1.7453 + current->add(*elements); 1.7454 + } 1.7455 + return; 1.7456 + } 1.7457 + } 1.7458 + } 1.7459 + 1.7460 + *length = MTypedArrayLength::New(alloc(), obj); 1.7461 + current->add(*length); 1.7462 + 1.7463 + if (index) { 1.7464 + if (checking == DoBoundsCheck) 1.7465 + *index = addBoundsCheck(*index, *length); 1.7466 + 1.7467 + *elements = MTypedArrayElements::New(alloc(), obj); 1.7468 + current->add(*elements); 1.7469 + } 1.7470 +} 1.7471 + 1.7472 +MDefinition * 1.7473 +IonBuilder::convertShiftToMaskForStaticTypedArray(MDefinition *id, 1.7474 + ArrayBufferView::ViewType viewType) 1.7475 +{ 1.7476 + // No shifting is necessary if the typed array has single byte elements. 1.7477 + if (TypedArrayShift(viewType) == 0) 1.7478 + return id; 1.7479 + 1.7480 + // If the index is an already shifted constant, undo the shift to get the 1.7481 + // absolute offset being accessed. 1.7482 + if (id->isConstant() && id->toConstant()->value().isInt32()) { 1.7483 + int32_t index = id->toConstant()->value().toInt32(); 1.7484 + MConstant *offset = MConstant::New(alloc(), Int32Value(index << TypedArrayShift(viewType))); 1.7485 + current->add(offset); 1.7486 + return offset; 1.7487 + } 1.7488 + 1.7489 + if (!id->isRsh() || id->isEffectful()) 1.7490 + return nullptr; 1.7491 + if (!id->getOperand(1)->isConstant()) 1.7492 + return nullptr; 1.7493 + const Value &value = id->getOperand(1)->toConstant()->value(); 1.7494 + if (!value.isInt32() || uint32_t(value.toInt32()) != TypedArrayShift(viewType)) 1.7495 + return nullptr; 1.7496 + 1.7497 + // Instead of shifting, mask off the low bits of the index so that 1.7498 + // a non-scaled access on the typed array can be performed. 1.7499 + MConstant *mask = MConstant::New(alloc(), Int32Value(~((1 << value.toInt32()) - 1))); 1.7500 + MBitAnd *ptr = MBitAnd::New(alloc(), id->getOperand(0), mask); 1.7501 + 1.7502 + ptr->infer(nullptr, nullptr); 1.7503 + JS_ASSERT(!ptr->isEffectful()); 1.7504 + 1.7505 + current->add(mask); 1.7506 + current->add(ptr); 1.7507 + 1.7508 + return ptr; 1.7509 +} 1.7510 + 1.7511 +static MIRType 1.7512 +MIRTypeForTypedArrayRead(ScalarTypeDescr::Type arrayType, 1.7513 + bool observedDouble) 1.7514 +{ 1.7515 + switch (arrayType) { 1.7516 + case ScalarTypeDescr::TYPE_INT8: 1.7517 + case ScalarTypeDescr::TYPE_UINT8: 1.7518 + case ScalarTypeDescr::TYPE_UINT8_CLAMPED: 1.7519 + case ScalarTypeDescr::TYPE_INT16: 1.7520 + case ScalarTypeDescr::TYPE_UINT16: 1.7521 + case ScalarTypeDescr::TYPE_INT32: 1.7522 + return MIRType_Int32; 1.7523 + case ScalarTypeDescr::TYPE_UINT32: 1.7524 + return observedDouble ? MIRType_Double : MIRType_Int32; 1.7525 + case ScalarTypeDescr::TYPE_FLOAT32: 1.7526 + return (LIRGenerator::allowFloat32Optimizations()) ? 
MIRType_Float32 : MIRType_Double; 1.7527 + case ScalarTypeDescr::TYPE_FLOAT64: 1.7528 + return MIRType_Double; 1.7529 + } 1.7530 + MOZ_ASSUME_UNREACHABLE("Unknown typed array type"); 1.7531 +} 1.7532 + 1.7533 +bool 1.7534 +IonBuilder::jsop_getelem_typed(MDefinition *obj, MDefinition *index, 1.7535 + ScalarTypeDescr::Type arrayType) 1.7536 +{ 1.7537 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.7538 + 1.7539 + bool maybeUndefined = types->hasType(types::Type::UndefinedType()); 1.7540 + 1.7541 + // Reading from an Uint32Array will result in a double for values 1.7542 + // that don't fit in an int32. We have to bailout if this happens 1.7543 + // and the instruction is not known to return a double. 1.7544 + bool allowDouble = types->hasType(types::Type::DoubleType()); 1.7545 + 1.7546 + // Ensure id is an integer. 1.7547 + MInstruction *idInt32 = MToInt32::New(alloc(), index); 1.7548 + current->add(idInt32); 1.7549 + index = idInt32; 1.7550 + 1.7551 + if (!maybeUndefined) { 1.7552 + // Assume the index is in range, so that we can hoist the length, 1.7553 + // elements vector and bounds check. 1.7554 + 1.7555 + // If we are reading in-bounds elements, we can use knowledge about 1.7556 + // the array type to determine the result type, even if the opcode has 1.7557 + // never executed. The known pushed type is only used to distinguish 1.7558 + // uint32 reads that may produce either doubles or integers. 1.7559 + MIRType knownType = MIRTypeForTypedArrayRead(arrayType, allowDouble); 1.7560 + 1.7561 + // Get length, bounds-check, then get elements, and add all instructions. 1.7562 + MInstruction *length; 1.7563 + MInstruction *elements; 1.7564 + addTypedArrayLengthAndData(obj, DoBoundsCheck, &index, &length, &elements); 1.7565 + 1.7566 + // Load the element. 1.7567 + MLoadTypedArrayElement *load = MLoadTypedArrayElement::New(alloc(), elements, index, arrayType); 1.7568 + current->add(load); 1.7569 + current->push(load); 1.7570 + 1.7571 + // Note: we can ignore the type barrier here, we know the type must 1.7572 + // be valid and unbarriered. 1.7573 + load->setResultType(knownType); 1.7574 + return true; 1.7575 + } else { 1.7576 + // We need a type barrier if the array's element type has never been 1.7577 + // observed (we've only read out-of-bounds values). Note that for 1.7578 + // Uint32Array, we only check for int32: if allowDouble is false we 1.7579 + // will bailout when we read a double. 1.7580 + bool needsBarrier = true; 1.7581 + switch (arrayType) { 1.7582 + case ScalarTypeDescr::TYPE_INT8: 1.7583 + case ScalarTypeDescr::TYPE_UINT8: 1.7584 + case ScalarTypeDescr::TYPE_UINT8_CLAMPED: 1.7585 + case ScalarTypeDescr::TYPE_INT16: 1.7586 + case ScalarTypeDescr::TYPE_UINT16: 1.7587 + case ScalarTypeDescr::TYPE_INT32: 1.7588 + case ScalarTypeDescr::TYPE_UINT32: 1.7589 + if (types->hasType(types::Type::Int32Type())) 1.7590 + needsBarrier = false; 1.7591 + break; 1.7592 + case ScalarTypeDescr::TYPE_FLOAT32: 1.7593 + case ScalarTypeDescr::TYPE_FLOAT64: 1.7594 + if (allowDouble) 1.7595 + needsBarrier = false; 1.7596 + break; 1.7597 + default: 1.7598 + MOZ_ASSUME_UNREACHABLE("Unknown typed array type"); 1.7599 + } 1.7600 + 1.7601 + // Assume we will read out-of-bound values. In this case the 1.7602 + // bounds check will be part of the instruction, and the instruction 1.7603 + // will always return a Value. 
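 + // For example, reading |u8[100]| from a 10-element Uint8Array yields + // |undefined| rather than throwing, so the result must stay boxed until a + // type barrier (if any) refines it.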
1.7604 + MLoadTypedArrayElementHole *load = 1.7605 + MLoadTypedArrayElementHole::New(alloc(), obj, index, arrayType, allowDouble); 1.7606 + current->add(load); 1.7607 + current->push(load); 1.7608 + 1.7609 + return pushTypeBarrier(load, types, needsBarrier); 1.7610 + } 1.7611 +} 1.7612 + 1.7613 +bool 1.7614 +IonBuilder::jsop_setelem() 1.7615 +{ 1.7616 + bool emitted = false; 1.7617 + 1.7618 + MDefinition *value = current->pop(); 1.7619 + MDefinition *index = current->pop(); 1.7620 + MDefinition *object = current->pop(); 1.7621 + 1.7622 + if (!setElemTryTypedObject(&emitted, object, index, value) || emitted) 1.7623 + return emitted; 1.7624 + 1.7625 + if (!setElemTryTypedStatic(&emitted, object, index, value) || emitted) 1.7626 + return emitted; 1.7627 + 1.7628 + if (!setElemTryTypedArray(&emitted, object, index, value) || emitted) 1.7629 + return emitted; 1.7630 + 1.7631 + if (!setElemTryDense(&emitted, object, index, value) || emitted) 1.7632 + return emitted; 1.7633 + 1.7634 + if (!setElemTryArguments(&emitted, object, index, value) || emitted) 1.7635 + return emitted; 1.7636 + 1.7637 + if (script()->argumentsHasVarBinding() && object->mightBeType(MIRType_MagicOptimizedArguments)) 1.7638 + return abort("Type is not definitely lazy arguments."); 1.7639 + 1.7640 + if (!setElemTryCache(&emitted, object, index, value) || emitted) 1.7641 + return emitted; 1.7642 + 1.7643 + // Emit call. 1.7644 + MInstruction *ins = MCallSetElement::New(alloc(), object, index, value); 1.7645 + current->add(ins); 1.7646 + current->push(value); 1.7647 + 1.7648 + return resumeAfter(ins); 1.7649 +} 1.7650 + 1.7651 +bool 1.7652 +IonBuilder::setElemTryTypedObject(bool *emitted, MDefinition *obj, 1.7653 + MDefinition *index, MDefinition *value) 1.7654 +{ 1.7655 + JS_ASSERT(*emitted == false); 1.7656 + 1.7657 + TypeDescrSet objTypeDescrs; 1.7658 + if (!lookupTypeDescrSet(obj, &objTypeDescrs)) 1.7659 + return false; 1.7660 + 1.7661 + if (!objTypeDescrs.allOfArrayKind()) 1.7662 + return true; 1.7663 + 1.7664 + TypeDescrSet elemTypeDescrs; 1.7665 + if (!objTypeDescrs.arrayElementType(*this, &elemTypeDescrs)) 1.7666 + return false; 1.7667 + if (elemTypeDescrs.empty()) 1.7668 + return true; 1.7669 + 1.7670 + JS_ASSERT(TypeDescr::isSized(elemTypeDescrs.kind())); 1.7671 + 1.7672 + int32_t elemSize; 1.7673 + if (!elemTypeDescrs.allHaveSameSize(&elemSize)) 1.7674 + return true; 1.7675 + 1.7676 + switch (elemTypeDescrs.kind()) { 1.7677 + case TypeDescr::X4: 1.7678 + // FIXME (bug 894105): store a MIRType_float32x4 etc 1.7679 + return true; 1.7680 + 1.7681 + case TypeDescr::Reference: 1.7682 + case TypeDescr::Struct: 1.7683 + case TypeDescr::SizedArray: 1.7684 + case TypeDescr::UnsizedArray: 1.7685 + // For now, only optimize storing scalars. 
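 + // e.g. |points[i] = p|, where the elements are themselves typed objects, + // still goes through the generic SETELEM paths tried later.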
1.7686 + return true; 1.7687 + 1.7688 + case TypeDescr::Scalar: 1.7689 + return setElemTryScalarElemOfTypedObject(emitted, 1.7690 + obj, 1.7691 + index, 1.7692 + objTypeDescrs, 1.7693 + value, 1.7694 + elemTypeDescrs, 1.7695 + elemSize); 1.7696 + } 1.7697 + 1.7698 + MOZ_ASSUME_UNREACHABLE("Bad kind"); 1.7699 +} 1.7700 + 1.7701 +bool 1.7702 +IonBuilder::setElemTryScalarElemOfTypedObject(bool *emitted, 1.7703 + MDefinition *obj, 1.7704 + MDefinition *index, 1.7705 + TypeDescrSet objTypeDescrs, 1.7706 + MDefinition *value, 1.7707 + TypeDescrSet elemTypeDescrs, 1.7708 + int32_t elemSize) 1.7709 +{ 1.7710 + // Must always be storing the same scalar type. 1.7711 + ScalarTypeDescr::Type elemType; 1.7712 + if (!elemTypeDescrs.scalarType(&elemType)) 1.7713 + return true; 1.7714 + JS_ASSERT(elemSize == ScalarTypeDescr::alignment(elemType)); 1.7715 + 1.7716 + bool canBeNeutered; 1.7717 + MDefinition *indexAsByteOffset; 1.7718 + if (!checkTypedObjectIndexInBounds(elemSize, obj, index, objTypeDescrs, 1.7719 + &indexAsByteOffset, &canBeNeutered)) 1.7720 + { 1.7721 + return false; 1.7722 + } 1.7723 + 1.7724 + // Store the element. 1.7725 + if (!storeScalarTypedObjectValue(obj, indexAsByteOffset, elemType, canBeNeutered, false, value)) 1.7726 + return false; 1.7727 + 1.7728 + current->push(value); 1.7729 + 1.7730 + *emitted = true; 1.7731 + return true; 1.7732 +} 1.7733 + 1.7734 +bool 1.7735 +IonBuilder::setElemTryTypedStatic(bool *emitted, MDefinition *object, 1.7736 + MDefinition *index, MDefinition *value) 1.7737 +{ 1.7738 + JS_ASSERT(*emitted == false); 1.7739 + 1.7740 + ScalarTypeDescr::Type arrayType; 1.7741 + if (!ElementAccessIsTypedArray(object, index, &arrayType)) 1.7742 + return true; 1.7743 + 1.7744 + if (!LIRGenerator::allowStaticTypedArrayAccesses()) 1.7745 + return true; 1.7746 + 1.7747 + if (ElementAccessHasExtraIndexedProperty(constraints(), object)) 1.7748 + return true; 1.7749 + 1.7750 + if (!object->resultTypeSet()) 1.7751 + return true; 1.7752 + JSObject *tarrObj = object->resultTypeSet()->getSingleton(); 1.7753 + if (!tarrObj) 1.7754 + return true; 1.7755 + 1.7756 + TypedArrayObject *tarr = &tarrObj->as<TypedArrayObject>(); 1.7757 + 1.7758 + types::TypeObjectKey *tarrType = types::TypeObjectKey::get(tarr); 1.7759 + if (tarrType->unknownProperties()) 1.7760 + return true; 1.7761 + 1.7762 + ArrayBufferView::ViewType viewType = (ArrayBufferView::ViewType) tarr->type(); 1.7763 + MDefinition *ptr = convertShiftToMaskForStaticTypedArray(index, viewType); 1.7764 + if (!ptr) 1.7765 + return true; 1.7766 + 1.7767 + // Emit StoreTypedArrayElementStatic. 1.7768 + tarrType->watchStateChangeForTypedArrayData(constraints()); 1.7769 + 1.7770 + object->setImplicitlyUsedUnchecked(); 1.7771 + index->setImplicitlyUsedUnchecked(); 1.7772 + 1.7773 + // Clamp value to [0, 255] for Uint8ClampedArray.
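 + // For example, storing 300 writes 255 and storing -5 writes 0; fractional + // values round to nearest with ties to even, so |u8c[i] = 2.5| stores 2. + // MClampToUint8 implements this conversion.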
1.7774 + MDefinition *toWrite = value; 1.7775 + if (viewType == ArrayBufferView::TYPE_UINT8_CLAMPED) { 1.7776 + toWrite = MClampToUint8::New(alloc(), value); 1.7777 + current->add(toWrite->toInstruction()); 1.7778 + } 1.7779 + 1.7780 + MInstruction *store = MStoreTypedArrayElementStatic::New(alloc(), tarr, ptr, toWrite); 1.7781 + current->add(store); 1.7782 + current->push(value); 1.7783 + 1.7784 + if (!resumeAfter(store)) 1.7785 + return false; 1.7786 + 1.7787 + *emitted = true; 1.7788 + return true; 1.7789 +} 1.7790 + 1.7791 +bool 1.7792 +IonBuilder::setElemTryTypedArray(bool *emitted, MDefinition *object, 1.7793 + MDefinition *index, MDefinition *value) 1.7794 +{ 1.7795 + JS_ASSERT(*emitted == false); 1.7796 + 1.7797 + ScalarTypeDescr::Type arrayType; 1.7798 + if (!ElementAccessIsTypedArray(object, index, &arrayType)) 1.7799 + return true; 1.7800 + 1.7801 + // Emit typed setelem variant. 1.7802 + if (!jsop_setelem_typed(arrayType, SetElem_Normal, object, index, value)) 1.7803 + return false; 1.7804 + 1.7805 + *emitted = true; 1.7806 + return true; 1.7807 +} 1.7808 + 1.7809 +bool 1.7810 +IonBuilder::setElemTryDense(bool *emitted, MDefinition *object, 1.7811 + MDefinition *index, MDefinition *value) 1.7812 +{ 1.7813 + JS_ASSERT(*emitted == false); 1.7814 + 1.7815 + if (!ElementAccessIsDenseNative(object, index)) 1.7816 + return true; 1.7817 + if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, 1.7818 + &object, nullptr, &value, /* canModify = */ true)) 1.7819 + { 1.7820 + return true; 1.7821 + } 1.7822 + if (!object->resultTypeSet()) 1.7823 + return true; 1.7824 + 1.7825 + types::TemporaryTypeSet::DoubleConversion conversion = 1.7826 + object->resultTypeSet()->convertDoubleElements(constraints()); 1.7827 + 1.7828 + // If AmbiguousDoubleConversion, only handle int32 values for now. 1.7829 + if (conversion == types::TemporaryTypeSet::AmbiguousDoubleConversion && 1.7830 + value->type() != MIRType_Int32) 1.7831 + { 1.7832 + return true; 1.7833 + } 1.7834 + 1.7835 + // Don't generate a fast path if there have been bounds check failures 1.7836 + // and this access might be on a sparse property. 1.7837 + if (ElementAccessHasExtraIndexedProperty(constraints(), object) && failedBoundsCheck_) 1.7838 + return true; 1.7839 + 1.7840 + // Emit dense setelem variant. 1.7841 + if (!jsop_setelem_dense(conversion, SetElem_Normal, object, index, value)) 1.7842 + return false; 1.7843 + 1.7844 + *emitted = true; 1.7845 + return true; 1.7846 +} 1.7847 + 1.7848 +bool 1.7849 +IonBuilder::setElemTryArguments(bool *emitted, MDefinition *object, 1.7850 + MDefinition *index, MDefinition *value) 1.7851 +{ 1.7852 + JS_ASSERT(*emitted == false); 1.7853 + 1.7854 + if (object->type() != MIRType_MagicOptimizedArguments) 1.7855 + return true; 1.7856 + 1.7857 + // Arguments are not supported yet. 
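 + // e.g. |arguments[i] = v| makes Ion give up on compiling the script rather + // than emit a slow path.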
1.7858 + return abort("NYI arguments[]="); 1.7859 +} 1.7860 + 1.7861 +bool 1.7862 +IonBuilder::setElemTryCache(bool *emitted, MDefinition *object, 1.7863 + MDefinition *index, MDefinition *value) 1.7864 +{ 1.7865 + JS_ASSERT(*emitted == false); 1.7866 + 1.7867 + if (!object->mightBeType(MIRType_Object)) 1.7868 + return true; 1.7869 + 1.7870 + if (!index->mightBeType(MIRType_Int32) && !index->mightBeType(MIRType_String)) 1.7871 + return true; 1.7872 + 1.7873 + // TODO: Bug 876650: remove this check: 1.7874 + // Temporarily disable the cache for objects that are not dense natives 1.7875 + // until the cache supports more ICs. 1.7876 + SetElemICInspector icInspect(inspector->setElemICInspector(pc)); 1.7877 + if (!icInspect.sawDenseWrite() && !icInspect.sawTypedArrayWrite()) 1.7878 + return true; 1.7879 + 1.7880 + if (PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, 1.7881 + &object, nullptr, &value, /* canModify = */ true)) 1.7882 + { 1.7883 + return true; 1.7884 + } 1.7885 + 1.7886 + // We can avoid worrying about holes in the IC if we know a priori we are safe 1.7887 + // from them. If TI can guard that there are no indexed properties on the prototype 1.7888 + // chain, we know that we aren't missing any setters by overwriting the hole with 1.7889 + // another value. 1.7890 + bool guardHoles = ElementAccessHasExtraIndexedProperty(constraints(), object); 1.7891 + 1.7892 + if (NeedsPostBarrier(info(), value)) 1.7893 + current->add(MPostWriteBarrier::New(alloc(), object, value)); 1.7894 + 1.7895 + // Emit SetElementCache. 1.7896 + MInstruction *ins = MSetElementCache::New(alloc(), object, index, value, script()->strict(), guardHoles); 1.7897 + current->add(ins); 1.7898 + current->push(value); 1.7899 + 1.7900 + if (!resumeAfter(ins)) 1.7901 + return false; 1.7902 + 1.7903 + *emitted = true; 1.7904 + return true; 1.7905 +} 1.7906 + 1.7907 +bool 1.7908 +IonBuilder::jsop_setelem_dense(types::TemporaryTypeSet::DoubleConversion conversion, 1.7909 + SetElemSafety safety, 1.7910 + MDefinition *obj, MDefinition *id, MDefinition *value) 1.7911 +{ 1.7912 + MIRType elementType = DenseNativeElementType(constraints(), obj); 1.7913 + bool packed = ElementAccessIsPacked(constraints(), obj); 1.7914 + 1.7915 + // Writes to holes in the object do not have to bail out if they 1.7916 + // cannot hit another indexed property on the object or its prototypes. 1.7917 + bool writeOutOfBounds = !ElementAccessHasExtraIndexedProperty(constraints(), obj); 1.7918 + 1.7919 + if (NeedsPostBarrier(info(), value)) 1.7920 + current->add(MPostWriteBarrier::New(alloc(), obj, value)); 1.7921 + 1.7922 + // Ensure id is an integer. 1.7923 + MInstruction *idInt32 = MToInt32::New(alloc(), id); 1.7924 + current->add(idInt32); 1.7925 + id = idInt32; 1.7926 + 1.7927 + // Get the elements vector. 1.7928 + MElements *elements = MElements::New(alloc(), obj); 1.7929 + current->add(elements); 1.7930 + 1.7931 + // Ensure the value is a double, if double conversion might be needed.
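+ // For example, storing the int32 |3| into an array whose elements TI
+ // expects to be doubles converts the value via MToDouble below, keeping
+ // the elements homogeneous.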
1.7932 + MDefinition *newValue = value; 1.7933 + switch (conversion) { 1.7934 + case types::TemporaryTypeSet::AlwaysConvertToDoubles: 1.7935 + case types::TemporaryTypeSet::MaybeConvertToDoubles: { 1.7936 + MInstruction *valueDouble = MToDouble::New(alloc(), value); 1.7937 + current->add(valueDouble); 1.7938 + newValue = valueDouble; 1.7939 + break; 1.7940 + } 1.7941 + 1.7942 + case types::TemporaryTypeSet::AmbiguousDoubleConversion: { 1.7943 + JS_ASSERT(value->type() == MIRType_Int32); 1.7944 + MInstruction *maybeDouble = MMaybeToDoubleElement::New(alloc(), elements, value); 1.7945 + current->add(maybeDouble); 1.7946 + newValue = maybeDouble; 1.7947 + break; 1.7948 + } 1.7949 + 1.7950 + case types::TemporaryTypeSet::DontConvertToDoubles: 1.7951 + break; 1.7952 + 1.7953 + default: 1.7954 + MOZ_ASSUME_UNREACHABLE("Unknown double conversion"); 1.7955 + } 1.7956 + 1.7957 + bool writeHole = false; 1.7958 + if (safety == SetElem_Normal) { 1.7959 + SetElemICInspector icInspect(inspector->setElemICInspector(pc)); 1.7960 + writeHole = icInspect.sawOOBDenseWrite(); 1.7961 + } 1.7962 + 1.7963 + // Use MStoreElementHole if this SETELEM has written to out-of-bounds 1.7964 + // indexes in the past. Otherwise, use MStoreElement so that we can hoist 1.7965 + // the initialized length and bounds check. 1.7966 + MStoreElementCommon *store; 1.7967 + if (writeHole && writeOutOfBounds) { 1.7968 + JS_ASSERT(safety == SetElem_Normal); 1.7969 + 1.7970 + MStoreElementHole *ins = MStoreElementHole::New(alloc(), obj, elements, id, newValue); 1.7971 + store = ins; 1.7972 + 1.7973 + current->add(ins); 1.7974 + current->push(value); 1.7975 + 1.7976 + if (!resumeAfter(ins)) 1.7977 + return false; 1.7978 + } else { 1.7979 + MInitializedLength *initLength = MInitializedLength::New(alloc(), elements); 1.7980 + current->add(initLength); 1.7981 + 1.7982 + bool needsHoleCheck; 1.7983 + if (safety == SetElem_Normal) { 1.7984 + id = addBoundsCheck(id, initLength); 1.7985 + needsHoleCheck = !packed && !writeOutOfBounds; 1.7986 + } else { 1.7987 + needsHoleCheck = false; 1.7988 + } 1.7989 + 1.7990 + MStoreElement *ins = MStoreElement::New(alloc(), elements, id, newValue, needsHoleCheck); 1.7991 + store = ins; 1.7992 + 1.7993 + if (safety == SetElem_Unsafe) 1.7994 + ins->setRacy(); 1.7995 + 1.7996 + current->add(ins); 1.7997 + 1.7998 + if (safety == SetElem_Normal) 1.7999 + current->push(value); 1.8000 + 1.8001 + if (!resumeAfter(ins)) 1.8002 + return false; 1.8003 + } 1.8004 + 1.8005 + // Determine whether a write barrier is required. 1.8006 + if (obj->resultTypeSet()->propertyNeedsBarrier(constraints(), JSID_VOID)) 1.8007 + store->setNeedsBarrier(); 1.8008 + 1.8009 + if (elementType != MIRType_None && packed) 1.8010 + store->setElementType(elementType); 1.8011 + 1.8012 + return true; 1.8013 +} 1.8014 + 1.8015 + 1.8016 +bool 1.8017 +IonBuilder::jsop_setelem_typed(ScalarTypeDescr::Type arrayType, 1.8018 + SetElemSafety safety, 1.8019 + MDefinition *obj, MDefinition *id, MDefinition *value) 1.8020 +{ 1.8021 + bool expectOOB; 1.8022 + if (safety == SetElem_Normal) { 1.8023 + SetElemICInspector icInspect(inspector->setElemICInspector(pc)); 1.8024 + expectOOB = icInspect.sawOOBTypedArrayWrite(); 1.8025 + } else { 1.8026 + expectOOB = false; 1.8027 + } 1.8028 + 1.8029 + if (expectOOB) 1.8030 + spew("Emitting OOB TypedArray SetElem"); 1.8031 + 1.8032 + // Ensure id is an integer. 
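+ // A fractional or non-numeric index (e.g. |ta[1.5] = x|) is expected to
+ // fail the int32 conversion and bail out.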
1.8033 + MInstruction *idInt32 = MToInt32::New(alloc(), id); 1.8034 + current->add(idInt32); 1.8035 + id = idInt32; 1.8036 + 1.8037 + // Get length, bounds-check, then get elements, and add all instructions. 1.8038 + MInstruction *length; 1.8039 + MInstruction *elements; 1.8040 + BoundsChecking checking = (!expectOOB && safety == SetElem_Normal) 1.8041 + ? DoBoundsCheck 1.8042 + : SkipBoundsCheck; 1.8043 + addTypedArrayLengthAndData(obj, checking, &id, &length, &elements); 1.8044 + 1.8045 + // Clamp value to [0, 255] for Uint8ClampedArray. 1.8046 + MDefinition *toWrite = value; 1.8047 + if (arrayType == ScalarTypeDescr::TYPE_UINT8_CLAMPED) { 1.8048 + toWrite = MClampToUint8::New(alloc(), value); 1.8049 + current->add(toWrite->toInstruction()); 1.8050 + } 1.8051 + 1.8052 + // Store the value. 1.8053 + MInstruction *ins; 1.8054 + if (expectOOB) { 1.8055 + ins = MStoreTypedArrayElementHole::New(alloc(), elements, length, id, toWrite, arrayType); 1.8056 + } else { 1.8057 + MStoreTypedArrayElement *store = 1.8058 + MStoreTypedArrayElement::New(alloc(), elements, id, toWrite, arrayType); 1.8059 + if (safety == SetElem_Unsafe) 1.8060 + store->setRacy(); 1.8061 + ins = store; 1.8062 + } 1.8063 + 1.8064 + current->add(ins); 1.8065 + 1.8066 + if (safety == SetElem_Normal) 1.8067 + current->push(value); 1.8068 + 1.8069 + return resumeAfter(ins); 1.8070 +} 1.8071 + 1.8072 +bool 1.8073 +IonBuilder::jsop_setelem_typed_object(ScalarTypeDescr::Type arrayType, 1.8074 + SetElemSafety safety, 1.8075 + bool racy, 1.8076 + MDefinition *object, MDefinition *index, MDefinition *value) 1.8077 +{ 1.8078 + JS_ASSERT(safety == SetElem_Unsafe); // Can be fixed, but there's been no reason to as of yet 1.8079 + 1.8080 + MInstruction *int_index = MToInt32::New(alloc(), index); 1.8081 + current->add(int_index); 1.8082 + 1.8083 + size_t elemSize = ScalarTypeDescr::alignment(arrayType); 1.8084 + MMul *byteOffset = MMul::New(alloc(), int_index, constantInt(elemSize), 1.8085 + MIRType_Int32, MMul::Integer); 1.8086 + current->add(byteOffset); 1.8087 + 1.8088 + if (!storeScalarTypedObjectValue(object, byteOffset, arrayType, false, racy, value)) 1.8089 + return false; 1.8090 + 1.8091 + return true; 1.8092 +} 1.8093 + 1.8094 +bool 1.8095 +IonBuilder::jsop_length() 1.8096 +{ 1.8097 + if (jsop_length_fastPath()) 1.8098 + return true; 1.8099 + 1.8100 + PropertyName *name = info().getAtom(pc)->asPropertyName(); 1.8101 + return jsop_getprop(name); 1.8102 +} 1.8103 + 1.8104 +bool 1.8105 +IonBuilder::jsop_length_fastPath() 1.8106 +{ 1.8107 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.8108 + 1.8109 + if (types->getKnownMIRType() != MIRType_Int32) 1.8110 + return false; 1.8111 + 1.8112 + MDefinition *obj = current->peek(-1); 1.8113 + 1.8114 + if (obj->mightBeType(MIRType_String)) { 1.8115 + if (obj->mightBeType(MIRType_Object)) 1.8116 + return false; 1.8117 + current->pop(); 1.8118 + MStringLength *ins = MStringLength::New(alloc(), obj); 1.8119 + current->add(ins); 1.8120 + current->push(ins); 1.8121 + return true; 1.8122 + } 1.8123 + 1.8124 + if (obj->mightBeType(MIRType_Object)) { 1.8125 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.8126 + 1.8127 + if (objTypes && 1.8128 + objTypes->getKnownClass() == &ArrayObject::class_ && 1.8129 + !objTypes->hasObjectFlags(constraints(), types::OBJECT_FLAG_LENGTH_OVERFLOW)) 1.8130 + { 1.8131 + current->pop(); 1.8132 + MElements *elements = MElements::New(alloc(), obj); 1.8133 + current->add(elements); 1.8134 + 1.8135 + // Read length. 
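+ // |arr.length| on a known ArrayObject thus becomes a raw load from the
+ // elements header, with no property IC.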
1.8136 + MArrayLength *length = MArrayLength::New(alloc(), elements); 1.8137 + current->add(length); 1.8138 + current->push(length); 1.8139 + return true; 1.8140 + } 1.8141 + 1.8142 + if (objTypes && objTypes->getTypedArrayType() != ScalarTypeDescr::TYPE_MAX) { 1.8143 + current->pop(); 1.8144 + MInstruction *length = addTypedArrayLength(obj); 1.8145 + current->push(length); 1.8146 + return true; 1.8147 + } 1.8148 + } 1.8149 + 1.8150 + return false; 1.8151 +} 1.8152 + 1.8153 +bool 1.8154 +IonBuilder::jsop_arguments() 1.8155 +{ 1.8156 + if (info().needsArgsObj()) { 1.8157 + current->push(current->argumentsObject()); 1.8158 + return true; 1.8159 + } 1.8160 + JS_ASSERT(lazyArguments_); 1.8161 + current->push(lazyArguments_); 1.8162 + return true; 1.8163 +} 1.8164 + 1.8165 +bool 1.8166 +IonBuilder::jsop_arguments_length() 1.8167 +{ 1.8168 + // Type Inference has guaranteed this is an optimized arguments object. 1.8169 + MDefinition *args = current->pop(); 1.8170 + args->setImplicitlyUsedUnchecked(); 1.8171 + 1.8172 + // If we are not inlined, the number of actual arguments is only known at runtime. 1.8173 + if (inliningDepth_ == 0) { 1.8174 + MInstruction *ins = MArgumentsLength::New(alloc()); 1.8175 + current->add(ins); 1.8176 + current->push(ins); 1.8177 + return true; 1.8178 + } 1.8179 + 1.8180 + // We are inlining and know the exact number of arguments the caller pushed. 1.8181 + return pushConstant(Int32Value(inlineCallInfo_->argv().length())); 1.8182 +} 1.8183 + 1.8184 +bool 1.8185 +IonBuilder::jsop_rest() 1.8186 +{ 1.8187 + JSObject *templateObject = inspector->getTemplateObject(pc); 1.8188 + JS_ASSERT(templateObject->is<ArrayObject>()); 1.8189 + 1.8190 + if (inliningDepth_ == 0) { 1.8191 + // We don't know anything about the call site. 1.8192 + MArgumentsLength *numActuals = MArgumentsLength::New(alloc()); 1.8193 + current->add(numActuals); 1.8194 + 1.8195 + // Pass in the number of actual arguments, the number of formals (not 1.8196 + // including the rest parameter slot itself), and the template object. 1.8197 + MRest *rest = MRest::New(alloc(), constraints(), numActuals, info().nargs() - 1, 1.8198 + templateObject); 1.8199 + current->add(rest); 1.8200 + current->push(rest); 1.8201 + return true; 1.8202 + } 1.8203 + 1.8204 + // We know the exact number of arguments the caller pushed. 1.8205 + unsigned numActuals = inlineCallInfo_->argv().length(); 1.8206 + unsigned numFormals = info().nargs() - 1; 1.8207 + unsigned numRest = numActuals > numFormals ? numActuals - numFormals : 0; 1.8208 + 1.8209 + MNewArray *array = MNewArray::New(alloc(), constraints(), numRest, templateObject, 1.8210 + templateObject->type()->initialHeap(constraints()), 1.8211 + MNewArray::NewArray_Allocating); 1.8212 + current->add(array); 1.8213 + 1.8214 + if (numRest == 0) { 1.8215 + // No more updating to do. (Note that in this one case the length from 1.8216 + // the template object is already correct.) 1.8217 + current->push(array); 1.8218 + return true; 1.8219 + } 1.8220 + 1.8221 + MElements *elements = MElements::New(alloc(), array); 1.8222 + current->add(elements); 1.8223 + 1.8224 + // Unroll the argument copy loop. We don't need to do any bounds or hole 1.8225 + // checking here.
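+ // For example, for an inlined call |f(1, 2, 3)| to |function f(x, ...rest)|,
+ // numFormals is 1 and numActuals is 3, so the loop stores the actuals 2
+ // and 3 into rest[0] and rest[1].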
1.8226 + MConstant *index = nullptr; 1.8227 + for (unsigned i = numFormals; i < numActuals; i++) { 1.8228 + index = MConstant::New(alloc(), Int32Value(i - numFormals)); 1.8229 + current->add(index); 1.8230 + 1.8231 + MDefinition *arg = inlineCallInfo_->argv()[i]; 1.8232 + MStoreElement *store = MStoreElement::New(alloc(), elements, index, arg, 1.8233 + /* needsHoleCheck = */ false); 1.8234 + current->add(store); 1.8235 + 1.8236 + if (NeedsPostBarrier(info(), arg)) 1.8237 + current->add(MPostWriteBarrier::New(alloc(), array, arg)); 1.8238 + } 1.8239 + 1.8240 + // The array's length is incorrectly 0 now, from the template object 1.8241 + // created by BaselineCompiler::emit_JSOP_REST() before the actual argument 1.8242 + // count was known. Set the correct length now that we know that count. 1.8243 + MSetArrayLength *length = MSetArrayLength::New(alloc(), elements, index); 1.8244 + current->add(length); 1.8245 + 1.8246 + // Update the initialized length for all the (necessarily non-hole) 1.8247 + // elements added. 1.8248 + MSetInitializedLength *initLength = MSetInitializedLength::New(alloc(), elements, index); 1.8249 + current->add(initLength); 1.8250 + 1.8251 + current->push(array); 1.8252 + return true; 1.8253 +} 1.8254 + 1.8255 +bool 1.8256 +IonBuilder::getDefiniteSlot(types::TemporaryTypeSet *types, PropertyName *name, 1.8257 + types::HeapTypeSetKey *property) 1.8258 +{ 1.8259 + if (!types || types->unknownObject() || types->getObjectCount() != 1) 1.8260 + return false; 1.8261 + 1.8262 + types::TypeObjectKey *type = types->getObject(0); 1.8263 + if (type->unknownProperties() || type->singleton()) 1.8264 + return false; 1.8265 + 1.8266 + jsid id = NameToId(name); 1.8267 + 1.8268 + *property = type->property(id); 1.8269 + return property->maybeTypes() && 1.8270 + property->maybeTypes()->definiteProperty() && 1.8271 + !property->nonData(constraints()); 1.8272 +} 1.8273 + 1.8274 +bool 1.8275 +IonBuilder::jsop_runonce() 1.8276 +{ 1.8277 + MRunOncePrologue *ins = MRunOncePrologue::New(alloc()); 1.8278 + current->add(ins); 1.8279 + return resumeAfter(ins); 1.8280 +} 1.8281 + 1.8282 +bool 1.8283 +IonBuilder::jsop_not() 1.8284 +{ 1.8285 + MDefinition *value = current->pop(); 1.8286 + 1.8287 + MNot *ins = MNot::New(alloc(), value); 1.8288 + current->add(ins); 1.8289 + current->push(ins); 1.8290 + ins->infer(); 1.8291 + return true; 1.8292 +} 1.8293 + 1.8294 +bool 1.8295 +IonBuilder::objectsHaveCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name, 1.8296 + bool isGetter, JSObject *foundProto) 1.8297 +{ 1.8298 + // With foundProto a prototype with a getter or setter for name, return 1.8299 + // whether looking up name on any object in |types| will go through 1.8300 + // foundProto, i.e. all the objects have foundProto on their prototype 1.8301 + // chain and do not have a property for name before reaching foundProto. 1.8302 + 1.8303 + // No sense looking if we don't know what's going on. 
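+ // Illustration: to optimize a getter for |o.foo| found on prototype P,
+ // every object type in |types| must reach P on its prototype chain without
+ // an own "foo" property shadowing it; the loop below verifies this.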
1.8304 + if (!types || types->unknownObject()) 1.8305 + return false; 1.8306 + 1.8307 + for (unsigned i = 0; i < types->getObjectCount(); i++) { 1.8308 + if (types->getSingleObject(i) == foundProto) 1.8309 + continue; 1.8310 + 1.8311 + types::TypeObjectKey *type = types->getObject(i); 1.8312 + if (!type) 1.8313 + continue; 1.8314 + 1.8315 + while (type) { 1.8316 + if (type->unknownProperties()) 1.8317 + return false; 1.8318 + 1.8319 + const Class *clasp = type->clasp(); 1.8320 + if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) 1.8321 + return false; 1.8322 + 1.8323 + // Look for a getter/setter on the class itself which may need 1.8324 + // to be called. Ignore the getGeneric hook for typed arrays; it 1.8325 + // only handles integers and forwards names to the prototype. 1.8326 + if (isGetter && clasp->ops.getGeneric && !IsTypedArrayClass(clasp)) 1.8327 + return false; 1.8328 + if (!isGetter && clasp->ops.setGeneric) 1.8329 + return false; 1.8330 + 1.8331 + // Test for isOwnProperty() without freezing. If we end up 1.8332 + // optimizing, freezePropertiesForCommonPrototype will freeze the 1.8333 + // property type sets later on. 1.8334 + types::HeapTypeSetKey property = type->property(NameToId(name)); 1.8335 + if (types::TypeSet *types = property.maybeTypes()) { 1.8336 + if (!types->empty() || types->nonDataProperty()) 1.8337 + return false; 1.8338 + } 1.8339 + if (JSObject *obj = type->singleton()) { 1.8340 + if (types::CanHaveEmptyPropertyTypesForOwnProperty(obj)) 1.8341 + return false; 1.8342 + } 1.8343 + 1.8344 + if (!type->hasTenuredProto()) 1.8345 + return false; 1.8346 + JSObject *proto = type->proto().toObjectOrNull(); 1.8347 + if (proto == foundProto) 1.8348 + break; 1.8349 + if (!proto) { 1.8350 + // The foundProto being searched for did not show up on the 1.8351 + // object's prototype chain. 1.8352 + return false; 1.8353 + } 1.8354 + type = types::TypeObjectKey::get(type->proto().toObjectOrNull()); 1.8355 + } 1.8356 + } 1.8357 + 1.8358 + return true; 1.8359 +} 1.8360 + 1.8361 +void 1.8362 +IonBuilder::freezePropertiesForCommonPrototype(types::TemporaryTypeSet *types, PropertyName *name, 1.8363 + JSObject *foundProto) 1.8364 +{ 1.8365 + for (unsigned i = 0; i < types->getObjectCount(); i++) { 1.8366 + // If we found a singleton object's own property, there's nothing to 1.8367 + // freeze. 1.8368 + if (types->getSingleObject(i) == foundProto) 1.8369 + continue; 1.8370 + 1.8371 + types::TypeObjectKey *type = types->getObject(i); 1.8372 + if (!type) 1.8373 + continue; 1.8374 + 1.8375 + while (true) { 1.8376 + types::HeapTypeSetKey property = type->property(NameToId(name)); 1.8377 + JS_ALWAYS_TRUE(!property.isOwnProperty(constraints())); 1.8378 + 1.8379 + // Don't mark the proto. It will be held down by the shape 1.8380 + // guard. This allows us to use properties found on prototypes 1.8381 + // with properties unknown to TI. 1.8382 + if (type->proto() == foundProto) 1.8383 + break; 1.8384 + type = types::TypeObjectKey::get(type->proto().toObjectOrNull()); 1.8385 + } 1.8386 + } 1.8387 +} 1.8388 + 1.8389 +inline MDefinition * 1.8390 +IonBuilder::testCommonGetterSetter(types::TemporaryTypeSet *types, PropertyName *name, 1.8391 + bool isGetter, JSObject *foundProto, Shape *lastProperty) 1.8392 +{ 1.8393 + // Check if all objects being accessed will look up the name through foundProto.
1.8394 + if (!objectsHaveCommonPrototype(types, name, isGetter, foundProto)) 1.8395 + return nullptr; 1.8396 + 1.8397 + // We can optimize the getter/setter, so freeze all involved properties to 1.8398 + // ensure there isn't a lower shadowing getter or setter installed in the 1.8399 + // future. 1.8400 + freezePropertiesForCommonPrototype(types, name, foundProto); 1.8401 + 1.8402 + // Add a shape guard on the prototype we found the property on. The rest of 1.8403 + // the prototype chain is guarded by TI freezes. Note that a shape guard is 1.8404 + // good enough here, even in the proxy case, because we have ensured there 1.8405 + // are no lookup hooks for this property. 1.8406 + MInstruction *wrapper = constant(ObjectValue(*foundProto)); 1.8407 + return addShapeGuard(wrapper, lastProperty, Bailout_ShapeGuard); 1.8408 +} 1.8409 + 1.8410 +bool 1.8411 +IonBuilder::annotateGetPropertyCache(MDefinition *obj, MGetPropertyCache *getPropCache, 1.8412 + types::TemporaryTypeSet *objTypes, 1.8413 + types::TemporaryTypeSet *pushedTypes) 1.8414 +{ 1.8415 + PropertyName *name = getPropCache->name(); 1.8416 + 1.8417 + // Ensure every pushed value is a singleton. 1.8418 + if (pushedTypes->unknownObject() || pushedTypes->baseFlags() != 0) 1.8419 + return true; 1.8420 + 1.8421 + for (unsigned i = 0; i < pushedTypes->getObjectCount(); i++) { 1.8422 + if (pushedTypes->getTypeObject(i) != nullptr) 1.8423 + return true; 1.8424 + } 1.8425 + 1.8426 + // The object's typeset should be a proper object. 1.8427 + if (!objTypes || objTypes->baseFlags() || objTypes->unknownObject()) 1.8428 + return true; 1.8429 + 1.8430 + unsigned int objCount = objTypes->getObjectCount(); 1.8431 + if (objCount == 0) 1.8432 + return true; 1.8433 + 1.8434 + InlinePropertyTable *inlinePropTable = getPropCache->initInlinePropertyTable(alloc(), pc); 1.8435 + if (!inlinePropTable) 1.8436 + return false; 1.8437 + 1.8438 + // Ensure that the relevant property typeset for each type object is 1.8439 + // a single-object typeset containing a JSFunction. 1.8440 + for (unsigned int i = 0; i < objCount; i++) { 1.8441 + types::TypeObject *baseTypeObj = objTypes->getTypeObject(i); 1.8442 + if (!baseTypeObj) 1.8443 + continue; 1.8444 + types::TypeObjectKey *typeObj = types::TypeObjectKey::get(baseTypeObj); 1.8445 + if (typeObj->unknownProperties() || !typeObj->hasTenuredProto() || !typeObj->proto().isObject()) 1.8446 + continue; 1.8447 + 1.8448 + const Class *clasp = typeObj->clasp(); 1.8449 + if (!ClassHasEffectlessLookup(clasp, name) || ClassHasResolveHook(compartment, clasp, name)) 1.8450 + continue; 1.8451 + 1.8452 + types::HeapTypeSetKey ownTypes = typeObj->property(NameToId(name)); 1.8453 + if (ownTypes.isOwnProperty(constraints())) 1.8454 + continue; 1.8455 + 1.8456 + JSObject *singleton = testSingletonProperty(typeObj->proto().toObject(), name); 1.8457 + if (!singleton || !singleton->is<JSFunction>()) 1.8458 + continue; 1.8459 + 1.8460 + // Don't add cases corresponding to non-observed pushes. 1.8461 + if (!pushedTypes->hasType(types::Type::ObjectType(singleton))) 1.8462 + continue; 1.8463 + 1.8464 + if (!inlinePropTable->addEntry(alloc(), baseTypeObj, &singleton->as<JSFunction>())) 1.8465 + return false; 1.8466 + } 1.8467 + 1.8468 + if (inlinePropTable->numEntries() == 0) { 1.8469 + getPropCache->clearInlinePropertyTable(); 1.8470 + return true; 1.8471 + } 1.8472 + 1.8473 +#ifdef DEBUG 1.8474 + if (inlinePropTable->numEntries() > 0) 1.8475 + IonSpew(IonSpew_Inlining, "Annotated GetPropertyCache with %d/%d inline cases", 1.8476 + (int)
inlinePropTable->numEntries(), (int) objCount); 1.8477 +#endif 1.8478 + 1.8479 + // If we successfully annotated the GetPropertyCache and there are inline cases, 1.8480 + // then keep a resume point of the state right before this instruction for use 1.8481 + // later when we have to bail out to this point in the fallback case of a 1.8482 + // PolyInlineDispatch. 1.8483 + if (inlinePropTable->numEntries() > 0) { 1.8484 + // Push the object back onto the stack temporarily to capture the resume point. 1.8485 + current->push(obj); 1.8486 + MResumePoint *resumePoint = MResumePoint::New(alloc(), current, pc, callerResumePoint_, 1.8487 + MResumePoint::ResumeAt); 1.8488 + if (!resumePoint) 1.8489 + return false; 1.8490 + inlinePropTable->setPriorResumePoint(resumePoint); 1.8491 + current->pop(); 1.8492 + } 1.8493 + return true; 1.8494 +} 1.8495 + 1.8496 +// Returns true if an idempotent cache has ever invalidated this script 1.8497 +// or an outer script. 1.8498 +bool 1.8499 +IonBuilder::invalidatedIdempotentCache() 1.8500 +{ 1.8501 + IonBuilder *builder = this; 1.8502 + do { 1.8503 + if (builder->script()->invalidatedIdempotentCache()) 1.8504 + return true; 1.8505 + builder = builder->callerBuilder_; 1.8506 + } while (builder); 1.8507 + 1.8508 + return false; 1.8509 +} 1.8510 + 1.8511 +bool 1.8512 +IonBuilder::loadSlot(MDefinition *obj, size_t slot, size_t nfixed, MIRType rvalType, 1.8513 + bool barrier, types::TemporaryTypeSet *types) 1.8514 +{ 1.8515 + if (slot < nfixed) { 1.8516 + MLoadFixedSlot *load = MLoadFixedSlot::New(alloc(), obj, slot); 1.8517 + current->add(load); 1.8518 + current->push(load); 1.8519 + 1.8520 + load->setResultType(rvalType); 1.8521 + return pushTypeBarrier(load, types, barrier); 1.8522 + } 1.8523 + 1.8524 + MSlots *slots = MSlots::New(alloc(), obj); 1.8525 + current->add(slots); 1.8526 + 1.8527 + MLoadSlot *load = MLoadSlot::New(alloc(), slots, slot - nfixed); 1.8528 + current->add(load); 1.8529 + current->push(load); 1.8530 + 1.8531 + load->setResultType(rvalType); 1.8532 + return pushTypeBarrier(load, types, barrier); 1.8533 +} 1.8534 + 1.8535 +bool 1.8536 +IonBuilder::loadSlot(MDefinition *obj, Shape *shape, MIRType rvalType, 1.8537 + bool barrier, types::TemporaryTypeSet *types) 1.8538 +{ 1.8539 + return loadSlot(obj, shape->slot(), shape->numFixedSlots(), rvalType, barrier, types); 1.8540 +} 1.8541 + 1.8542 +bool 1.8543 +IonBuilder::storeSlot(MDefinition *obj, size_t slot, size_t nfixed, 1.8544 + MDefinition *value, bool needsBarrier, 1.8545 + MIRType slotType /* = MIRType_None */) 1.8546 +{ 1.8547 + if (slot < nfixed) { 1.8548 + MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, slot, value); 1.8549 + current->add(store); 1.8550 + current->push(value); 1.8551 + if (needsBarrier) 1.8552 + store->setNeedsBarrier(); 1.8553 + return resumeAfter(store); 1.8554 + } 1.8555 + 1.8556 + MSlots *slots = MSlots::New(alloc(), obj); 1.8557 + current->add(slots); 1.8558 + 1.8559 + MStoreSlot *store = MStoreSlot::New(alloc(), slots, slot - nfixed, value); 1.8560 + current->add(store); 1.8561 + current->push(value); 1.8562 + if (needsBarrier) 1.8563 + store->setNeedsBarrier(); 1.8564 + if (slotType != MIRType_None) 1.8565 + store->setSlotType(slotType); 1.8566 + return resumeAfter(store); 1.8567 +} 1.8568 + 1.8569 +bool 1.8570 +IonBuilder::storeSlot(MDefinition *obj, Shape *shape, MDefinition *value, bool needsBarrier, 1.8571 + MIRType slotType /* = MIRType_None */) 1.8572 +{ 1.8573 + JS_ASSERT(shape->writable()); 1.8574 + return storeSlot(obj, shape->slot(), 
shape->numFixedSlots(), value, needsBarrier, slotType); 1.8575 +} 1.8576 + 1.8577 +bool 1.8578 +IonBuilder::jsop_getprop(PropertyName *name) 1.8579 +{ 1.8580 + bool emitted = false; 1.8581 + 1.8582 + // Try to optimize arguments.length. 1.8583 + if (!getPropTryArgumentsLength(&emitted) || emitted) 1.8584 + return emitted; 1.8585 + 1.8586 + types::TemporaryTypeSet *types = bytecodeTypes(pc); 1.8587 + bool barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), 1.8588 + current->peek(-1), name, types); 1.8589 + 1.8590 + // Always use a call if we are performing analysis and 1.8591 + // not actually emitting code, to simplify later analysis. Also skip deeper 1.8592 + // analysis if there are no known types for this operation, as it will 1.8593 + // always invalidate when executing. 1.8594 + if (info().executionModeIsAnalysis() || types->empty()) { 1.8595 + MDefinition *obj = current->peek(-1); 1.8596 + MCallGetProperty *call = MCallGetProperty::New(alloc(), obj, name, *pc == JSOP_CALLPROP); 1.8597 + current->add(call); 1.8598 + 1.8599 + // During the definite properties analysis we can still try to bake in 1.8600 + // constants read off the prototype chain, to allow inlining later on. 1.8601 + // In this case we still need the getprop call so that the later 1.8602 + // analysis knows when the |this| value has been read from. 1.8603 + if (info().executionModeIsAnalysis()) { 1.8604 + if (!getPropTryConstant(&emitted, name, types) || emitted) 1.8605 + return emitted; 1.8606 + } 1.8607 + 1.8608 + current->pop(); 1.8609 + current->push(call); 1.8610 + return resumeAfter(call) && pushTypeBarrier(call, types, true); 1.8611 + } 1.8612 + 1.8613 + // Try to hardcode known constants. 1.8614 + if (!getPropTryConstant(&emitted, name, types) || emitted) 1.8615 + return emitted; 1.8616 + 1.8617 + // Try to emit loads from known binary data blocks 1.8618 + if (!getPropTryTypedObject(&emitted, name, types) || emitted) 1.8619 + return emitted; 1.8620 + 1.8621 + // Try to emit loads from definite slots. 1.8622 + if (!getPropTryDefiniteSlot(&emitted, name, barrier, types) || emitted) 1.8623 + return emitted; 1.8624 + 1.8625 + // Try to inline a common property getter, or make a call. 1.8626 + if (!getPropTryCommonGetter(&emitted, name, types) || emitted) 1.8627 + return emitted; 1.8628 + 1.8629 + // Try to emit a monomorphic/polymorphic access based on baseline caches. 1.8630 + if (!getPropTryInlineAccess(&emitted, name, barrier, types) || emitted) 1.8631 + return emitted; 1.8632 + 1.8633 + // Try to emit a polymorphic cache. 1.8634 + if (!getPropTryCache(&emitted, name, barrier, types) || emitted) 1.8635 + return emitted; 1.8636 + 1.8637 + // Emit a call. 
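+ // None of the fast paths above applied, so fall back to a generic
+ // MCallGetProperty that calls into the VM.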
1.8638 + MDefinition *obj = current->pop(); 1.8639 + MCallGetProperty *call = MCallGetProperty::New(alloc(), obj, name, *pc == JSOP_CALLPROP); 1.8640 + current->add(call); 1.8641 + current->push(call); 1.8642 + if (!resumeAfter(call)) 1.8643 + return false; 1.8644 + 1.8645 + return pushTypeBarrier(call, types, true); 1.8646 +} 1.8647 + 1.8648 +bool 1.8649 +IonBuilder::getPropTryArgumentsLength(bool *emitted) 1.8650 +{ 1.8651 + JS_ASSERT(*emitted == false); 1.8652 + if (current->peek(-1)->type() != MIRType_MagicOptimizedArguments) { 1.8653 + if (script()->argumentsHasVarBinding() && 1.8654 + current->peek(-1)->mightBeType(MIRType_MagicOptimizedArguments)) 1.8655 + { 1.8656 + return abort("Type is not definitely lazy arguments."); 1.8657 + } 1.8658 + return true; 1.8659 + } 1.8660 + if (JSOp(*pc) != JSOP_LENGTH) 1.8661 + return true; 1.8662 + 1.8663 + *emitted = true; 1.8664 + return jsop_arguments_length(); 1.8665 +} 1.8666 + 1.8667 +bool 1.8668 +IonBuilder::getPropTryConstant(bool *emitted, PropertyName *name, 1.8669 + types::TemporaryTypeSet *types) 1.8670 +{ 1.8671 + JS_ASSERT(*emitted == false); 1.8672 + JSObject *singleton = types ? types->getSingleton() : nullptr; 1.8673 + if (!singleton) 1.8674 + return true; 1.8675 + 1.8676 + bool testObject, testString; 1.8677 + if (!testSingletonPropertyTypes(current->peek(-1), singleton, name, &testObject, &testString)) 1.8678 + return true; 1.8679 + 1.8680 + MDefinition *obj = current->pop(); 1.8681 + 1.8682 + // Property access is a known constant -- safe to emit. 1.8683 + JS_ASSERT(!testString || !testObject); 1.8684 + if (testObject) 1.8685 + current->add(MGuardObject::New(alloc(), obj)); 1.8686 + else if (testString) 1.8687 + current->add(MGuardString::New(alloc(), obj)); 1.8688 + else 1.8689 + obj->setImplicitlyUsedUnchecked(); 1.8690 + 1.8691 + pushConstant(ObjectValue(*singleton)); 1.8692 + 1.8693 + *emitted = true; 1.8694 + return true; 1.8695 +} 1.8696 + 1.8697 +bool 1.8698 +IonBuilder::getPropTryTypedObject(bool *emitted, PropertyName *name, 1.8699 + types::TemporaryTypeSet *resultTypes) 1.8700 +{ 1.8701 + TypeDescrSet fieldDescrs; 1.8702 + int32_t fieldOffset; 1.8703 + size_t fieldIndex; 1.8704 + if (!lookupTypedObjectField(current->peek(-1), name, &fieldOffset, 1.8705 + &fieldDescrs, &fieldIndex)) 1.8706 + return false; 1.8707 + if (fieldDescrs.empty()) 1.8708 + return true; 1.8709 + 1.8710 + switch (fieldDescrs.kind()) { 1.8711 + case TypeDescr::Reference: 1.8712 + return true; 1.8713 + 1.8714 + case TypeDescr::X4: 1.8715 + // FIXME (bug 894104): load into a MIRType_float32x4 etc 1.8716 + return true; 1.8717 + 1.8718 + case TypeDescr::Struct: 1.8719 + case TypeDescr::SizedArray: 1.8720 + return getPropTryComplexPropOfTypedObject(emitted, 1.8721 + fieldOffset, 1.8722 + fieldDescrs, 1.8723 + fieldIndex, 1.8724 + resultTypes); 1.8725 + 1.8726 + case TypeDescr::Scalar: 1.8727 + return getPropTryScalarPropOfTypedObject(emitted, 1.8728 + fieldOffset, 1.8729 + fieldDescrs, 1.8730 + resultTypes); 1.8731 + 1.8732 + case TypeDescr::UnsizedArray: 1.8733 + MOZ_ASSUME_UNREACHABLE("Field of unsized array type"); 1.8734 + } 1.8735 + 1.8736 + MOZ_ASSUME_UNREACHABLE("Bad kind"); 1.8737 +} 1.8738 + 1.8739 +bool 1.8740 +IonBuilder::getPropTryScalarPropOfTypedObject(bool *emitted, 1.8741 + int32_t fieldOffset, 1.8742 + TypeDescrSet fieldDescrs, 1.8743 + types::TemporaryTypeSet *resultTypes) 1.8744 +{ 1.8745 + // Must always be loading the same scalar type 1.8746 + ScalarTypeDescr::Type fieldType; 1.8747 + if (!fieldDescrs.scalarType(&fieldType)) 1.8748 
+ return true; 1.8749 + 1.8750 + // OK, perform the optimization 1.8751 + 1.8752 + MDefinition *typedObj = current->pop(); 1.8753 + 1.8754 + return pushScalarLoadFromTypedObject(emitted, typedObj, constantInt(fieldOffset), 1.8755 + fieldType, true); 1.8756 +} 1.8757 + 1.8758 +bool 1.8759 +IonBuilder::getPropTryComplexPropOfTypedObject(bool *emitted, 1.8760 + int32_t fieldOffset, 1.8761 + TypeDescrSet fieldDescrs, 1.8762 + size_t fieldIndex, 1.8763 + types::TemporaryTypeSet *resultTypes) 1.8764 +{ 1.8765 + // Must know the field index so that we can load the new type 1.8766 + // object for the derived value 1.8767 + if (fieldIndex == SIZE_MAX) 1.8768 + return true; 1.8769 + 1.8770 + // OK, perform the optimization 1.8771 + 1.8772 + MDefinition *typedObj = current->pop(); 1.8773 + 1.8774 + // Identify the type object for the field. 1.8775 + MDefinition *type = loadTypedObjectType(typedObj); 1.8776 + MDefinition *fieldTypeObj = typeObjectForFieldFromStructType(type, fieldIndex); 1.8777 + 1.8778 + return pushDerivedTypedObject(emitted, typedObj, constantInt(fieldOffset), 1.8779 + fieldDescrs, fieldTypeObj, true); 1.8780 +} 1.8781 + 1.8782 +bool 1.8783 +IonBuilder::getPropTryDefiniteSlot(bool *emitted, PropertyName *name, 1.8784 + bool barrier, types::TemporaryTypeSet *types) 1.8785 +{ 1.8786 + JS_ASSERT(*emitted == false); 1.8787 + types::HeapTypeSetKey property; 1.8788 + if (!getDefiniteSlot(current->peek(-1)->resultTypeSet(), name, &property)) 1.8789 + return true; 1.8790 + 1.8791 + MDefinition *obj = current->pop(); 1.8792 + MDefinition *useObj = obj; 1.8793 + if (obj->type() != MIRType_Object) { 1.8794 + MGuardObject *guard = MGuardObject::New(alloc(), obj); 1.8795 + current->add(guard); 1.8796 + useObj = guard; 1.8797 + } 1.8798 + 1.8799 + MLoadFixedSlot *fixed = MLoadFixedSlot::New(alloc(), useObj, property.maybeTypes()->definiteSlot()); 1.8800 + if (!barrier) 1.8801 + fixed->setResultType(types->getKnownMIRType()); 1.8802 + 1.8803 + current->add(fixed); 1.8804 + current->push(fixed); 1.8805 + 1.8806 + if (!pushTypeBarrier(fixed, types, barrier)) 1.8807 + return false; 1.8808 + 1.8809 + *emitted = true; 1.8810 + return true; 1.8811 +} 1.8812 + 1.8813 +bool 1.8814 +IonBuilder::getPropTryCommonGetter(bool *emitted, PropertyName *name, 1.8815 + types::TemporaryTypeSet *types) 1.8816 +{ 1.8817 + JS_ASSERT(*emitted == false); 1.8818 + 1.8819 + Shape *lastProperty = nullptr; 1.8820 + JSFunction *commonGetter = nullptr; 1.8821 + JSObject *foundProto = inspector->commonGetPropFunction(pc, &lastProperty, &commonGetter); 1.8822 + if (!foundProto) 1.8823 + return true; 1.8824 + 1.8825 + types::TemporaryTypeSet *objTypes = current->peek(-1)->resultTypeSet(); 1.8826 + MDefinition *guard = testCommonGetterSetter(objTypes, name, /* isGetter = */ true, 1.8827 + foundProto, lastProperty); 1.8828 + if (!guard) 1.8829 + return true; 1.8830 + 1.8831 + bool isDOM = objTypes->isDOMClass(); 1.8832 + 1.8833 + MDefinition *obj = current->pop(); 1.8834 + 1.8835 + if (isDOM && testShouldDOMCall(objTypes, commonGetter, JSJitInfo::Getter)) { 1.8836 + const JSJitInfo *jitinfo = commonGetter->jitInfo(); 1.8837 + MInstruction *get; 1.8838 + if (jitinfo->isInSlot) { 1.8839 + // We can't use MLoadFixedSlot here because it might not have the 1.8840 + // right aliasing behavior; we want to alias DOM setters. 
1.8841 + get = MGetDOMMember::New(alloc(), jitinfo, obj, guard); 1.8842 + } else { 1.8843 + get = MGetDOMProperty::New(alloc(), jitinfo, obj, guard); 1.8844 + } 1.8845 + current->add(get); 1.8846 + current->push(get); 1.8847 + 1.8848 + if (get->isEffectful() && !resumeAfter(get)) 1.8849 + return false; 1.8850 + 1.8851 + if (!pushDOMTypeBarrier(get, types, commonGetter)) 1.8852 + return false; 1.8853 + 1.8854 + *emitted = true; 1.8855 + return true; 1.8856 + } 1.8857 + 1.8858 + // Don't call the getter with a primitive value. 1.8859 + if (objTypes->getKnownMIRType() != MIRType_Object) { 1.8860 + MGuardObject *guardObj = MGuardObject::New(alloc(), obj); 1.8861 + current->add(guardObj); 1.8862 + obj = guardObj; 1.8863 + } 1.8864 + 1.8865 + // Spoof stack to expected state for call. 1.8866 + 1.8867 + // Make sure there's enough room. 1.8868 + if (!current->ensureHasSlots(2)) 1.8869 + return false; 1.8870 + pushConstant(ObjectValue(*commonGetter)); 1.8871 + 1.8872 + current->push(obj); 1.8873 + 1.8874 + CallInfo callInfo(alloc(), false); 1.8875 + if (!callInfo.init(current, 0)) 1.8876 + return false; 1.8877 + 1.8878 + // Inline if we can; otherwise just generate a call. 1.8879 + bool inlineable = false; 1.8880 + if (commonGetter->isInterpreted()) { 1.8881 + InliningDecision decision = makeInliningDecision(commonGetter, callInfo); 1.8882 + switch (decision) { 1.8883 + case InliningDecision_Error: 1.8884 + return false; 1.8885 + case InliningDecision_DontInline: 1.8886 + break; 1.8887 + case InliningDecision_Inline: 1.8888 + inlineable = true; 1.8889 + break; 1.8890 + } 1.8891 + } 1.8892 + 1.8893 + if (inlineable) { 1.8894 + if (!inlineScriptedCall(callInfo, commonGetter)) 1.8895 + return false; 1.8896 + } else { 1.8897 + if (!makeCall(commonGetter, callInfo, false)) 1.8898 + return false; 1.8899 + } 1.8900 + 1.8901 + *emitted = true; 1.8902 + return true; 1.8903 +} 1.8904 + 1.8905 +static bool 1.8906 +CanInlinePropertyOpShapes(const BaselineInspector::ShapeVector &shapes) 1.8907 +{ 1.8908 + for (size_t i = 0; i < shapes.length(); i++) { 1.8909 + // We inline the property access as long as the shape is not in 1.8910 + // dictionary mode. We cannot be sure that the shape is still a 1.8911 + // lastProperty, and calling Shape::search() on dictionary mode 1.8912 + // shapes that aren't lastProperty is invalid. 1.8913 + if (shapes[i]->inDictionary()) 1.8914 + return false; 1.8915 + } 1.8916 + 1.8917 + return true; 1.8918 +} 1.8919 + 1.8920 +bool 1.8921 +IonBuilder::getPropTryInlineAccess(bool *emitted, PropertyName *name, 1.8922 + bool barrier, types::TemporaryTypeSet *types) 1.8923 +{ 1.8924 + JS_ASSERT(*emitted == false); 1.8925 + if (current->peek(-1)->type() != MIRType_Object) 1.8926 + return true; 1.8927 + 1.8928 + BaselineInspector::ShapeVector shapes(alloc()); 1.8929 + if (!inspector->maybeShapesForPropertyOp(pc, shapes)) 1.8930 + return false; 1.8931 + 1.8932 + if (shapes.empty() || !CanInlinePropertyOpShapes(shapes)) 1.8933 + return true; 1.8934 + 1.8935 + MIRType rvalType = types->getKnownMIRType(); 1.8936 + if (barrier || IsNullOrUndefined(rvalType)) 1.8937 + rvalType = MIRType_Value; 1.8938 + 1.8939 + MDefinition *obj = current->pop(); 1.8940 + if (shapes.length() == 1) { 1.8941 + // In the monomorphic case, use separate ShapeGuard and LoadSlot 1.8942 + // instructions.
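+ // For example, if Baseline only ever observed one shape for |o.x|, a
+ // single shape guard followed by a direct slot load suffices.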
1.8943 + spew("Inlining monomorphic GETPROP"); 1.8944 + 1.8945 + Shape *objShape = shapes[0]; 1.8946 + obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard); 1.8947 + 1.8948 + Shape *shape = objShape->searchLinear(NameToId(name)); 1.8949 + JS_ASSERT(shape); 1.8950 + 1.8951 + if (!loadSlot(obj, shape, rvalType, barrier, types)) 1.8952 + return false; 1.8953 + } else { 1.8954 + JS_ASSERT(shapes.length() > 1); 1.8955 + spew("Inlining polymorphic GETPROP"); 1.8956 + 1.8957 + MGetPropertyPolymorphic *load = MGetPropertyPolymorphic::New(alloc(), obj, name); 1.8958 + current->add(load); 1.8959 + current->push(load); 1.8960 + 1.8961 + for (size_t i = 0; i < shapes.length(); i++) { 1.8962 + Shape *objShape = shapes[i]; 1.8963 + Shape *shape = objShape->searchLinear(NameToId(name)); 1.8964 + JS_ASSERT(shape); 1.8965 + if (!load->addShape(objShape, shape)) 1.8966 + return false; 1.8967 + } 1.8968 + 1.8969 + if (failedShapeGuard_) 1.8970 + load->setNotMovable(); 1.8971 + 1.8972 + load->setResultType(rvalType); 1.8973 + if (!pushTypeBarrier(load, types, barrier)) 1.8974 + return false; 1.8975 + } 1.8976 + 1.8977 + *emitted = true; 1.8978 + return true; 1.8979 +} 1.8980 + 1.8981 +bool 1.8982 +IonBuilder::getPropTryCache(bool *emitted, PropertyName *name, 1.8983 + bool barrier, types::TemporaryTypeSet *types) 1.8984 +{ 1.8985 + JS_ASSERT(*emitted == false); 1.8986 + 1.8987 + MDefinition *obj = current->peek(-1); 1.8988 + 1.8989 + // The input value must either be an object, or we should have strong suspicions 1.8990 + // that it can be safely unboxed to an object. 1.8991 + if (obj->type() != MIRType_Object) { 1.8992 + types::TemporaryTypeSet *types = obj->resultTypeSet(); 1.8993 + if (!types || !types->objectOrSentinel()) 1.8994 + return true; 1.8995 + } 1.8996 + 1.8997 + // Since getters have no guaranteed return values, we must barrier in order to be 1.8998 + // able to attach stubs for them. 1.8999 + if (inspector->hasSeenAccessedGetter(pc)) 1.9000 + barrier = true; 1.9001 + 1.9002 + if (needsToMonitorMissingProperties(types)) 1.9003 + barrier = true; 1.9004 + 1.9005 + // Caches can read values from prototypes, so update the barrier to 1.9006 + // reflect such possible values. 1.9007 + if (!barrier) 1.9008 + barrier = PropertyReadOnPrototypeNeedsTypeBarrier(constraints(), obj, name, types); 1.9009 + 1.9010 + current->pop(); 1.9011 + MGetPropertyCache *load = MGetPropertyCache::New(alloc(), obj, name, barrier); 1.9012 + 1.9013 + // Try to mark the cache as idempotent. 1.9014 + // 1.9015 + // In parallel execution, idempotency of caches is ignored, since we 1.9016 + // repeat the entire ForkJoin workload if we bail out. Note that it's 1.9017 + // overly restrictive to mark everything as idempotent, because we can 1.9018 + // treat non-idempotent caches in parallel as repeatable. 
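+ // Roughly, an idempotent cache is one that may be hoisted or re-executed
+ // without observable side effects, e.g. a pure load of |o.x| in a loop.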
1.9019 + if (obj->type() == MIRType_Object && !invalidatedIdempotentCache() && 1.9020 + info().executionMode() != ParallelExecution) 1.9021 + { 1.9022 + if (PropertyReadIsIdempotent(constraints(), obj, name)) 1.9023 + load->setIdempotent(); 1.9024 + } 1.9025 + 1.9026 + if (JSOp(*pc) == JSOP_CALLPROP) { 1.9027 + if (!annotateGetPropertyCache(obj, load, obj->resultTypeSet(), types)) 1.9028 + return false; 1.9029 + } 1.9030 + 1.9031 + current->add(load); 1.9032 + current->push(load); 1.9033 + 1.9034 + if (load->isEffectful() && !resumeAfter(load)) 1.9035 + return false; 1.9036 + 1.9037 + MIRType rvalType = types->getKnownMIRType(); 1.9038 + if (barrier || IsNullOrUndefined(rvalType)) 1.9039 + rvalType = MIRType_Value; 1.9040 + load->setResultType(rvalType); 1.9041 + 1.9042 + if (!pushTypeBarrier(load, types, barrier)) 1.9043 + return false; 1.9044 + 1.9045 + *emitted = true; 1.9046 + return true; 1.9047 +} 1.9048 + 1.9049 +bool 1.9050 +IonBuilder::needsToMonitorMissingProperties(types::TemporaryTypeSet *types) 1.9051 +{ 1.9052 + // GetPropertyParIC and GetElementParIC cannot safely call 1.9053 + // TypeScript::Monitor to ensure that the observed type set contains 1.9054 + // undefined. To account for possible missing properties, which property 1.9055 + // types do not track, we must always insert a type barrier. 1.9056 + return (info().executionMode() == ParallelExecution && 1.9057 + !types->hasType(types::Type::UndefinedType())); 1.9058 +} 1.9059 + 1.9060 +bool 1.9061 +IonBuilder::jsop_setprop(PropertyName *name) 1.9062 +{ 1.9063 + MDefinition *value = current->pop(); 1.9064 + MDefinition *obj = current->pop(); 1.9065 + 1.9066 + bool emitted = false; 1.9067 + 1.9068 + // Always use a call if we are doing the definite properties analysis and 1.9069 + // not actually emitting code, to simplify later analysis. 1.9070 + if (info().executionModeIsAnalysis()) { 1.9071 + MInstruction *ins = MCallSetProperty::New(alloc(), obj, value, name, script()->strict()); 1.9072 + current->add(ins); 1.9073 + current->push(value); 1.9074 + return resumeAfter(ins); 1.9075 + } 1.9076 + 1.9077 + // Add post barrier if needed. 1.9078 + if (NeedsPostBarrier(info(), value)) 1.9079 + current->add(MPostWriteBarrier::New(alloc(), obj, value)); 1.9080 + 1.9081 + // Try to inline a common property setter, or make a call. 1.9082 + if (!setPropTryCommonSetter(&emitted, obj, name, value) || emitted) 1.9083 + return emitted; 1.9084 + 1.9085 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.9086 + bool barrier = PropertyWriteNeedsTypeBarrier(alloc(), constraints(), current, &obj, name, &value, 1.9087 + /* canModify = */ true); 1.9088 + 1.9089 + // Try to emit stores to known binary data blocks 1.9090 + if (!setPropTryTypedObject(&emitted, obj, name, value) || emitted) 1.9091 + return emitted; 1.9092 + 1.9093 + // Try to emit store from definite slots. 1.9094 + if (!setPropTryDefiniteSlot(&emitted, obj, name, value, barrier, objTypes) || emitted) 1.9095 + return emitted; 1.9096 + 1.9097 + // Try to emit a monomorphic/polymorphic store based on baseline caches. 1.9098 + if (!setPropTryInlineAccess(&emitted, obj, name, value, barrier, objTypes) || emitted) 1.9099 + return emitted; 1.9100 + 1.9101 + // Try to emit a polymorphic cache. 1.9102 + if (!setPropTryCache(&emitted, obj, name, value, barrier, objTypes) || emitted) 1.9103 + return emitted; 1.9104 + 1.9105 + // Emit call. 
1.9106 + MInstruction *ins = MCallSetProperty::New(alloc(), obj, value, name, script()->strict()); 1.9107 + current->add(ins); 1.9108 + current->push(value); 1.9109 + return resumeAfter(ins); 1.9110 +} 1.9111 + 1.9112 +bool 1.9113 +IonBuilder::setPropTryCommonSetter(bool *emitted, MDefinition *obj, 1.9114 + PropertyName *name, MDefinition *value) 1.9115 +{ 1.9116 + JS_ASSERT(*emitted == false); 1.9117 + 1.9118 + Shape *lastProperty = nullptr; 1.9119 + JSFunction *commonSetter = nullptr; 1.9120 + JSObject *foundProto = inspector->commonSetPropFunction(pc, &lastProperty, &commonSetter); 1.9121 + if (!foundProto) 1.9122 + return true; 1.9123 + 1.9124 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.9125 + MDefinition *guard = testCommonGetterSetter(objTypes, name, /* isGetter = */ false, 1.9126 + foundProto, lastProperty); 1.9127 + if (!guard) 1.9128 + return true; 1.9129 + 1.9130 + bool isDOM = objTypes->isDOMClass(); 1.9131 + 1.9132 + // Emit common setter. 1.9133 + 1.9134 + // Setters can be called even if the property write needs a type 1.9135 + // barrier, as calling the setter does not actually write any data 1.9136 + // properties. 1.9137 + 1.9138 + // Try emitting dom call. 1.9139 + if (!setPropTryCommonDOMSetter(emitted, obj, value, commonSetter, isDOM)) 1.9140 + return false; 1.9141 + 1.9142 + if (*emitted) 1.9143 + return true; 1.9144 + 1.9145 + // Don't call the setter with a primitive value. 1.9146 + if (objTypes->getKnownMIRType() != MIRType_Object) { 1.9147 + MGuardObject *guardObj = MGuardObject::New(alloc(), obj); 1.9148 + current->add(guardObj); 1.9149 + obj = guardObj; 1.9150 + } 1.9151 + 1.9152 + // Dummy up the stack, as in getprop. We are pushing an extra value, so 1.9153 + // ensure there is enough space. 1.9154 + if (!current->ensureHasSlots(3)) 1.9155 + return false; 1.9156 + 1.9157 + pushConstant(ObjectValue(*commonSetter)); 1.9158 + 1.9159 + current->push(obj); 1.9160 + current->push(value); 1.9161 + 1.9162 + // Call the setter. Note that we have to push the original value, not 1.9163 + // the setter's return value. 1.9164 + CallInfo callInfo(alloc(), false); 1.9165 + if (!callInfo.init(current, 1)) 1.9166 + return false; 1.9167 + 1.9168 + // Ensure that we know we are calling a setter in case we inline it. 1.9169 + callInfo.markAsSetter(); 1.9170 + 1.9171 + // Inline the setter if we can. 
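+ // For example, a small scripted setter like |set x(v) { this._x = v; }|
+ // may be inlined here instead of emitted as a call.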
1.9172 + if (commonSetter->isInterpreted()) { 1.9173 + InliningDecision decision = makeInliningDecision(commonSetter, callInfo); 1.9174 + switch (decision) { 1.9175 + case InliningDecision_Error: 1.9176 + return false; 1.9177 + case InliningDecision_DontInline: 1.9178 + break; 1.9179 + case InliningDecision_Inline: 1.9180 + if (!inlineScriptedCall(callInfo, commonSetter)) 1.9181 + return false; 1.9182 + *emitted = true; 1.9183 + return true; 1.9184 + } 1.9185 + } 1.9186 + 1.9187 + MCall *call = makeCallHelper(commonSetter, callInfo, false); 1.9188 + if (!call) 1.9189 + return false; 1.9190 + 1.9191 + current->push(value); 1.9192 + if (!resumeAfter(call)) 1.9193 + return false; 1.9194 + 1.9195 + *emitted = true; 1.9196 + return true; 1.9197 +} 1.9198 + 1.9199 +bool 1.9200 +IonBuilder::setPropTryCommonDOMSetter(bool *emitted, MDefinition *obj, 1.9201 + MDefinition *value, JSFunction *setter, 1.9202 + bool isDOM) 1.9203 +{ 1.9204 + JS_ASSERT(*emitted == false); 1.9205 + 1.9206 + if (!isDOM) 1.9207 + return true; 1.9208 + 1.9209 + types::TemporaryTypeSet *objTypes = obj->resultTypeSet(); 1.9210 + if (!testShouldDOMCall(objTypes, setter, JSJitInfo::Setter)) 1.9211 + return true; 1.9212 + 1.9213 + // Emit SetDOMProperty. 1.9214 + JS_ASSERT(setter->jitInfo()->type() == JSJitInfo::Setter); 1.9215 + MSetDOMProperty *set = MSetDOMProperty::New(alloc(), setter->jitInfo()->setter, obj, value); 1.9216 + 1.9217 + current->add(set); 1.9218 + current->push(value); 1.9219 + 1.9220 + if (!resumeAfter(set)) 1.9221 + return false; 1.9222 + 1.9223 + *emitted = true; 1.9224 + return true; 1.9225 +} 1.9226 + 1.9227 +bool 1.9228 +IonBuilder::setPropTryTypedObject(bool *emitted, MDefinition *obj, 1.9229 + PropertyName *name, MDefinition *value) 1.9230 +{ 1.9231 + TypeDescrSet fieldDescrs; 1.9232 + int32_t fieldOffset; 1.9233 + size_t fieldIndex; 1.9234 + if (!lookupTypedObjectField(obj, name, &fieldOffset, &fieldDescrs, 1.9235 + &fieldIndex)) 1.9236 + return false; 1.9237 + if (fieldDescrs.empty()) 1.9238 + return true; 1.9239 + 1.9240 + switch (fieldDescrs.kind()) { 1.9241 + case TypeDescr::X4: 1.9242 + // FIXME (bug 894104): store into a MIRType_float32x4 etc 1.9243 + return true; 1.9244 + 1.9245 + case TypeDescr::Reference: 1.9246 + case TypeDescr::Struct: 1.9247 + case TypeDescr::SizedArray: 1.9248 + case TypeDescr::UnsizedArray: 1.9249 + // For now, only optimize storing scalars. 1.9250 + return true; 1.9251 + 1.9252 + case TypeDescr::Scalar: 1.9253 + return setPropTryScalarPropOfTypedObject(emitted, obj, fieldOffset, 1.9254 + value, fieldDescrs); 1.9255 + } 1.9256 + 1.9257 + MOZ_ASSUME_UNREACHABLE("Unknown kind"); 1.9258 +} 1.9259 + 1.9260 +bool 1.9261 +IonBuilder::setPropTryScalarPropOfTypedObject(bool *emitted, 1.9262 + MDefinition *obj, 1.9263 + int32_t fieldOffset, 1.9264 + MDefinition *value, 1.9265 + TypeDescrSet fieldDescrs) 1.9266 +{ 1.9267 + // Must always be storing the same scalar type. 1.9268 + ScalarTypeDescr::Type fieldType; 1.9269 + if (!fieldDescrs.scalarType(&fieldType)) 1.9270 + return true; 1.9271 + 1.9272 + // OK! Perform the optimization.
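+ // For instance (assuming the Typed Objects API), a store to |p.x| where
+ // |p| is an instance of a struct type with an int32 field "x" becomes a
+ // raw int32 write at the field's byte offset.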
1.9273 + 1.9274 + if (!storeScalarTypedObjectValue(obj, constantInt(fieldOffset), fieldType, true, false, value)) 1.9275 + return false; 1.9276 + 1.9277 + current->push(value); 1.9278 + 1.9279 + *emitted = true; 1.9280 + return true; 1.9281 +} 1.9282 + 1.9283 +bool 1.9284 +IonBuilder::setPropTryDefiniteSlot(bool *emitted, MDefinition *obj, 1.9285 + PropertyName *name, MDefinition *value, 1.9286 + bool barrier, types::TemporaryTypeSet *objTypes) 1.9287 +{ 1.9288 + JS_ASSERT(*emitted == false); 1.9289 + 1.9290 + if (barrier) 1.9291 + return true; 1.9292 + 1.9293 + types::HeapTypeSetKey property; 1.9294 + if (!getDefiniteSlot(obj->resultTypeSet(), name, &property)) 1.9295 + return true; 1.9296 + 1.9297 + if (property.nonWritable(constraints())) 1.9298 + return true; 1.9299 + 1.9300 + MStoreFixedSlot *fixed = MStoreFixedSlot::New(alloc(), obj, property.maybeTypes()->definiteSlot(), value); 1.9301 + current->add(fixed); 1.9302 + current->push(value); 1.9303 + 1.9304 + if (property.needsBarrier(constraints())) 1.9305 + fixed->setNeedsBarrier(); 1.9306 + 1.9307 + if (!resumeAfter(fixed)) 1.9308 + return false; 1.9309 + 1.9310 + *emitted = true; 1.9311 + return true; 1.9312 +} 1.9313 + 1.9314 +bool 1.9315 +IonBuilder::setPropTryInlineAccess(bool *emitted, MDefinition *obj, 1.9316 + PropertyName *name, 1.9317 + MDefinition *value, bool barrier, 1.9318 + types::TemporaryTypeSet *objTypes) 1.9319 +{ 1.9320 + JS_ASSERT(*emitted == false); 1.9321 + 1.9322 + if (barrier) 1.9323 + return true; 1.9324 + 1.9325 + BaselineInspector::ShapeVector shapes(alloc()); 1.9326 + if (!inspector->maybeShapesForPropertyOp(pc, shapes)) 1.9327 + return false; 1.9328 + 1.9329 + if (shapes.empty()) 1.9330 + return true; 1.9331 + 1.9332 + if (!CanInlinePropertyOpShapes(shapes)) 1.9333 + return true; 1.9334 + 1.9335 + if (shapes.length() == 1) { 1.9336 + spew("Inlining monomorphic SETPROP"); 1.9337 + 1.9338 + // The Baseline IC was monomorphic, so we inline the property access as 1.9339 + // long as the shape is not in dictionary mode. We cannot be sure 1.9340 + // that the shape is still a lastProperty, and calling Shape::search 1.9341 + // on dictionary mode shapes that aren't lastProperty is invalid. 
1.9342 + Shape *objShape = shapes[0]; 1.9343 + obj = addShapeGuard(obj, objShape, Bailout_ShapeGuard); 1.9344 + 1.9345 + Shape *shape = objShape->searchLinear(NameToId(name)); 1.9346 + JS_ASSERT(shape); 1.9347 + 1.9348 + bool needsBarrier = objTypes->propertyNeedsBarrier(constraints(), NameToId(name)); 1.9349 + if (!storeSlot(obj, shape, value, needsBarrier)) 1.9350 + return false; 1.9351 + } else { 1.9352 + JS_ASSERT(shapes.length() > 1); 1.9353 + spew("Inlining polymorphic SETPROP"); 1.9354 + 1.9355 + MSetPropertyPolymorphic *ins = MSetPropertyPolymorphic::New(alloc(), obj, value); 1.9356 + current->add(ins); 1.9357 + current->push(value); 1.9358 + 1.9359 + for (size_t i = 0; i < shapes.length(); i++) { 1.9360 + Shape *objShape = shapes[i]; 1.9361 + Shape *shape = objShape->searchLinear(NameToId(name)); 1.9362 + JS_ASSERT(shape); 1.9363 + if (!ins->addShape(objShape, shape)) 1.9364 + return false; 1.9365 + } 1.9366 + 1.9367 + if (objTypes->propertyNeedsBarrier(constraints(), NameToId(name))) 1.9368 + ins->setNeedsBarrier(); 1.9369 + 1.9370 + if (!resumeAfter(ins)) 1.9371 + return false; 1.9372 + } 1.9373 + 1.9374 + *emitted = true; 1.9375 + return true; 1.9376 +} 1.9377 + 1.9378 +bool 1.9379 +IonBuilder::setPropTryCache(bool *emitted, MDefinition *obj, 1.9380 + PropertyName *name, MDefinition *value, 1.9381 + bool barrier, types::TemporaryTypeSet *objTypes) 1.9382 +{ 1.9383 + JS_ASSERT(*emitted == false); 1.9384 + 1.9385 + // Emit SetPropertyCache. 1.9386 + MSetPropertyCache *ins = MSetPropertyCache::New(alloc(), obj, value, name, script()->strict(), barrier); 1.9387 + 1.9388 + if (!objTypes || objTypes->propertyNeedsBarrier(constraints(), NameToId(name))) 1.9389 + ins->setNeedsBarrier(); 1.9390 + 1.9391 + current->add(ins); 1.9392 + current->push(value); 1.9393 + 1.9394 + if (!resumeAfter(ins)) 1.9395 + return false; 1.9396 + 1.9397 + *emitted = true; 1.9398 + return true; 1.9399 +} 1.9400 + 1.9401 +bool 1.9402 +IonBuilder::jsop_delprop(PropertyName *name) 1.9403 +{ 1.9404 + MDefinition *obj = current->pop(); 1.9405 + 1.9406 + MInstruction *ins = MDeleteProperty::New(alloc(), obj, name); 1.9407 + 1.9408 + current->add(ins); 1.9409 + current->push(ins); 1.9410 + 1.9411 + return resumeAfter(ins); 1.9412 +} 1.9413 + 1.9414 +bool 1.9415 +IonBuilder::jsop_delelem() 1.9416 +{ 1.9417 + MDefinition *index = current->pop(); 1.9418 + MDefinition *obj = current->pop(); 1.9419 + 1.9420 + MDeleteElement *ins = MDeleteElement::New(alloc(), obj, index); 1.9421 + current->add(ins); 1.9422 + current->push(ins); 1.9423 + 1.9424 + return resumeAfter(ins); 1.9425 +} 1.9426 + 1.9427 +bool 1.9428 +IonBuilder::jsop_regexp(RegExpObject *reobj) 1.9429 +{ 1.9430 + // JS semantics require regular expression literals to create different 1.9431 + // objects every time they execute. We only need to do this cloning if the 1.9432 + // script could actually observe the effect of such cloning, for instance 1.9433 + // by getting or setting properties on it. 1.9434 + // 1.9435 + // First, make sure the regex is one we can safely optimize. Lowering can 1.9436 + // then check if this regex object only flows into known natives and can 1.9437 + // avoid cloning in this case. 
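+ // For example, a non-global literal like |/abc/| can often avoid cloning,
+ // while |/abc/g| must clone because its mutable lastIndex is observable
+ // state.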
1.9438 + 1.9439 + bool mustClone = true; 1.9440 + types::TypeObjectKey *typeObj = types::TypeObjectKey::get(&script()->global()); 1.9441 + if (!typeObj->hasFlags(constraints(), types::OBJECT_FLAG_REGEXP_FLAGS_SET)) { 1.9442 + RegExpStatics *res = script()->global().getRegExpStatics(); 1.9443 + 1.9444 + DebugOnly<uint32_t> origFlags = reobj->getFlags(); 1.9445 + DebugOnly<uint32_t> staticsFlags = res->getFlags(); 1.9446 + JS_ASSERT((origFlags & staticsFlags) == staticsFlags); 1.9447 + 1.9448 + if (!reobj->global() && !reobj->sticky()) 1.9449 + mustClone = false; 1.9450 + } 1.9451 + 1.9452 + MRegExp *regexp = MRegExp::New(alloc(), constraints(), reobj, mustClone); 1.9453 + current->add(regexp); 1.9454 + current->push(regexp); 1.9455 + 1.9456 + return true; 1.9457 +} 1.9458 + 1.9459 +bool 1.9460 +IonBuilder::jsop_object(JSObject *obj) 1.9461 +{ 1.9462 + if (options.cloneSingletons()) { 1.9463 + MCloneLiteral *clone = MCloneLiteral::New(alloc(), constant(ObjectValue(*obj))); 1.9464 + current->add(clone); 1.9465 + current->push(clone); 1.9466 + return resumeAfter(clone); 1.9467 + } 1.9468 + 1.9469 + compartment->setSingletonsAsValues(); 1.9470 + pushConstant(ObjectValue(*obj)); 1.9471 + return true; 1.9472 +} 1.9473 + 1.9474 +bool 1.9475 +IonBuilder::jsop_lambda(JSFunction *fun) 1.9476 +{ 1.9477 + MOZ_ASSERT(analysis().usesScopeChain()); 1.9478 + MOZ_ASSERT(!fun->isArrow()); 1.9479 + 1.9480 + if (fun->isNative() && IsAsmJSModuleNative(fun->native())) 1.9481 + return abort("asm.js module function"); 1.9482 + 1.9483 + MLambda *ins = MLambda::New(alloc(), constraints(), current->scopeChain(), fun); 1.9484 + current->add(ins); 1.9485 + current->push(ins); 1.9486 + 1.9487 + return resumeAfter(ins); 1.9488 +} 1.9489 + 1.9490 +bool 1.9491 +IonBuilder::jsop_lambda_arrow(JSFunction *fun) 1.9492 +{ 1.9493 + MOZ_ASSERT(analysis().usesScopeChain()); 1.9494 + MOZ_ASSERT(fun->isArrow()); 1.9495 + MOZ_ASSERT(!fun->isNative()); 1.9496 + 1.9497 + MDefinition *thisDef = current->pop(); 1.9498 + 1.9499 + MLambdaArrow *ins = MLambdaArrow::New(alloc(), constraints(), current->scopeChain(), 1.9500 + thisDef, fun); 1.9501 + current->add(ins); 1.9502 + current->push(ins); 1.9503 + 1.9504 + return resumeAfter(ins); 1.9505 +} 1.9506 + 1.9507 +bool 1.9508 +IonBuilder::jsop_setarg(uint32_t arg) 1.9509 +{ 1.9510 + // To handle this case, we should spill the arguments to the space where 1.9511 + // actual arguments are stored. The tricky part is that if we add a MIR 1.9512 + // to wrap the spilling action, we don't want the spilling to be 1.9513 + // captured by the GETARG and by the resume point, only by 1.9514 + // MGetFrameArgument. 1.9515 + JS_ASSERT(analysis_.hasSetArg()); 1.9516 + MDefinition *val = current->peek(-1); 1.9517 + 1.9518 + // If an arguments object is in use, and it aliases formals, then all SETARGs 1.9519 + // must go through the arguments object. 1.9520 + if (info().argsObjAliasesFormals()) { 1.9521 + if (NeedsPostBarrier(info(), val)) 1.9522 + current->add(MPostWriteBarrier::New(alloc(), current->argumentsObject(), val)); 1.9523 + current->add(MSetArgumentsObjectArg::New(alloc(), current->argumentsObject(), 1.9524 + GET_ARGNO(pc), val)); 1.9525 + return true; 1.9526 + } 1.9527 + 1.9528 + // :TODO: if hasArguments() is true, and the script has a JSOP_SETARG, then 1.9529 + // convert all arg accesses to go through the arguments object. 
    // (see Bug 957475)
    if (info().hasArguments())
        return abort("NYI: arguments & setarg.");

    // Otherwise, if the magic arguments are in use, alias the formals, and the
    // script contains arguments[...] GETELEM expressions, then MSetFrameArgument
    // must be used. If the script contains no arguments[...] GETELEM expressions
    // and no arguments object is required, then a write to an aliased argument
    // can never be observed, and the frame does not actually need to be updated
    // with the new argument value.
    if (info().argumentsAliasesFormals()) {
        // JSOP_SETARG with magic arguments within inline frames is not yet supported.
        JS_ASSERT(script()->uninlineable() && !isInlineBuilder());

        MSetFrameArgument *store = MSetFrameArgument::New(alloc(), arg, val);
        modifiesFrameArguments_ = true;
        current->add(store);
        current->setArg(arg);
        return true;
    }

    // If this assignment is at the start of the function and is coercing
    // the original value for the argument which was passed in, loosen
    // the type information for that original argument if it is currently
    // empty due to originally executing in the interpreter.
    if (graph().numBlocks() == 1 &&
        (val->isBitOr() || val->isBitAnd() || val->isMul() /* for JSOP_POS */))
    {
        for (size_t i = 0; i < val->numOperands(); i++) {
            MDefinition *op = val->getOperand(i);
            if (op->isParameter() &&
                op->toParameter()->index() == (int32_t)arg &&
                op->resultTypeSet() &&
                op->resultTypeSet()->empty())
            {
                bool otherUses = false;
                for (MUseDefIterator iter(op); iter; iter++) {
                    MDefinition *def = iter.def();
                    if (def == val)
                        continue;
                    otherUses = true;
                }
                if (!otherUses) {
                    JS_ASSERT(op->resultTypeSet() == &argTypes[arg]);
                    argTypes[arg].addType(types::Type::UnknownType(), alloc_->lifoAlloc());
                    if (val->isMul()) {
                        val->setResultType(MIRType_Double);
                        val->toMul()->setSpecialization(MIRType_Double);
                    } else {
                        JS_ASSERT(val->type() == MIRType_Int32);
                    }
                    val->setResultTypeSet(nullptr);
                }
            }
        }
    }

    current->setArg(arg);
    return true;
}

bool
IonBuilder::jsop_defvar(uint32_t index)
{
    JS_ASSERT(JSOp(*pc) == JSOP_DEFVAR || JSOp(*pc) == JSOP_DEFCONST);

    PropertyName *name = script()->getName(index);

    // Bake in the property attributes.
    unsigned attrs = JSPROP_ENUMERATE | JSPROP_PERMANENT;
    if (JSOp(*pc) == JSOP_DEFCONST)
        attrs |= JSPROP_READONLY;

    // Pass the scope chain.
    JS_ASSERT(analysis().usesScopeChain());

    // Bake the name pointer into the MDefVar.
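    // (Illustrative: JSOP_DEFVAR / JSOP_DEFCONST are emitted for declarations
    // such as `var x;` or `const y = 1;` in global or eval code, where the
    // binding must be defined on the variables object at runtime rather than
    // living in a stack slot.)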
    MDefVar *defvar = MDefVar::New(alloc(), name, attrs, current->scopeChain());
    current->add(defvar);

    return resumeAfter(defvar);
}

bool
IonBuilder::jsop_deffun(uint32_t index)
{
    JSFunction *fun = script()->getFunction(index);
    if (fun->isNative() && IsAsmJSModuleNative(fun->native()))
        return abort("asm.js module function");

    JS_ASSERT(analysis().usesScopeChain());

    MDefFun *deffun = MDefFun::New(alloc(), fun, current->scopeChain());
    current->add(deffun);

    return resumeAfter(deffun);
}

bool
IonBuilder::jsop_this()
{
    if (!info().funMaybeLazy())
        return abort("JSOP_THIS outside of a JSFunction.");

    if (info().funMaybeLazy()->isArrow()) {
        // Arrow functions store their lexical |this| in an extended slot.
        MLoadArrowThis *thisObj = MLoadArrowThis::New(alloc(), getCallee());
        current->add(thisObj);
        current->push(thisObj);
        return true;
    }

    if (script()->strict() || info().funMaybeLazy()->isSelfHostedBuiltin()) {
        // No need to wrap primitive |this| in strict mode or self-hosted code.
        current->pushSlot(info().thisSlot());
        return true;
    }

    if (thisTypes->getKnownMIRType() == MIRType_Object ||
        (thisTypes->empty() && baselineFrame_ && baselineFrame_->thisType.isSomeObject()))
    {
        // This is safe, because if the entry type of |this| is an object, it
        // will necessarily be an object throughout the entire function. OSR
        // can introduce a phi, but this phi will be specialized.
        current->pushSlot(info().thisSlot());
        return true;
    }

    // If we are doing an analysis, we might not yet know the type of |this|.
    // Instead of bailing out, just push the |this| slot: this code won't
    // actually execute, so it does not matter whether |this| is primitive.
    if (info().executionModeIsAnalysis()) {
        current->pushSlot(info().thisSlot());
        return true;
    }

    // Hard case: |this| may be a primitive we have to wrap.
    MDefinition *def = current->getSlot(info().thisSlot());

    if (def->type() == MIRType_Object) {
        // If we already computed a |this| object, we can reuse it.
        current->push(def);
        return true;
    }

    MComputeThis *thisObj = MComputeThis::New(alloc(), def);
    current->add(thisObj);
    current->push(thisObj);

    current->setSlot(info().thisSlot(), thisObj);

    return resumeAfter(thisObj);
}

bool
IonBuilder::jsop_typeof()
{
    MDefinition *input = current->pop();
    MTypeOf *ins = MTypeOf::New(alloc(), input, input->type());

    ins->infer();

    current->add(ins);
    current->push(ins);

    return true;
}

bool
IonBuilder::jsop_toid()
{
    // No-op if the index is an integer.
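    // (e.g. in a compound assignment such as `obj[i] += 1` with an int32 `i`,
    // the index can be reused directly as a property id, so no conversion
    // instruction is needed.)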
    if (current->peek(-1)->type() == MIRType_Int32)
        return true;

    MDefinition *index = current->pop();
    MToId *ins = MToId::New(alloc(), current->peek(-1), index);

    current->add(ins);
    current->push(ins);

    return resumeAfter(ins);
}

bool
IonBuilder::jsop_iter(uint8_t flags)
{
    if (flags != JSITER_ENUMERATE)
        nonStringIteration_ = true;

    MDefinition *obj = current->pop();
    MInstruction *ins = MIteratorStart::New(alloc(), obj, flags);

    if (!iterators_.append(ins))
        return false;

    current->add(ins);
    current->push(ins);

    return resumeAfter(ins);
}

bool
IonBuilder::jsop_iternext()
{
    MDefinition *iter = current->peek(-1);
    MInstruction *ins = MIteratorNext::New(alloc(), iter);

    current->add(ins);
    current->push(ins);

    if (!resumeAfter(ins))
        return false;

    if (!nonStringIteration_ && !inspector->hasSeenNonStringIterNext(pc)) {
        ins = MUnbox::New(alloc(), ins, MIRType_String, MUnbox::Fallible, Bailout_BaselineInfo);
        current->add(ins);
        current->rewriteAtDepth(-1, ins);
    }

    return true;
}

bool
IonBuilder::jsop_itermore()
{
    MDefinition *iter = current->peek(-1);
    MInstruction *ins = MIteratorMore::New(alloc(), iter);

    current->add(ins);
    current->push(ins);

    return resumeAfter(ins);
}

bool
IonBuilder::jsop_iterend()
{
    MDefinition *iter = current->pop();
    MInstruction *ins = MIteratorEnd::New(alloc(), iter);

    current->add(ins);

    return resumeAfter(ins);
}

MDefinition *
IonBuilder::walkScopeChain(unsigned hops)
{
    MDefinition *scope = current->getSlot(info().scopeChainSlot());

    for (unsigned i = 0; i < hops; i++) {
        MInstruction *ins = MEnclosingScope::New(alloc(), scope);
        current->add(ins);
        scope = ins;
    }

    return scope;
}

bool
IonBuilder::hasStaticScopeObject(ScopeCoordinate sc, JSObject **pcall)
{
    JSScript *outerScript = ScopeCoordinateFunctionScript(script(), pc);
    if (!outerScript || !outerScript->treatAsRunOnce())
        return false;

    types::TypeObjectKey *funType =
        types::TypeObjectKey::get(outerScript->functionNonDelazifying());
    if (funType->hasFlags(constraints(), types::OBJECT_FLAG_RUNONCE_INVALIDATED))
        return false;

    // The script this aliased var operation is accessing will run only once,
    // so there will be only one call object and the aliased var access can be
    // compiled in the same manner as a global access. We still need to find
    // the call object though.

    // Look for the call object on the current script's function's scope chain.
    // If the current script is inner to the outer script and the function has
    // singleton type then it should show up here.
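    //
    // (Illustrative: in run-once code such as the immediately-invoked
    //
    //   (function() {
    //       var x = 0;
    //       [1, 2, 3].forEach(function(n) { x += n; });
    //   })();
    //
    // the outer function runs exactly once, so the inner function's aliased
    // accesses to `x` always refer to the same singleton CallObject.)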

    MDefinition *scope = current->getSlot(info().scopeChainSlot());
    scope->setImplicitlyUsedUnchecked();

    JSObject *environment = script()->functionNonDelazifying()->environment();
    while (environment && !environment->is<GlobalObject>()) {
        if (environment->is<CallObject>() &&
            !environment->as<CallObject>().isForEval() &&
            environment->as<CallObject>().callee().nonLazyScript() == outerScript)
        {
            JS_ASSERT(environment->hasSingletonType());
            *pcall = environment;
            return true;
        }
        environment = environment->enclosingScope();
    }

    // Look for the call object on the current frame, if we are compiling the
    // outer script itself. Don't do this if we are at entry to the outer
    // script, as the call object we see will not be the real one: after
    // entering the Ion code a different call object will be created.

    if (script() == outerScript && baselineFrame_ && info().osrPc()) {
        JSObject *singletonScope = baselineFrame_->singletonScopeChain;
        if (singletonScope &&
            singletonScope->is<CallObject>() &&
            singletonScope->as<CallObject>().callee().nonLazyScript() == outerScript)
        {
            JS_ASSERT(singletonScope->hasSingletonType());
            *pcall = singletonScope;
            return true;
        }
    }

    return true;
}

bool
IonBuilder::jsop_getaliasedvar(ScopeCoordinate sc)
{
    JSObject *call = nullptr;
    if (hasStaticScopeObject(sc, &call) && call) {
        PropertyName *name = ScopeCoordinateName(scopeCoordinateNameCache, script(), pc);
        bool succeeded;
        if (!getStaticName(call, name, &succeeded))
            return false;
        if (succeeded)
            return true;
    }

    MDefinition *obj = walkScopeChain(sc.hops());

    Shape *shape = ScopeCoordinateToStaticScopeShape(script(), pc);

    MInstruction *load;
    if (shape->numFixedSlots() <= sc.slot()) {
        MInstruction *slots = MSlots::New(alloc(), obj);
        current->add(slots);

        load = MLoadSlot::New(alloc(), slots, sc.slot() - shape->numFixedSlots());
    } else {
        load = MLoadFixedSlot::New(alloc(), obj, sc.slot());
    }

    current->add(load);
    current->push(load);

    types::TemporaryTypeSet *types = bytecodeTypes(pc);
    return pushTypeBarrier(load, types, true);
}

bool
IonBuilder::jsop_setaliasedvar(ScopeCoordinate sc)
{
    JSObject *call = nullptr;
    if (hasStaticScopeObject(sc, &call)) {
        uint32_t depth = current->stackDepth() + 1;
        if (depth > current->nslots()) {
            if (!current->increaseSlots(depth - current->nslots()))
                return false;
        }
        MDefinition *value = current->pop();
        PropertyName *name = ScopeCoordinateName(scopeCoordinateNameCache, script(), pc);

        if (call) {
            // Push the object on the stack to match the bound object expected in
            // the global and property set cases.
            pushConstant(ObjectValue(*call));
            current->push(value);
            return setStaticName(call, name);
        }

        // The call object has type information we need to respect but we
        // couldn't find it.
        // Just do a normal property assignment.
        MDefinition *obj = walkScopeChain(sc.hops());
        current->push(obj);
        current->push(value);
        return jsop_setprop(name);
    }

    MDefinition *rval = current->peek(-1);
    MDefinition *obj = walkScopeChain(sc.hops());

    Shape *shape = ScopeCoordinateToStaticScopeShape(script(), pc);

    if (NeedsPostBarrier(info(), rval))
        current->add(MPostWriteBarrier::New(alloc(), obj, rval));

    MInstruction *store;
    if (shape->numFixedSlots() <= sc.slot()) {
        MInstruction *slots = MSlots::New(alloc(), obj);
        current->add(slots);

        store = MStoreSlot::NewBarriered(alloc(), slots, sc.slot() - shape->numFixedSlots(), rval);
    } else {
        store = MStoreFixedSlot::NewBarriered(alloc(), obj, sc.slot(), rval);
    }

    current->add(store);
    return resumeAfter(store);
}

bool
IonBuilder::jsop_in()
{
    MDefinition *obj = current->peek(-1);
    MDefinition *id = current->peek(-2);

    if (ElementAccessIsDenseNative(obj, id) &&
        !ElementAccessHasExtraIndexedProperty(constraints(), obj))
    {
        return jsop_in_dense();
    }

    current->pop();
    current->pop();
    MIn *ins = MIn::New(alloc(), id, obj);

    current->add(ins);
    current->push(ins);

    return resumeAfter(ins);
}

bool
IonBuilder::jsop_in_dense()
{
    MDefinition *obj = current->pop();
    MDefinition *id = current->pop();

    bool needsHoleCheck = !ElementAccessIsPacked(constraints(), obj);

    // Ensure the index is an integer.
    MInstruction *idInt32 = MToInt32::New(alloc(), id);
    current->add(idInt32);
    id = idInt32;

    // Get the elements vector.
    MElements *elements = MElements::New(alloc(), obj);
    current->add(elements);

    MInitializedLength *initLength = MInitializedLength::New(alloc(), elements);
    current->add(initLength);

    // Check that id < initLength and that elements[id] is not a hole.
    MInArray *ins = MInArray::New(alloc(), elements, id, initLength, obj, needsHoleCheck);

    current->add(ins);
    current->push(ins);

    return true;
}

bool
IonBuilder::jsop_instanceof()
{
    MDefinition *rhs = current->pop();
    MDefinition *obj = current->pop();

    // If this is an 'x instanceof function' operation and we can determine the
    // exact function and prototype object being tested for, use a typed path.
    do {
        types::TemporaryTypeSet *rhsTypes = rhs->resultTypeSet();
        JSObject *rhsObject = rhsTypes ?
                              rhsTypes->getSingleton() : nullptr;
        if (!rhsObject || !rhsObject->is<JSFunction>() || rhsObject->isBoundFunction())
            break;

        types::TypeObjectKey *rhsType = types::TypeObjectKey::get(rhsObject);
        if (rhsType->unknownProperties())
            break;

        types::HeapTypeSetKey protoProperty =
            rhsType->property(NameToId(names().prototype));
        JSObject *protoObject = protoProperty.singleton(constraints());
        if (!protoObject)
            break;

        rhs->setImplicitlyUsedUnchecked();

        MInstanceOf *ins = MInstanceOf::New(alloc(), obj, protoObject);

        current->add(ins);
        current->push(ins);

        return resumeAfter(ins);
    } while (false);

    MCallInstanceOf *ins = MCallInstanceOf::New(alloc(), obj, rhs);

    current->add(ins);
    current->push(ins);

    return resumeAfter(ins);
}

MInstruction *
IonBuilder::addConvertElementsToDoubles(MDefinition *elements)
{
    MInstruction *convert = MConvertElementsToDoubles::New(alloc(), elements);
    current->add(convert);
    return convert;
}

MInstruction *
IonBuilder::addBoundsCheck(MDefinition *index, MDefinition *length)
{
    MInstruction *check = MBoundsCheck::New(alloc(), index, length);
    current->add(check);

    // If a bounds check failed in the past, don't optimize bounds checks.
    if (failedBoundsCheck_)
        check->setNotMovable();

    return check;
}

MInstruction *
IonBuilder::addShapeGuard(MDefinition *obj, Shape *const shape, BailoutKind bailoutKind)
{
    MGuardShape *guard = MGuardShape::New(alloc(), obj, shape, bailoutKind);
    current->add(guard);

    // If a shape guard failed in the past, don't optimize shape guards.
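    // (Keeping the guard not movable prevents passes such as LICM from
    // hoisting it out of loops, so a guard that is prone to failing bails
    // out at its original position instead of repeatedly invalidating the
    // surrounding code.)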
    if (failedShapeGuard_)
        guard->setNotMovable();

    return guard;
}

types::TemporaryTypeSet *
IonBuilder::bytecodeTypes(jsbytecode *pc)
{
    return types::TypeScript::BytecodeTypes(script(), pc, bytecodeTypeMap, &typeArrayHint, typeArray);
}

TypeDescrSetHash *
IonBuilder::getOrCreateDescrSetHash()
{
    if (!descrSetHash_) {
        TypeDescrSetHash *hash =
            alloc_->lifoAlloc()->new_<TypeDescrSetHash>(alloc());
        if (!hash || !hash->init())
            return nullptr;

        descrSetHash_ = hash;
    }
    return descrSetHash_;
}

bool
IonBuilder::lookupTypeDescrSet(MDefinition *typedObj,
                               TypeDescrSet *out)
{
    *out = TypeDescrSet(); // default to unknown

    // Extract the TypeDescrSet directly if we can.
    if (typedObj->isNewDerivedTypedObject()) {
        *out = typedObj->toNewDerivedTypedObject()->set();
        return true;
    }

    types::TemporaryTypeSet *types = typedObj->resultTypeSet();
    return typeSetToTypeDescrSet(types, out);
}

bool
IonBuilder::typeSetToTypeDescrSet(types::TemporaryTypeSet *types,
                                  TypeDescrSet *out)
{
    // We can only handle known object types.
    if (!types || types->getKnownMIRType() != MIRType_Object)
        return true;

    // And only known objects.
    if (types->unknownObject())
        return true;

    TypeDescrSetBuilder set;
    for (uint32_t i = 0; i < types->getObjectCount(); i++) {
        types::TypeObject *type = types->getTypeObject(i);
        if (!type || type->unknownProperties())
            return true;

        if (!type->hasTypedObject())
            return true;

        TypeDescr &descr = type->typedObject()->descr();
        if (!set.insert(&descr))
            return false;
    }

    return set.build(*this, out);
}

MDefinition *
IonBuilder::loadTypedObjectType(MDefinition *typedObj)
{
    // Short-circuit derived type objects, meaning the intermediate
    // objects created to represent `a.b` in an expression like
    // `a.b.c`. In that case, the type object can simply be pulled
    // from the operands of that instruction.
    if (typedObj->isNewDerivedTypedObject())
        return typedObj->toNewDerivedTypedObject()->type();

    MInstruction *load = MLoadFixedSlot::New(alloc(), typedObj,
                                             JS_TYPEDOBJ_SLOT_TYPE_DESCR);
    current->add(load);
    return load;
}

// Given a typed object `typedObj` and an offset `offset` into that
// object's data, returns another typed object and an adjusted offset
// where the data can be found. Often, these returned values are the
// same as the inputs, but in cases where intermediate derived type
// objects have been created, the return values will remove
// intermediate layers (often rendering those derived type objects
// into dead code).
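// (Illustrative: for `a.b.c` where `a.b` yields a derived typed object, the
// access to `.c` is redirected to load from `a`'s storage at
// offset-of(b) + offset-of(c), leaving the derived object for `a.b` unused
// and eligible for dead-code elimination.)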
void
IonBuilder::loadTypedObjectData(MDefinition *typedObj,
                                MDefinition *offset,
                                bool canBeNeutered,
                                MDefinition **owner,
                                MDefinition **ownerOffset)
{
    JS_ASSERT(typedObj->type() == MIRType_Object);
    JS_ASSERT(offset->type() == MIRType_Int32);

    // Short-circuit derived type objects, meaning the intermediate
    // objects created to represent `a.b` in an expression like
    // `a.b.c`. In that case, the owner and a base offset can be
    // pulled from the operands of the instruction and combined with
    // `offset`.
    if (typedObj->isNewDerivedTypedObject()) {
        MNewDerivedTypedObject *ins = typedObj->toNewDerivedTypedObject();

        // Note: we never need to check for neutering on this path,
        // because when we create the derived typed object, we check
        // for neutering there, if needed.

        MAdd *offsetAdd = MAdd::NewAsmJS(alloc(), ins->offset(), offset, MIRType_Int32);
        current->add(offsetAdd);

        *owner = ins->owner();
        *ownerOffset = offsetAdd;
        return;
    }

    if (canBeNeutered) {
        MNeuterCheck *chk = MNeuterCheck::New(alloc(), typedObj);
        current->add(chk);
        typedObj = chk;
    }

    *owner = typedObj;
    *ownerOffset = offset;
}

// Takes as input a typed object, an offset into that typed object's
// memory, and the type repr of the data found at that offset. Returns
// the elements pointer and a scaled offset. The scaled offset is
// expressed in units of `unit`; when working with typed array MIR,
// this is typically the alignment.
void
IonBuilder::loadTypedObjectElements(MDefinition *typedObj,
                                    MDefinition *offset,
                                    int32_t unit,
                                    bool canBeNeutered,
                                    MDefinition **ownerElements,
                                    MDefinition **ownerScaledOffset)
{
    MDefinition *owner, *ownerOffset;
    loadTypedObjectData(typedObj, offset, canBeNeutered, &owner, &ownerOffset);

    // Load the element data.
    MTypedObjectElements *elements = MTypedObjectElements::New(alloc(), owner);
    current->add(elements);

    // Scale to a different unit for compatibility with typed array MIRs.
    if (unit != 1) {
        MDiv *scaledOffset = MDiv::NewAsmJS(alloc(), ownerOffset, constantInt(unit), MIRType_Int32,
                                            /* unsigned = */ false);
        current->add(scaledOffset);
        *ownerScaledOffset = scaledOffset;
    } else {
        *ownerScaledOffset = ownerOffset;
    }

    *ownerElements = elements;
}

// Looks up the offset and type-descriptor set of the field `name`, given
// the descriptor set of the owning typed object `typedObj`. Note that even
// when true is returned, `*fieldDescrs` might be empty if no useful
// type/offset pair could be determined.
bool
IonBuilder::lookupTypedObjectField(MDefinition *typedObj,
                                   PropertyName *name,
                                   int32_t *fieldOffset,
                                   TypeDescrSet *fieldDescrs,
                                   size_t *fieldIndex)
{
    TypeDescrSet objDescrs;
    if (!lookupTypeDescrSet(typedObj, &objDescrs))
        return false;

    // Must be accessing a struct.
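    // (Illustrative: with the experimental TypedObject API, a descriptor such
    // as one produced by `new TypedObject.StructType({x: TypedObject.float64})`
    // is a struct descriptor; array and scalar accesses take other paths.)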
    if (!objDescrs.allOfKind(TypeDescr::Struct))
        return true;

    // Determine the type/offset of the field `name`, if any.
    int32_t offset;
    if (!objDescrs.fieldNamed(*this, NameToId(name), &offset,
                              fieldDescrs, fieldIndex))
        return false;
    if (fieldDescrs->empty())
        return true;

    JS_ASSERT(offset >= 0);
    *fieldOffset = offset;

    return true;
}

MDefinition *
IonBuilder::typeObjectForElementFromArrayStructType(MDefinition *typeObj)
{
    MInstruction *elemType = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_ARRAY_ELEM_TYPE);
    current->add(elemType);

    MInstruction *unboxElemType = MUnbox::New(alloc(), elemType, MIRType_Object, MUnbox::Infallible);
    current->add(unboxElemType);

    return unboxElemType;
}

MDefinition *
IonBuilder::typeObjectForFieldFromStructType(MDefinition *typeObj,
                                             size_t fieldIndex)
{
    // Load the list of field type objects.

    MInstruction *fieldTypes = MLoadFixedSlot::New(alloc(), typeObj, JS_DESCR_SLOT_STRUCT_FIELD_TYPES);
    current->add(fieldTypes);

    MInstruction *unboxFieldTypes = MUnbox::New(alloc(), fieldTypes, MIRType_Object, MUnbox::Infallible);
    current->add(unboxFieldTypes);

    // Index into the list with the index of the field.

    MInstruction *fieldTypesElements = MElements::New(alloc(), unboxFieldTypes);
    current->add(fieldTypesElements);

    MConstant *fieldIndexDef = constantInt(fieldIndex);

    MInstruction *fieldType = MLoadElement::New(alloc(), fieldTypesElements, fieldIndexDef, false, false);
    current->add(fieldType);

    MInstruction *unboxFieldType = MUnbox::New(alloc(), fieldType, MIRType_Object, MUnbox::Infallible);
    current->add(unboxFieldType);

    return unboxFieldType;
}

bool
IonBuilder::storeScalarTypedObjectValue(MDefinition *typedObj,
                                        MDefinition *byteOffset,
                                        ScalarTypeDescr::Type type,
                                        bool canBeNeutered,
                                        bool racy,
                                        MDefinition *value)
{
    // Find the location within the owner object.
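    // loadTypedObjectElements() inserts the neuter check (when canBeNeutered)
    // and converts the byte offset into the (elements, scaled index) form
    // expected by the typed-array store MIR below.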
    MDefinition *elements, *scaledOffset;
    size_t alignment = ScalarTypeDescr::alignment(type);
    loadTypedObjectElements(typedObj, byteOffset, alignment, canBeNeutered,
                            &elements, &scaledOffset);

    // Clamp the value to [0, 255] when the type is Uint8Clamped.
    MDefinition *toWrite = value;
    if (type == ScalarTypeDescr::TYPE_UINT8_CLAMPED) {
        toWrite = MClampToUint8::New(alloc(), value);
        current->add(toWrite->toInstruction());
    }

    MStoreTypedArrayElement *store =
        MStoreTypedArrayElement::New(alloc(), elements, scaledOffset, toWrite,
                                     type);
    if (racy)
        store->setRacy();
    current->add(store);

    return true;
}

MConstant *
IonBuilder::constant(const Value &v)
{
    MConstant *c = MConstant::New(alloc(), v, constraints());
    current->add(c);
    return c;
}

MConstant *
IonBuilder::constantInt(int32_t i)
{
    return constant(Int32Value(i));
}

MDefinition *
IonBuilder::getCallee()
{
    if (inliningDepth_ == 0) {
        MInstruction *callee = MCallee::New(alloc());
        current->add(callee);
        return callee;
    }

    return inlineCallInfo_->fun();
}