js/src/jit/IonAnalysis.cpp

Sat, 03 Jan 2015 20:18:00 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Sat, 03 Jan 2015 20:18:00 +0100
branch
TOR_BUG_3246
changeset 7
129ffea94266
permissions
-rw-r--r--

Conditionally enable double-key logic based on private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts.
Some reservations remain about how to convince FindCookie callers to
test the condition and pass a nullptr when double-key logic is disabled.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #include "jit/IonAnalysis.h"
michael@0 8
michael@0 9 #include "jsanalyze.h"
michael@0 10
michael@0 11 #include "jit/BaselineInspector.h"
michael@0 12 #include "jit/BaselineJIT.h"
michael@0 13 #include "jit/Ion.h"
michael@0 14 #include "jit/IonBuilder.h"
michael@0 15 #include "jit/IonOptimizationLevels.h"
michael@0 16 #include "jit/LIR.h"
michael@0 17 #include "jit/Lowering.h"
michael@0 18 #include "jit/MIRGraph.h"
michael@0 19
michael@0 20 #include "jsinferinlines.h"
michael@0 21 #include "jsobjinlines.h"
michael@0 22 #include "jsopcodeinlines.h"
michael@0 23
michael@0 24 using namespace js;
michael@0 25 using namespace js::jit;
michael@0 26
michael@0 27 using mozilla::DebugOnly;
michael@0 28
michael@0 29 // A critical edge is an edge which is neither its successor's only predecessor
michael@0 30 // nor its predecessor's only successor. Critical edges must be split to
michael@0 31 // prevent copy-insertion and code motion from affecting other edges.
michael@0 32 bool
michael@0 33 jit::SplitCriticalEdges(MIRGraph &graph)
michael@0 34 {
michael@0 35 for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
michael@0 36 if (block->numSuccessors() < 2)
michael@0 37 continue;
michael@0 38 for (size_t i = 0; i < block->numSuccessors(); i++) {
michael@0 39 MBasicBlock *target = block->getSuccessor(i);
michael@0 40 if (target->numPredecessors() < 2)
michael@0 41 continue;
michael@0 42
michael@0 43 // Create a new block inheriting from the predecessor.
michael@0 44 MBasicBlock *split = MBasicBlock::NewSplitEdge(graph, block->info(), *block);
michael@0 45 if (!split)
michael@0 46 return false;
michael@0 47 split->setLoopDepth(block->loopDepth());
michael@0 48 graph.insertBlockAfter(*block, split);
michael@0 49 split->end(MGoto::New(graph.alloc(), target));
michael@0 50
michael@0 51 block->replaceSuccessor(i, split);
michael@0 52 target->replacePredecessor(*block, split);
michael@0 53 }
michael@0 54 }
michael@0 55 return true;
michael@0 56 }
michael@0 57
// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
jit::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    // If we are compiling try blocks, locals and arguments may be observable
    // from catch or finally blocks (which Ion does not compile). For now just
    // disable the pass in this case.
    if (graph.hasTryBlock())
        return true;

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() || ins->isComputeThis())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isImplicitlyUsed())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                // A use outside this block, or by a box or phi (whose
                // lifetime is not bounded here), makes the last use
                // unknowable; mark the instruction as untouchable.
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                // Uses by real definitions are left alone; only resume point
                // operands are candidates for replacement.
                if (uses->consumer()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->consumer()->toResumePoint();
                // Keep the operand when the resume point is in another block,
                // has no attached instruction, belongs to this instruction
                // itself, or precedes the last definition use.
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // The operand is an uneliminable slot. This currently
                // includes argument slots in non-strict scripts (due to being
                // observable via Function.arguments).
                if (!block->info().canOptimizeOutSlot(uses->index())) {
                    uses++;
                    continue;
                }

                // Store an optimized out magic value in place of all dead
                // resume point operands. Making any such substitution can in
                // general alter the interpreter's behavior, even though the
                // code is dead, as the interpreter will still execute opcodes
                // whose effects cannot be observed. If the undefined value
                // were to flow to, say, a dead property access the
                // interpreter could throw an exception; we avoid this problem
                // by removing dead operands before removing dead code.
                MConstant *constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
                // Insert at the top of the block so the constant is available
                // to every resume point in it.
                block->insertBefore(*(block->begin()), constant);
                // replaceOperand returns an iterator positioned at the next
                // use, so this also advances the walk.
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}
michael@0 165
michael@0 166 // Instructions are useless if they are unused and have no side effects.
michael@0 167 // This pass eliminates useless instructions.
michael@0 168 // The graph itself is unchanged.
michael@0 169 bool
michael@0 170 jit::EliminateDeadCode(MIRGenerator *mir, MIRGraph &graph)
michael@0 171 {
michael@0 172 // Traverse in postorder so that we hit uses before definitions.
michael@0 173 // Traverse instruction list backwards for the same reason.
michael@0 174 for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
michael@0 175 if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
michael@0 176 return false;
michael@0 177
michael@0 178 // Remove unused instructions.
michael@0 179 for (MInstructionReverseIterator inst = block->rbegin(); inst != block->rend(); ) {
michael@0 180 if (!inst->isEffectful() && !inst->resumePoint() &&
michael@0 181 !inst->hasUses() && !inst->isGuard() &&
michael@0 182 !inst->isControlInstruction()) {
michael@0 183 inst = block->discardAt(inst);
michael@0 184 } else {
michael@0 185 inst++;
michael@0 186 }
michael@0 187 }
michael@0 188 }
michael@0 189
michael@0 190 return true;
michael@0 191 }
michael@0 192
michael@0 193 static inline bool
michael@0 194 IsPhiObservable(MPhi *phi, Observability observe)
michael@0 195 {
michael@0 196 // If the phi has uses which are not reflected in SSA, then behavior in the
michael@0 197 // interpreter may be affected by removing the phi.
michael@0 198 if (phi->isImplicitlyUsed())
michael@0 199 return true;
michael@0 200
michael@0 201 // Check for uses of this phi node outside of other phi nodes.
michael@0 202 // Note that, initially, we skip reading resume points, which we
michael@0 203 // don't count as actual uses. If the only uses are resume points,
michael@0 204 // then the SSA name is never consumed by the program. However,
michael@0 205 // after optimizations have been performed, it's possible that the
michael@0 206 // actual uses in the program have been (incorrectly) optimized
michael@0 207 // away, so we must be more conservative and consider resume
michael@0 208 // points as well.
michael@0 209 switch (observe) {
michael@0 210 case AggressiveObservability:
michael@0 211 for (MUseDefIterator iter(phi); iter; iter++) {
michael@0 212 if (!iter.def()->isPhi())
michael@0 213 return true;
michael@0 214 }
michael@0 215 break;
michael@0 216
michael@0 217 case ConservativeObservability:
michael@0 218 for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
michael@0 219 if (!iter->consumer()->isDefinition() ||
michael@0 220 !iter->consumer()->toDefinition()->isPhi())
michael@0 221 return true;
michael@0 222 }
michael@0 223 break;
michael@0 224 }
michael@0 225
michael@0 226 uint32_t slot = phi->slot();
michael@0 227 CompileInfo &info = phi->block()->info();
michael@0 228 JSFunction *fun = info.funMaybeLazy();
michael@0 229
michael@0 230 // If the Phi is of the |this| value, it must always be observable.
michael@0 231 if (fun && slot == info.thisSlot())
michael@0 232 return true;
michael@0 233
michael@0 234 // If the function may need an arguments object, then make sure to
michael@0 235 // preserve the scope chain, because it may be needed to construct the
michael@0 236 // arguments object during bailout. If we've already created an arguments
michael@0 237 // object (or got one via OSR), preserve that as well.
michael@0 238 if (fun && info.hasArguments() &&
michael@0 239 (slot == info.scopeChainSlot() || slot == info.argsObjSlot()))
michael@0 240 {
michael@0 241 return true;
michael@0 242 }
michael@0 243
michael@0 244 // The Phi is an uneliminable slot. Currently this includes argument slots
michael@0 245 // in non-strict scripts (due to being observable via Function.arguments).
michael@0 246 if (fun && !info.canOptimizeOutSlot(slot))
michael@0 247 return true;
michael@0 248
michael@0 249 return false;
michael@0 250 }
michael@0 251
michael@0 252 // Handles cases like:
michael@0 253 // x is phi(a, x) --> a
michael@0 254 // x is phi(a, a) --> a
michael@0 255 static inline MDefinition *
michael@0 256 IsPhiRedundant(MPhi *phi)
michael@0 257 {
michael@0 258 MDefinition *first = phi->operandIfRedundant();
michael@0 259 if (first == nullptr)
michael@0 260 return nullptr;
michael@0 261
michael@0 262 // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
michael@0 263 if (phi->isImplicitlyUsed())
michael@0 264 first->setImplicitlyUsedUnchecked();
michael@0 265
michael@0 266 return first;
michael@0 267 }
michael@0 268
bool
jit::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one that whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi *, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            iter->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition *redundant = IsPhiRedundant(*iter)) {
                iter->replaceAllUsesWith(redundant);
                iter = block->discardPhiAt(iter);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(*iter, observe)) {
                iter->setInWorklist();
                if (!worklist.append(*iter))
                    return false;
            }
            iter++;
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi *phi = worklist.popCopy();
        JS_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition *redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi *use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        // The consumer was already marked used; flag it
                        // unused again and re-queue it so it is revisited
                        // with the replaced operand.
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->replaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition *in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis: anything still flagged unused was never reached from
    // an observable phi and can be discarded.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            if (iter->isUnused())
                iter = block->discardPhiAt(iter);
            else
                iter++;
        }
    }

    return true;
}
michael@0 380
namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
class TypeAnalyzer
{
    MIRGenerator *mir;
    MIRGraph &graph;
    // Phis whose assigned type still has to be propagated to the phis that
    // consume them.
    Vector<MPhi *, 0, SystemAllocPolicy> phiWorklist_;

    TempAllocator &alloc() const {
        return graph.alloc();
    }

    // Queue |phi| for propagation; no-op (and success) if already queued.
    bool addPhiToWorklist(MPhi *phi) {
        if (phi->isInWorklist())
            return true;
        if (!phiWorklist_.append(phi))
            return false;
        phi->setInWorklist();
        return true;
    }
    // Pop the next queued phi, clearing its in-worklist flag.
    MPhi *popPhi() {
        MPhi *phi = phiWorklist_.popCopy();
        phi->setNotInWorklist();
        return phi;
    }

    bool respecialize(MPhi *phi, MIRType type);
    bool propagateSpecialization(MPhi *phi);
    bool specializePhis();
    void replaceRedundantPhi(MPhi *phi);
    void adjustPhiInputs(MPhi *phi);
    bool adjustInputs(MDefinition *def);
    bool insertConversions();

    // Float32 specialization pipeline.
    bool checkFloatCoherency();
    bool graphContainsFloat32();
    bool markPhiConsumers();
    bool markPhiProducers();
    bool specializeValidFloatOps();
    bool tryEmitFloatOperations();

  public:
    TypeAnalyzer(MIRGenerator *mir, MIRGraph &graph)
      : mir(mir), graph(graph)
    { }

    bool analyze();
};

} /* anonymous namespace */
michael@0 441
// Try to specialize this phi based on its non-cyclic inputs.
static MIRType
GuessPhiType(MPhi *phi, bool *hasInputsWithEmptyTypes)
{
#ifdef DEBUG
    // Check that different magic constants aren't flowing together. Ignore
    // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
    // away.
    MIRType magicType = MIRType_None;
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_MagicOptimizedArguments ||
            in->type() == MIRType_MagicHole ||
            in->type() == MIRType_MagicIsConstructing)
        {
            if (magicType == MIRType_None)
                magicType = in->type();
            MOZ_ASSERT(magicType == in->type());
        }
    }
#endif

    *hasInputsWithEmptyTypes = false;

    MIRType type = MIRType_None;
    bool convertibleToFloat32 = false;
    bool hasPhiInputs = false;
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->isPhi()) {
            hasPhiInputs = true;
            if (!in->toPhi()->triedToSpecialize())
                continue;
            if (in->type() == MIRType_None) {
                // The operand is a phi we tried to specialize, but we were
                // unable to guess its type. propagateSpecialization will
                // propagate the type to this phi when it becomes known.
                continue;
            }
        }

        // Ignore operands which we've never observed.
        if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
            *hasInputsWithEmptyTypes = true;
            continue;
        }

        // First typed input: adopt its type as the working guess.
        if (type == MIRType_None) {
            type = in->type();
            if (in->canProduceFloat32())
                convertibleToFloat32 = true;
            continue;
        }
        if (type != in->type()) {
            if (convertibleToFloat32 && in->type() == MIRType_Float32) {
                // If we only saw definitions that can be converted into Float32 before and
                // encounter a Float32 value, promote previous values to Float32
                type = MIRType_Float32;
            } else if (IsNumberType(type) && IsNumberType(in->type())) {
                // Specialize phis with int32 and double operands as double.
                type = MIRType_Double;
                convertibleToFloat32 &= in->canProduceFloat32();
            } else {
                // Incompatible input types; fall back to a boxed Value.
                return MIRType_Value;
            }
        }
    }

    if (type == MIRType_None && !hasPhiInputs) {
        // All inputs are non-phis with empty typesets. Use MIRType_Value
        // in this case, as it's impossible to get better type information.
        JS_ASSERT(*hasInputsWithEmptyTypes);
        type = MIRType_Value;
    }

    return type;
}
michael@0 519
michael@0 520 bool
michael@0 521 TypeAnalyzer::respecialize(MPhi *phi, MIRType type)
michael@0 522 {
michael@0 523 if (phi->type() == type)
michael@0 524 return true;
michael@0 525 phi->specialize(type);
michael@0 526 return addPhiToWorklist(phi);
michael@0 527 }
michael@0 528
// Push |phi|'s decided type out to every phi that consumes it,
// re-specializing consumers whose current type no longer agrees.
bool
TypeAnalyzer::propagateSpecialization(MPhi *phi)
{
    JS_ASSERT(phi->type() != MIRType_None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi *use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType_None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 that can be converted to float and float operands as floats.
            if ((use->type() == MIRType_Int32 && use->canProduceFloat32() && phi->type() == MIRType_Float32) ||
                (phi->type() == MIRType_Int32 && phi->canProduceFloat32() && use->type() == MIRType_Float32))
            {
                if (!respecialize(use, MIRType_Float32))
                    return false;
                continue;
            }

            // Specialize phis with int32 and double operands as double.
            if (IsNumberType(use->type()) && IsNumberType(phi->type())) {
                if (!respecialize(use, MIRType_Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType_Value))
                return false;
        }
    }

    return true;
}
michael@0 574
// Assign an initial MIRType to every phi in the graph, then run the
// propagation worklist to a fixed point.
bool
TypeAnalyzer::specializePhis()
{
    // Phis that could not be typed because an input had an empty typeset;
    // they may have to be forced to MIRType_Value below.
    Vector<MPhi *, 0, SystemAllocPolicy> phisWithEmptyInputTypes;

    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Specialize Phis (main loop)"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            bool hasInputsWithEmptyTypes;
            MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
            phi->specialize(type);
            if (type == MIRType_None) {
                // We tried to guess the type but failed because all operands are
                // phis we still have to visit. Set the triedToSpecialize flag but
                // don't propagate the type to other phis, propagateSpecialization
                // will do that once we know the type of one of the operands.

                // Edge case: when this phi has a non-phi input with an empty
                // typeset, it's possible for two phis to have a cyclic
                // dependency and they will both have MIRType_None. Specialize
                // such phis to MIRType_Value later on.
                if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
                    return false;
                continue;
            }
            if (!propagateSpecialization(*phi))
                return false;
        }
    }

    do {
        while (!phiWorklist_.empty()) {
            if (mir->shouldCancel("Specialize Phis (worklist)"))
                return false;

            MPhi *phi = popPhi();
            if (!propagateSpecialization(phi))
                return false;
        }

        // When two phis have a cyclic dependency and inputs that have an empty
        // typeset (which are ignored by GuessPhiType), we may still have to
        // specialize these to MIRType_Value.
        while (!phisWithEmptyInputTypes.empty()) {
            if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
                return false;

            MPhi *phi = phisWithEmptyInputTypes.popCopy();
            if (phi->type() == MIRType_None) {
                phi->specialize(MIRType_Value);
                if (!propagateSpecialization(phi))
                    return false;
            }
        }
        // Forcing a type above may have queued more propagation work.
    } while (!phiWorklist_.empty());

    return true;
}
michael@0 635
michael@0 636 void
michael@0 637 TypeAnalyzer::adjustPhiInputs(MPhi *phi)
michael@0 638 {
michael@0 639 MIRType phiType = phi->type();
michael@0 640 JS_ASSERT(phiType != MIRType_None);
michael@0 641
michael@0 642 // If we specialized a type that's not Value, there are 3 cases:
michael@0 643 // 1. Every input is of that type.
michael@0 644 // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
michael@0 645 // 3. Inputs were doubles and int32s, and was specialized to double.
michael@0 646 if (phiType != MIRType_Value) {
michael@0 647 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
michael@0 648 MDefinition *in = phi->getOperand(i);
michael@0 649 if (in->type() == phiType)
michael@0 650 continue;
michael@0 651
michael@0 652 if (in->isBox() && in->toBox()->input()->type() == phiType) {
michael@0 653 phi->replaceOperand(i, in->toBox()->input());
michael@0 654 } else {
michael@0 655 MInstruction *replacement;
michael@0 656
michael@0 657 if (phiType == MIRType_Double && IsFloatType(in->type())) {
michael@0 658 // Convert int32 operands to double.
michael@0 659 replacement = MToDouble::New(alloc(), in);
michael@0 660 } else if (phiType == MIRType_Float32) {
michael@0 661 if (in->type() == MIRType_Int32 || in->type() == MIRType_Double) {
michael@0 662 replacement = MToFloat32::New(alloc(), in);
michael@0 663 } else {
michael@0 664 // See comment below
michael@0 665 if (in->type() != MIRType_Value) {
michael@0 666 MBox *box = MBox::New(alloc(), in);
michael@0 667 in->block()->insertBefore(in->block()->lastIns(), box);
michael@0 668 in = box;
michael@0 669 }
michael@0 670
michael@0 671 MUnbox *unbox = MUnbox::New(alloc(), in, MIRType_Double, MUnbox::Fallible);
michael@0 672 in->block()->insertBefore(in->block()->lastIns(), unbox);
michael@0 673 replacement = MToFloat32::New(alloc(), in);
michael@0 674 }
michael@0 675 } else {
michael@0 676 // If we know this branch will fail to convert to phiType,
michael@0 677 // insert a box that'll immediately fail in the fallible unbox
michael@0 678 // below.
michael@0 679 if (in->type() != MIRType_Value) {
michael@0 680 MBox *box = MBox::New(alloc(), in);
michael@0 681 in->block()->insertBefore(in->block()->lastIns(), box);
michael@0 682 in = box;
michael@0 683 }
michael@0 684
michael@0 685 // Be optimistic and insert unboxes when the operand is a
michael@0 686 // value.
michael@0 687 replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
michael@0 688 }
michael@0 689
michael@0 690 in->block()->insertBefore(in->block()->lastIns(), replacement);
michael@0 691 phi->replaceOperand(i, replacement);
michael@0 692 }
michael@0 693 }
michael@0 694
michael@0 695 return;
michael@0 696 }
michael@0 697
michael@0 698 // Box every typed input.
michael@0 699 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
michael@0 700 MDefinition *in = phi->getOperand(i);
michael@0 701 if (in->type() == MIRType_Value)
michael@0 702 continue;
michael@0 703
michael@0 704 if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input())) {
michael@0 705 // The input is being explicitly unboxed, so sneak past and grab
michael@0 706 // the original box.
michael@0 707 phi->replaceOperand(i, in->toUnbox()->input());
michael@0 708 } else {
michael@0 709 MDefinition *box = BoxInputsPolicy::alwaysBoxAt(alloc(), in->block()->lastIns(), in);
michael@0 710 phi->replaceOperand(i, box);
michael@0 711 }
michael@0 712 }
michael@0 713 }
michael@0 714
michael@0 715 bool
michael@0 716 TypeAnalyzer::adjustInputs(MDefinition *def)
michael@0 717 {
michael@0 718 TypePolicy *policy = def->typePolicy();
michael@0 719 if (policy && !policy->adjustInputs(alloc(), def->toInstruction()))
michael@0 720 return false;
michael@0 721 return true;
michael@0 722 }
michael@0 723
michael@0 724 void
michael@0 725 TypeAnalyzer::replaceRedundantPhi(MPhi *phi)
michael@0 726 {
michael@0 727 MBasicBlock *block = phi->block();
michael@0 728 js::Value v;
michael@0 729 switch (phi->type()) {
michael@0 730 case MIRType_Undefined:
michael@0 731 v = UndefinedValue();
michael@0 732 break;
michael@0 733 case MIRType_Null:
michael@0 734 v = NullValue();
michael@0 735 break;
michael@0 736 case MIRType_MagicOptimizedArguments:
michael@0 737 v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
michael@0 738 break;
michael@0 739 case MIRType_MagicOptimizedOut:
michael@0 740 v = MagicValue(JS_OPTIMIZED_OUT);
michael@0 741 break;
michael@0 742 default:
michael@0 743 MOZ_ASSUME_UNREACHABLE("unexpected type");
michael@0 744 }
michael@0 745 MConstant *c = MConstant::New(alloc(), v);
michael@0 746 // The instruction pass will insert the box
michael@0 747 block->insertBefore(*(block->begin()), c);
michael@0 748 phi->replaceAllUsesWith(c);
michael@0 749 }
michael@0 750
michael@0 751 bool
michael@0 752 TypeAnalyzer::insertConversions()
michael@0 753 {
michael@0 754 // Instructions are processed in reverse postorder: all uses are defs are
michael@0 755 // seen before uses. This ensures that output adjustment (which may rewrite
michael@0 756 // inputs of uses) does not conflict with input adjustment.
michael@0 757 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
michael@0 758 if (mir->shouldCancel("Insert Conversions"))
michael@0 759 return false;
michael@0 760
michael@0 761 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();) {
michael@0 762 if (phi->type() == MIRType_Undefined ||
michael@0 763 phi->type() == MIRType_Null ||
michael@0 764 phi->type() == MIRType_MagicOptimizedArguments ||
michael@0 765 phi->type() == MIRType_MagicOptimizedOut)
michael@0 766 {
michael@0 767 replaceRedundantPhi(*phi);
michael@0 768 phi = block->discardPhiAt(phi);
michael@0 769 } else {
michael@0 770 adjustPhiInputs(*phi);
michael@0 771 phi++;
michael@0 772 }
michael@0 773 }
michael@0 774 for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
michael@0 775 if (!adjustInputs(*iter))
michael@0 776 return false;
michael@0 777 }
michael@0 778 }
michael@0 779 return true;
michael@0 780 }
michael@0 781
michael@0 782 // This function tries to emit Float32 specialized operations whenever it's possible.
michael@0 783 // MIR nodes are flagged as:
michael@0 784 // - Producers, when they can create Float32 that might need to be coerced into a Double.
michael@0 785 // Loads in Float32 arrays and conversions to Float32 are producers.
michael@0 786 // - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
michael@0 787 // Stores in Float32 arrays and conversions to Float32 are consumers.
michael@0 788 // - Float32 commutative, when using the Float32 instruction instead of the Double instruction
michael@0 789 // does not result in a compound loss of precision. This is the case for +, -, /, * with 2
michael@0 790 // operands, for instance. However, an addition with 3 operands is not commutative anymore,
michael@0 791 // so an intermediate coercion is needed.
michael@0 792 // Except for phis, all these flags are known after Ion building, so they cannot change during
michael@0 793 // the process.
michael@0 794 //
michael@0 795 // The idea behind the algorithm is easy: whenever we can prove that a commutative operation
michael@0 796 // has only producers as inputs and consumers as uses, we can specialize the operation as a
michael@0 797 // float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
michael@0 798 // if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
michael@0 799 //
michael@0 800 // Phis have a special status. Phis need to be flagged as producers or consumers as they can
michael@0 801 // be inputs or outputs of commutative instructions. Fortunately, producers and consumers
michael@0 802 // properties are such that we can deduce the property using all non phis inputs first (which form
michael@0 803 // an initial phi graph) and then propagate all properties from one phi to another using a
// fixed point algorithm. The algorithm is guaranteed to terminate because each iteration has at
// most as many flagged phis as the previous iteration (so the worst steady state is all phis
// being flagged as false).
michael@0 807 //
michael@0 808 // In a nutshell, the algorithm applies three passes:
michael@0 809 // 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
michael@0 810 // all its non-phi inputs. Then each phi propagates its value to other phis. If after propagation,
michael@0 811 // the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
michael@0 812 // consumer if all of its uses are consumers.
michael@0 813 // 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
michael@0 814 // the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
michael@0 815 // 3 - Go through all commutative operations and ensure their inputs are all producers and their
michael@0 816 // uses are all consumers.
// Pass 1 of the Float32 specialization (see the comment above): decide, for
// every phi, whether all of its uses can consume a Float32. Starts from an
// optimistic per-phi flag derived from non-phi uses, then runs a worklist
// fixed point that only ever clears flags, so it terminates.
bool
TypeAnalyzer::markPhiConsumers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in postorder so worklist is initialized to RPO.
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            // Optimistic seed: phi uses are assumed to be consumers for now;
            // only a non-phi use that cannot consume Float32 clears the flag.
            // Phi-to-phi dependencies are resolved by the fixed point below.
            bool canConsumeFloat32 = true;
            for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
                MDefinition *usedef = use.def();
                canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
            }
            phi->setCanConsumeFloat32(canConsumeFloat32);
            if (canConsumeFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    // Fixed point: pop flagged phis, and clear the flag on any phi that has a
    // phi use which is itself not a consumer.
    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

        bool validConsumer = true;
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
                validConsumer = false;
                break;
            }
        }

        if (validConsumer)
            continue;

        // Propagate invalidated phis: still-flagged phi operands have this phi
        // as a use, so they must be re-examined.
        phi->setCanConsumeFloat32(false);
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
            {
                if (!addPhiToWorklist(input->toPhi()))
                    return false;
            }
        }
    }
    return true;
}
michael@0 872
// Pass 2 of the Float32 specialization: the dual of markPhiConsumers. A phi
// is a producer when all of its operands can produce Float32. Same optimistic
// seed + flag-clearing worklist fixed point, but propagating along uses
// instead of operands.
bool
TypeAnalyzer::markPhiProducers()
{
    JS_ASSERT(phiWorklist_.empty());

    // Iterate in reverse postorder so worklist is initialized to PO.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            JS_ASSERT(!phi->isInWorklist());
            // Optimistic seed: phi operands are assumed producers; only a
            // non-phi operand that cannot produce Float32 clears the flag.
            bool canProduceFloat32 = true;
            for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
                MDefinition *input = phi->getOperand(i);
                canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
            }
            phi->setCanProduceFloat32(canProduceFloat32);
            if (canProduceFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    // Fixed point: clear the flag on any phi that has a phi operand which is
    // itself not a producer.
    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
            return false;

        MPhi *phi = popPhi();
        JS_ASSERT(phi->canProduceFloat32());

        bool validProducer = true;
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition *input = phi->getOperand(i);
            if (input->isPhi() && !input->canProduceFloat32()) {
                validProducer = false;
                break;
            }
        }

        if (validProducer)
            continue;

        // Propagate invalidated phis: still-flagged phi uses depend on this
        // phi as an operand, so they must be re-examined.
        phi->setCanProduceFloat32(false);
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition *def = use.def();
            if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
            {
                if (!addPhiToWorklist(def->toPhi()))
                    return false;
            }
        }
    }
    return true;
}
michael@0 928
michael@0 929 bool
michael@0 930 TypeAnalyzer::specializeValidFloatOps()
michael@0 931 {
michael@0 932 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
michael@0 933 if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
michael@0 934 return false;
michael@0 935
michael@0 936 for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
michael@0 937 if (!ins->isFloat32Commutative())
michael@0 938 continue;
michael@0 939
michael@0 940 if (ins->type() == MIRType_Float32)
michael@0 941 continue;
michael@0 942
michael@0 943 // This call will try to specialize the instruction iff all uses are consumers and
michael@0 944 // all inputs are producers.
michael@0 945 ins->trySpecializeFloat32(alloc());
michael@0 946 }
michael@0 947 }
michael@0 948 return true;
michael@0 949 }
michael@0 950
michael@0 951 bool
michael@0 952 TypeAnalyzer::graphContainsFloat32()
michael@0 953 {
michael@0 954 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
michael@0 955 if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
michael@0 956 return false;
michael@0 957
michael@0 958 for (MDefinitionIterator def(*block); def; def++) {
michael@0 959 if (def->type() == MIRType_Float32)
michael@0 960 return true;
michael@0 961 }
michael@0 962 }
michael@0 963 return false;
michael@0 964 }
michael@0 965
michael@0 966 bool
michael@0 967 TypeAnalyzer::tryEmitFloatOperations()
michael@0 968 {
michael@0 969 // Backends that currently don't know how to generate Float32 specialized instructions
michael@0 970 // shouldn't run this pass and just let all instructions as specialized for Double.
michael@0 971 if (!LIRGenerator::allowFloat32Optimizations())
michael@0 972 return true;
michael@0 973
michael@0 974 // Asm.js uses the ahead of time type checks to specialize operations, no need to check
michael@0 975 // them again at this point.
michael@0 976 if (mir->compilingAsmJS())
michael@0 977 return true;
michael@0 978
michael@0 979 // Check ahead of time that there is at least one definition typed as Float32, otherwise we
michael@0 980 // don't need this pass.
michael@0 981 if (!graphContainsFloat32())
michael@0 982 return true;
michael@0 983
michael@0 984 if (!markPhiConsumers())
michael@0 985 return false;
michael@0 986 if (!markPhiProducers())
michael@0 987 return false;
michael@0 988 if (!specializeValidFloatOps())
michael@0 989 return false;
michael@0 990 return true;
michael@0 991 }
michael@0 992
michael@0 993 bool
michael@0 994 TypeAnalyzer::checkFloatCoherency()
michael@0 995 {
michael@0 996 #ifdef DEBUG
michael@0 997 // Asserts that all Float32 instructions are flowing into Float32 consumers or specialized
michael@0 998 // operations
michael@0 999 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
michael@0 1000 if (mir->shouldCancel("Check Float32 coherency"))
michael@0 1001 return false;
michael@0 1002
michael@0 1003 for (MDefinitionIterator def(*block); def; def++) {
michael@0 1004 if (def->type() != MIRType_Float32)
michael@0 1005 continue;
michael@0 1006
michael@0 1007 for (MUseDefIterator use(*def); use; use++) {
michael@0 1008 MDefinition *consumer = use.def();
michael@0 1009 JS_ASSERT(consumer->isConsistentFloat32Use(use.use()));
michael@0 1010 }
michael@0 1011 }
michael@0 1012 }
michael@0 1013 #endif
michael@0 1014 return true;
michael@0 1015 }
michael@0 1016
michael@0 1017 bool
michael@0 1018 TypeAnalyzer::analyze()
michael@0 1019 {
michael@0 1020 if (!tryEmitFloatOperations())
michael@0 1021 return false;
michael@0 1022 if (!specializePhis())
michael@0 1023 return false;
michael@0 1024 if (!insertConversions())
michael@0 1025 return false;
michael@0 1026 if (!checkFloatCoherency())
michael@0 1027 return false;
michael@0 1028 return true;
michael@0 1029 }
michael@0 1030
michael@0 1031 bool
michael@0 1032 jit::ApplyTypeInformation(MIRGenerator *mir, MIRGraph &graph)
michael@0 1033 {
michael@0 1034 TypeAnalyzer analyzer(mir, graph);
michael@0 1035
michael@0 1036 if (!analyzer.analyze())
michael@0 1037 return false;
michael@0 1038
michael@0 1039 return true;
michael@0 1040 }
michael@0 1041
michael@0 1042 bool
michael@0 1043 jit::MakeMRegExpHoistable(MIRGraph &graph)
michael@0 1044 {
michael@0 1045 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
michael@0 1046 for (MDefinitionIterator iter(*block); iter; iter++) {
michael@0 1047 if (!iter->isRegExp())
michael@0 1048 continue;
michael@0 1049
michael@0 1050 MRegExp *regexp = iter->toRegExp();
michael@0 1051
michael@0 1052 // Test if MRegExp is hoistable by looking at all uses.
michael@0 1053 bool hoistable = true;
michael@0 1054 for (MUseIterator i = regexp->usesBegin(); i != regexp->usesEnd(); i++) {
michael@0 1055 // Ignore resume points. At this point all uses are listed.
michael@0 1056 // No DCE or GVN or something has happened.
michael@0 1057 if (i->consumer()->isResumePoint())
michael@0 1058 continue;
michael@0 1059
michael@0 1060 JS_ASSERT(i->consumer()->isDefinition());
michael@0 1061
michael@0 1062 // All MRegExp* MIR's don't adjust the regexp.
michael@0 1063 MDefinition *use = i->consumer()->toDefinition();
michael@0 1064 if (use->isRegExpReplace())
michael@0 1065 continue;
michael@0 1066 if (use->isRegExpExec())
michael@0 1067 continue;
michael@0 1068 if (use->isRegExpTest())
michael@0 1069 continue;
michael@0 1070
michael@0 1071 hoistable = false;
michael@0 1072 break;
michael@0 1073 }
michael@0 1074
michael@0 1075 if (!hoistable)
michael@0 1076 continue;
michael@0 1077
michael@0 1078 // Make MRegExp hoistable
michael@0 1079 regexp->setMovable();
michael@0 1080
michael@0 1081 // That would be incorrect for global/sticky, because lastIndex could be wrong.
michael@0 1082 // Therefore setting the lastIndex to 0. That is faster than a not movable regexp.
michael@0 1083 RegExpObject *source = regexp->source();
michael@0 1084 if (source->sticky() || source->global()) {
michael@0 1085 JS_ASSERT(regexp->mustClone());
michael@0 1086 MConstant *zero = MConstant::New(graph.alloc(), Int32Value(0));
michael@0 1087 regexp->block()->insertAfter(regexp, zero);
michael@0 1088
michael@0 1089 MStoreFixedSlot *lastIndex =
michael@0 1090 MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
michael@0 1091 regexp->block()->insertAfter(zero, lastIndex);
michael@0 1092 }
michael@0 1093 }
michael@0 1094 }
michael@0 1095
michael@0 1096 return true;
michael@0 1097 }
michael@0 1098
michael@0 1099 bool
michael@0 1100 jit::RenumberBlocks(MIRGraph &graph)
michael@0 1101 {
michael@0 1102 size_t id = 0;
michael@0 1103 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
michael@0 1104 block->setId(id++);
michael@0 1105
michael@0 1106 return true;
michael@0 1107 }
michael@0 1108
michael@0 1109 // A Simple, Fast Dominance Algorithm by Cooper et al.
michael@0 1110 // Modified to support empty intersections for OSR, and in RPO.
michael@0 1111 static MBasicBlock *
michael@0 1112 IntersectDominators(MBasicBlock *block1, MBasicBlock *block2)
michael@0 1113 {
michael@0 1114 MBasicBlock *finger1 = block1;
michael@0 1115 MBasicBlock *finger2 = block2;
michael@0 1116
michael@0 1117 JS_ASSERT(finger1);
michael@0 1118 JS_ASSERT(finger2);
michael@0 1119
michael@0 1120 // In the original paper, the block ID comparisons are on the postorder index.
michael@0 1121 // This implementation iterates in RPO, so the comparisons are reversed.
michael@0 1122
michael@0 1123 // For this function to be called, the block must have multiple predecessors.
michael@0 1124 // If a finger is then found to be self-dominating, it must therefore be
michael@0 1125 // reachable from multiple roots through non-intersecting control flow.
michael@0 1126 // nullptr is returned in this case, to denote an empty intersection.
michael@0 1127
michael@0 1128 while (finger1->id() != finger2->id()) {
michael@0 1129 while (finger1->id() > finger2->id()) {
michael@0 1130 MBasicBlock *idom = finger1->immediateDominator();
michael@0 1131 if (idom == finger1)
michael@0 1132 return nullptr; // Empty intersection.
michael@0 1133 finger1 = idom;
michael@0 1134 }
michael@0 1135
michael@0 1136 while (finger2->id() > finger1->id()) {
michael@0 1137 MBasicBlock *idom = finger2->immediateDominator();
michael@0 1138 if (idom == finger2)
michael@0 1139 return nullptr; // Empty intersection.
michael@0 1140 finger2 = idom;
michael@0 1141 }
michael@0 1142 }
michael@0 1143 return finger1;
michael@0 1144 }
michael@0 1145
// Compute the immediate dominator of every block by iterating the dominator
// intersection (Cooper et al.) to a fixed point over the RPO. Roots (start
// block and OSR block) self-dominate; a block whose predecessors share no
// common dominator is marked self-dominating as well.
static void
ComputeImmediateDominators(MIRGraph &graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock *startBlock = *graph.begin();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock *osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            // Seed the intersection with the first predecessor.
            MBasicBlock *newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock *pred = block->getPredecessor(i);
                // Predecessors with no dominator info yet are ignored this
                // round; a later iteration of the outer loop picks them up.
                if (pred->immediateDominator() == nullptr)
                    continue;

                newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == nullptr) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        JS_ASSERT(block->immediateDominator() != nullptr);
    }
#endif
}
michael@0 1204
// Build the dominator tree: compute immediate dominators, link each block to
// its dominator-tree parent, accumulate numDominated() counts, and assign
// each block its index in a pre-order traversal of the dominator tree.
// Returns false on OOM.
bool
jit::BuildDominatorTree(MIRGraph &graph)
{
    ComputeImmediateDominators(graph);

    // Traversing through the graph in post-order means that every use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock *child = *i;
        MBasicBlock *parent = child->immediateDominator();

        // If the block only self-dominates, it has no definite parent.
        if (child == parent)
            continue;

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // An additional +1 for the child block.
        parent->addNumDominated(child->numDominated() + 1);
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        JS_ASSERT(graph.begin()->numDominated() == graph.numBlocks() - 1);
#endif
    // Now, iterate through the dominator tree and annotate every
    // block with its index in the pre-order traversal of the
    // dominator tree.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }
    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();
        block->setDomIndex(index);

        // Children are pushed onto the worklist, so siblings are processed
        // before a child's subtree (LIFO order).
        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }
        index++;
    }

    return true;
}
michael@0 1267
michael@0 1268 bool
michael@0 1269 jit::BuildPhiReverseMapping(MIRGraph &graph)
michael@0 1270 {
michael@0 1271 // Build a mapping such that given a basic block, whose successor has one or
michael@0 1272 // more phis, we can find our specific input to that phi. To make this fast
michael@0 1273 // mapping work we rely on a specific property of our structured control
michael@0 1274 // flow graph: For a block with phis, its predecessors each have only one
michael@0 1275 // successor with phis. Consider each case:
michael@0 1276 // * Blocks with less than two predecessors cannot have phis.
michael@0 1277 // * Breaks. A break always has exactly one successor, and the break
michael@0 1278 // catch block has exactly one predecessor for each break, as
michael@0 1279 // well as a final predecessor for the actual loop exit.
michael@0 1280 // * Continues. A continue always has exactly one successor, and the
michael@0 1281 // continue catch block has exactly one predecessor for each
michael@0 1282 // continue, as well as a final predecessor for the actual
michael@0 1283 // loop continuation. The continue itself has exactly one
michael@0 1284 // successor.
michael@0 1285 // * An if. Each branch as exactly one predecessor.
michael@0 1286 // * A switch. Each branch has exactly one predecessor.
michael@0 1287 // * Loop tail. A new block is always created for the exit, and if a
michael@0 1288 // break statement is present, the exit block will forward
michael@0 1289 // directly to the break block.
michael@0 1290 for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
michael@0 1291 if (block->numPredecessors() < 2) {
michael@0 1292 JS_ASSERT(block->phisEmpty());
michael@0 1293 continue;
michael@0 1294 }
michael@0 1295
michael@0 1296 // Assert on the above.
michael@0 1297 for (size_t j = 0; j < block->numPredecessors(); j++) {
michael@0 1298 MBasicBlock *pred = block->getPredecessor(j);
michael@0 1299
michael@0 1300 #ifdef DEBUG
michael@0 1301 size_t numSuccessorsWithPhis = 0;
michael@0 1302 for (size_t k = 0; k < pred->numSuccessors(); k++) {
michael@0 1303 MBasicBlock *successor = pred->getSuccessor(k);
michael@0 1304 if (!successor->phisEmpty())
michael@0 1305 numSuccessorsWithPhis++;
michael@0 1306 }
michael@0 1307 JS_ASSERT(numSuccessorsWithPhis <= 1);
michael@0 1308 #endif
michael@0 1309
michael@0 1310 pred->setSuccessorWithPhis(*block, j);
michael@0 1311 }
michael@0 1312 }
michael@0 1313
michael@0 1314 return true;
michael@0 1315 }
michael@0 1316
michael@0 1317 #ifdef DEBUG
michael@0 1318 static bool
michael@0 1319 CheckSuccessorImpliesPredecessor(MBasicBlock *A, MBasicBlock *B)
michael@0 1320 {
michael@0 1321 // Assuming B = succ(A), verify A = pred(B).
michael@0 1322 for (size_t i = 0; i < B->numPredecessors(); i++) {
michael@0 1323 if (A == B->getPredecessor(i))
michael@0 1324 return true;
michael@0 1325 }
michael@0 1326 return false;
michael@0 1327 }
michael@0 1328
michael@0 1329 static bool
michael@0 1330 CheckPredecessorImpliesSuccessor(MBasicBlock *A, MBasicBlock *B)
michael@0 1331 {
michael@0 1332 // Assuming B = pred(A), verify A = succ(B).
michael@0 1333 for (size_t i = 0; i < B->numSuccessors(); i++) {
michael@0 1334 if (A == B->getSuccessor(i))
michael@0 1335 return true;
michael@0 1336 }
michael@0 1337 return false;
michael@0 1338 }
michael@0 1339
michael@0 1340 static bool
michael@0 1341 CheckOperandImpliesUse(MNode *n, MDefinition *operand)
michael@0 1342 {
michael@0 1343 for (MUseIterator i = operand->usesBegin(); i != operand->usesEnd(); i++) {
michael@0 1344 if (i->consumer() == n)
michael@0 1345 return true;
michael@0 1346 }
michael@0 1347 return false;
michael@0 1348 }
michael@0 1349
// Assuming |use| is an entry in |def|'s use chain, verify the consumer's
// operand at the recorded index is indeed |def|.
static bool
CheckUseImpliesOperand(MDefinition *def, MUse *use)
{
    return use->consumer()->getOperand(use->index()) == def;
}
michael@0 1355 #endif // DEBUG
michael@0 1356
// Debug-only: assert structural invariants of the MIR graph that must hold
// at all times: entry/OSR block shape, successor/predecessor symmetry,
// operand/use chain consistency, phi arity, and resume-point linkage.
// No-op in release builds.
void
jit::AssertBasicGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    JS_ASSERT(graph.entryBlock()->numPredecessors() == 0);
    JS_ASSERT(graph.entryBlock()->phisEmpty());
    JS_ASSERT(!graph.entryBlock()->unreachable());

    if (MBasicBlock *osrBlock = graph.osrBlock()) {
        JS_ASSERT(osrBlock->numPredecessors() == 0);
        JS_ASSERT(osrBlock->phisEmpty());
        JS_ASSERT(osrBlock != graph.entryBlock());
        JS_ASSERT(!osrBlock->unreachable());
    }

    if (MResumePoint *resumePoint = graph.entryResumePoint())
        JS_ASSERT(resumePoint->block() == graph.entryBlock());

    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        JS_ASSERT(&block->graph() == &graph);

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        // Assert that use chains are valid for this instruction.
        for (MDefinitionIterator iter(*block); iter; iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
                JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
        }
        // Resume-point operands are only checked when their use slot has a
        // producer attached.
        for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
                if (iter->getUseFor(i)->hasProducer())
                    JS_ASSERT(CheckOperandImpliesUse(*iter, iter->getOperand(i)));
            }
        }
        // Each phi must have one operand per predecessor.
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            JS_ASSERT(phi->numOperands() == block->numPredecessors());
        }
        for (MDefinitionIterator iter(*block); iter; iter++) {
            JS_ASSERT(iter->block() == *block);
            for (MUseIterator i(iter->usesBegin()); i != iter->usesEnd(); i++)
                JS_ASSERT(CheckUseImpliesOperand(*iter, *i));

            // A resume point attached to an instruction must reference an
            // instruction in the same block.
            if (iter->isInstruction()) {
                if (MResumePoint *resume = iter->toInstruction()->resumePoint()) {
                    if (MInstruction *ins = resume->instruction())
                        JS_ASSERT(ins->block() == iter->block());
                }
            }
        }
    }

    JS_ASSERT(graph.numBlocks() == count);
#endif
}
michael@0 1419
michael@0 1420 #ifdef DEBUG
michael@0 1421 static void
michael@0 1422 AssertReversePostOrder(MIRGraph &graph)
michael@0 1423 {
michael@0 1424 // Check that every block is visited after all its predecessors (except backedges).
michael@0 1425 for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
michael@0 1426 JS_ASSERT(!block->isMarked());
michael@0 1427
michael@0 1428 for (size_t i = 0; i < block->numPredecessors(); i++) {
michael@0 1429 MBasicBlock *pred = block->getPredecessor(i);
michael@0 1430 JS_ASSERT_IF(!pred->isLoopBackedge(), pred->isMarked());
michael@0 1431 }
michael@0 1432
michael@0 1433 block->mark();
michael@0 1434 }
michael@0 1435
michael@0 1436 graph.unmarkBlocks();
michael@0 1437 }
michael@0 1438 #endif
michael@0 1439
michael@0 1440 void
michael@0 1441 jit::AssertGraphCoherency(MIRGraph &graph)
michael@0 1442 {
michael@0 1443 #ifdef DEBUG
michael@0 1444 if (!js_JitOptions.checkGraphConsistency)
michael@0 1445 return;
michael@0 1446 AssertBasicGraphCoherency(graph);
michael@0 1447 AssertReversePostOrder(graph);
michael@0 1448 #endif
michael@0 1449 }
michael@0 1450
michael@0 1451 void
michael@0 1452 jit::AssertExtendedGraphCoherency(MIRGraph &graph)
michael@0 1453 {
michael@0 1454 // Checks the basic GraphCoherency but also other conditions that
michael@0 1455 // do not hold immediately (such as the fact that critical edges
michael@0 1456 // are split)
michael@0 1457
michael@0 1458 #ifdef DEBUG
michael@0 1459 if (!js_JitOptions.checkGraphConsistency)
michael@0 1460 return;
michael@0 1461 AssertGraphCoherency(graph);
michael@0 1462
michael@0 1463 uint32_t idx = 0;
michael@0 1464 for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
michael@0 1465 JS_ASSERT(block->id() == idx++);
michael@0 1466
michael@0 1467 // No critical edges:
michael@0 1468 if (block->numSuccessors() > 1)
michael@0 1469 for (size_t i = 0; i < block->numSuccessors(); i++)
michael@0 1470 JS_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
michael@0 1471
michael@0 1472 if (block->isLoopHeader()) {
michael@0 1473 JS_ASSERT(block->numPredecessors() == 2);
michael@0 1474 MBasicBlock *backedge = block->getPredecessor(1);
michael@0 1475 JS_ASSERT(backedge->id() >= block->id());
michael@0 1476 JS_ASSERT(backedge->numSuccessors() == 1);
michael@0 1477 JS_ASSERT(backedge->getSuccessor(0) == *block);
michael@0 1478 }
michael@0 1479
michael@0 1480 if (!block->phisEmpty()) {
michael@0 1481 for (size_t i = 0; i < block->numPredecessors(); i++) {
michael@0 1482 MBasicBlock *pred = block->getPredecessor(i);
michael@0 1483 JS_ASSERT(pred->successorWithPhis() == *block);
michael@0 1484 JS_ASSERT(pred->positionInPhiSuccessor() == i);
michael@0 1485 }
michael@0 1486 }
michael@0 1487
michael@0 1488 uint32_t successorWithPhis = 0;
michael@0 1489 for (size_t i = 0; i < block->numSuccessors(); i++)
michael@0 1490 if (!block->getSuccessor(i)->phisEmpty())
michael@0 1491 successorWithPhis++;
michael@0 1492
michael@0 1493 JS_ASSERT(successorWithPhis <= 1);
michael@0 1494 JS_ASSERT_IF(successorWithPhis, block->successorWithPhis() != nullptr);
michael@0 1495
michael@0 1496 // I'd like to assert this, but it's not necc. true. Sometimes we set this
michael@0 1497 // flag to non-nullptr just because a successor has multiple preds, even if it
michael@0 1498 // does not actually have any phis.
michael@0 1499 //
michael@0 1500 // JS_ASSERT_IF(!successorWithPhis, block->successorWithPhis() == nullptr);
michael@0 1501 }
michael@0 1502 #endif
michael@0 1503 }
michael@0 1504
michael@0 1505
// Record of a bounds check that may subsume later, hash-equivalent checks.
struct BoundsCheckInfo
{
    MBoundsCheck *check;   // The (potentially) dominating bounds check.
    uint32_t validUntil;   // Pre-order traversal index past which |check| no
                           // longer dominates (see FindDominatingBoundsCheck).
};

// Map from BoundsCheckHashIgnoreOffset hashes to the dominating check for
// that hash, used by bounds check elimination below.
typedef HashMap<uint32_t,
                BoundsCheckInfo,
                DefaultHasher<uint32_t>,
                IonAllocPolicy> BoundsCheckMap;
michael@0 1516
michael@0 1517 // Compute a hash for bounds checks which ignores constant offsets in the index.
michael@0 1518 static HashNumber
michael@0 1519 BoundsCheckHashIgnoreOffset(MBoundsCheck *check)
michael@0 1520 {
michael@0 1521 SimpleLinearSum indexSum = ExtractLinearSum(check->index());
michael@0 1522 uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
michael@0 1523 uintptr_t length = uintptr_t(check->length());
michael@0 1524 return index ^ length;
michael@0 1525 }
michael@0 1526
michael@0 1527 static MBoundsCheck *
michael@0 1528 FindDominatingBoundsCheck(BoundsCheckMap &checks, MBoundsCheck *check, size_t index)
michael@0 1529 {
michael@0 1530 // See the comment in ValueNumberer::findDominatingDef.
michael@0 1531 HashNumber hash = BoundsCheckHashIgnoreOffset(check);
michael@0 1532 BoundsCheckMap::Ptr p = checks.lookup(hash);
michael@0 1533 if (!p || index > p->value().validUntil) {
michael@0 1534 // We didn't find a dominating bounds check.
michael@0 1535 BoundsCheckInfo info;
michael@0 1536 info.check = check;
michael@0 1537 info.validUntil = index + check->block()->numDominated();
michael@0 1538
michael@0 1539 if(!checks.put(hash, info))
michael@0 1540 return nullptr;
michael@0 1541
michael@0 1542 return check;
michael@0 1543 }
michael@0 1544
michael@0 1545 return p->value().check;
michael@0 1546 }
michael@0 1547
michael@0 1548 // Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
michael@0 1549 SimpleLinearSum
michael@0 1550 jit::ExtractLinearSum(MDefinition *ins)
michael@0 1551 {
michael@0 1552 if (ins->isBeta())
michael@0 1553 ins = ins->getOperand(0);
michael@0 1554
michael@0 1555 if (ins->type() != MIRType_Int32)
michael@0 1556 return SimpleLinearSum(ins, 0);
michael@0 1557
michael@0 1558 if (ins->isConstant()) {
michael@0 1559 const Value &v = ins->toConstant()->value();
michael@0 1560 JS_ASSERT(v.isInt32());
michael@0 1561 return SimpleLinearSum(nullptr, v.toInt32());
michael@0 1562 } else if (ins->isAdd() || ins->isSub()) {
michael@0 1563 MDefinition *lhs = ins->getOperand(0);
michael@0 1564 MDefinition *rhs = ins->getOperand(1);
michael@0 1565 if (lhs->type() == MIRType_Int32 && rhs->type() == MIRType_Int32) {
michael@0 1566 SimpleLinearSum lsum = ExtractLinearSum(lhs);
michael@0 1567 SimpleLinearSum rsum = ExtractLinearSum(rhs);
michael@0 1568
michael@0 1569 if (lsum.term && rsum.term)
michael@0 1570 return SimpleLinearSum(ins, 0);
michael@0 1571
michael@0 1572 // Check if this is of the form <SUM> + n, n + <SUM> or <SUM> - n.
michael@0 1573 if (ins->isAdd()) {
michael@0 1574 int32_t constant;
michael@0 1575 if (!SafeAdd(lsum.constant, rsum.constant, &constant))
michael@0 1576 return SimpleLinearSum(ins, 0);
michael@0 1577 return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
michael@0 1578 } else if (lsum.term) {
michael@0 1579 int32_t constant;
michael@0 1580 if (!SafeSub(lsum.constant, rsum.constant, &constant))
michael@0 1581 return SimpleLinearSum(ins, 0);
michael@0 1582 return SimpleLinearSum(lsum.term, constant);
michael@0 1583 }
michael@0 1584 }
michael@0 1585 }
michael@0 1586
michael@0 1587 return SimpleLinearSum(ins, 0);
michael@0 1588 }
michael@0 1589
// Extract a linear inequality holding when a boolean test goes in the
// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
//
// Returns false when no such inequality can be extracted (non-compare test,
// non-int32 comparison, equality operator, or int32 overflow while
// normalizing the constant).
bool
jit::ExtractLinearInequality(MTest *test, BranchDirection direction,
                             SimpleLinearSum *plhs, MDefinition **prhs, bool *plessEqual)
{
    // Only tests driven by a compare instruction are understood.
    if (!test->getOperand(0)->isCompare())
        return false;

    MCompare *compare = test->getOperand(0)->toCompare();

    MDefinition *lhs = compare->getOperand(0);
    MDefinition *rhs = compare->getOperand(1);

    // TODO: optimize Compare_UInt32
    if (!compare->isInt32Comparison())
        return false;

    JS_ASSERT(lhs->type() == MIRType_Int32);
    JS_ASSERT(rhs->type() == MIRType_Int32);

    // On the false branch, the inequality that holds is the negation of the
    // compare's operator.
    JSOp jsop = compare->jsop();
    if (direction == FALSE_BRANCH)
        jsop = NegateCompareOp(jsop);

    SimpleLinearSum lsum = ExtractLinearSum(lhs);
    SimpleLinearSum rsum = ExtractLinearSum(rhs);

    // Fold rhs's constant into the lhs sum, so the result compares
    // 'lsum.term + lsum.constant' against the bare 'rsum.term'.
    if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant))
        return false;

    // Normalize operations to use <= or >=.
    switch (jsop) {
      case JSOP_LE:
        *plessEqual = true;
        break;
      case JSOP_LT:
        /* x < y ==> x + 1 <= y */
        if (!SafeAdd(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = true;
        break;
      case JSOP_GE:
        *plessEqual = false;
        break;
      case JSOP_GT:
        /* x > y ==> x - 1 >= y */
        if (!SafeSub(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = false;
        break;
      default:
        // Equality and any other operators do not yield an inequality.
        return false;
    }

    *plhs = lsum;
    *prhs = rsum.term;

    return true;
}
michael@0 1650
// Try to eliminate |dominated| given a dominating, hash-equivalent bounds
// check, widening the dominating check so it covers both ranges. Sets
// *eliminated when the check can be discarded. Returns false to abort
// compilation (OOM, or int32 overflow while merging ranges after the check
// has already been marked eliminated).
static bool
TryEliminateBoundsCheck(BoundsCheckMap &checks, size_t blockIndex, MBoundsCheck *dominated, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    // Replace all uses of the bounds check with the actual index.
    // This is (a) necessary, because we can coalesce two different
    // bounds checks and would otherwise use the wrong index and
    // (b) helps register allocation. Note that this is safe since
    // no other pass after bounds check elimination moves instructions.
    dominated->replaceAllUsesWith(dominated->index());

    // Non-movable checks must stay in place and cannot be coalesced.
    if (!dominated->isMovable())
        return true;

    // nullptr here means OOM inside FindDominatingBoundsCheck.
    MBoundsCheck *dominating = FindDominatingBoundsCheck(checks, dominated, blockIndex);
    if (!dominating)
        return false;

    if (dominating == dominated) {
        // We didn't find a dominating bounds check.
        return true;
    }

    // We found two bounds checks with the same hash number, but we still have
    // to make sure the lengths and index terms are equal.
    if (dominating->length() != dominated->length())
        return true;

    SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
    SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

    // Both terms should be nullptr or the same definition.
    if (sumA.term != sumB.term)
        return true;

    // This bounds check is redundant.
    *eliminated = true;

    // Normalize the ranges according to the constant offsets in the two indexes.
    int32_t minimumA, maximumA, minimumB, maximumB;
    if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
        !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
        !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
        !SafeAdd(sumB.constant, dominated->maximum(), &maximumB))
    {
        return false;
    }

    // Update the dominating check to cover both ranges, denormalizing the
    // result per the constant offset in the index.
    int32_t newMinimum, newMaximum;
    if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) ||
        !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum))
    {
        return false;
    }

    dominating->setMinimum(newMinimum);
    dominating->setMaximum(newMaximum);
    return true;
}
michael@0 1713
// Try to eliminate |barrier| given a single dominating test: if the test
// filters out every value (null and/or undefined) the barrier would filter,
// the barrier is redundant and *eliminated is set.
static void
TryEliminateTypeBarrierFromTest(MTypeBarrier *barrier, bool filtersNull, bool filtersUndefined,
                                MTest *test, BranchDirection direction, bool *eliminated)
{
    JS_ASSERT(filtersNull || filtersUndefined);

    // Watch for code patterns similar to 'if (x.f) { ... = x.f }'.  If x.f
    // is either an object or null/undefined, there will be a type barrier on
    // the latter read as the null/undefined value is never realized there.
    // The type barrier can be eliminated, however, by looking at tests
    // performed on the result of the first operation that filter out all
    // types that have been seen in the first access but not the second.

    // A test 'if (x.f)' filters both null and undefined.

    // Disregard the possible unbox added before the Typebarrier for checking.
    MDefinition *input = barrier->input();
    MUnbox *inputUnbox = nullptr;
    if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
        inputUnbox = input->toUnbox();
        input = inputUnbox->input();
    }

    // Ask the test which definition it filters, and which of null/undefined
    // it rules out on the given branch direction.
    MDefinition *subject = nullptr;
    bool removeUndefined;
    bool removeNull;
    test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);

    // The Test doesn't filter undefined nor null.
    if (!subject)
        return;

    // Make sure the subject equals the input to the TypeBarrier.
    if (subject != input)
        return;

    // When the TypeBarrier filters undefined, the test must also filter
    // undefined before the TypeBarrier can be removed.
    if (!removeUndefined && filtersUndefined)
        return;

    // When the TypeBarrier filters null, the test must also filter null
    // before the TypeBarrier can be removed.
    if (!removeNull && filtersNull)
        return;

    // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
    // but made infallible.
    *eliminated = true;
    if (inputUnbox)
        inputUnbox->makeInfallible();
    barrier->replaceAllUsesWith(barrier->input());
}
michael@0 1767
// Try to eliminate |barrier| by walking the dominator tree and giving each
// dominating branch a chance to prove the barrier redundant. *eliminated
// reports success; the return value follows the pass convention (this
// function itself never fails).
static bool
TryEliminateTypeBarrier(MTypeBarrier *barrier, bool *eliminated)
{
    JS_ASSERT(!*eliminated);

    const types::TemporaryTypeSet *barrierTypes = barrier->resultTypeSet();
    const types::TemporaryTypeSet *inputTypes = barrier->input()->resultTypeSet();

    // Disregard the possible unbox added before the Typebarrier.
    if (barrier->input()->isUnbox() && barrier->input()->toUnbox()->mode() != MUnbox::Fallible)
        inputTypes = barrier->input()->toUnbox()->input()->resultTypeSet();

    // Without type sets on both sides there is nothing to compare.
    if (!barrierTypes || !inputTypes)
        return true;

    // The barrier is a candidate only if it filters null and/or undefined
    // out of the input's observed types.
    bool filtersNull = barrierTypes->filtersType(inputTypes, types::Type::NullType());
    bool filtersUndefined = barrierTypes->filtersType(inputTypes, types::Type::UndefinedType());

    if (!filtersNull && !filtersUndefined)
        return true;

    // Walk up the dominator tree from the barrier's block to the root
    // (a self-dominating block), testing each dominating branch.
    MBasicBlock *block = barrier->block();
    while (true) {
        BranchDirection direction;
        MTest *test = block->immediateDominatorBranch(&direction);

        if (test) {
            TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
                                            test, direction, eliminated);
        }

        MBasicBlock *previous = block->immediateDominator();
        if (previous == block)
            break;
        block = previous;
    }

    return true;
}
michael@0 1807
// Eliminate checks which are redundant given each other or other instructions.
//
// A type barrier is considered redundant if all missing types have been tested
// for by earlier control instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool
jit::EliminateRedundantChecks(MIRGraph &graph)
{
    BoundsCheckMap checks(graph.alloc());

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock *, 1, IonAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock *block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock *block = worklist.popCopy();

        // Push every block this one immediately dominates, so the stack
        // yields a pre-order walk of the dominator tree.
        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            bool eliminated = false;

            if (iter->isBoundsCheck()) {
                if (!TryEliminateBoundsCheck(checks, index, iter->toBoundsCheck(), &eliminated))
                    return false;
            } else if (iter->isTypeBarrier()) {
                if (!TryEliminateTypeBarrier(iter->toTypeBarrier(), &eliminated))
                    return false;
            } else if (iter->isConvertElementsToDoubles()) {
                // Now that code motion passes have finished, replace any
                // ConvertElementsToDoubles with the actual elements.
                MConvertElementsToDoubles *ins = iter->toConvertElementsToDoubles();
                ins->replaceAllUsesWith(ins->elements());
            }

            // discardDefAt advances the iterator past the removed definition.
            if (eliminated)
                iter = block->discardDefAt(iter);
            else
                iter++;
        }
        index++;
    }

    JS_ASSERT(index == graph.numBlocks());
    return true;
}
michael@0 1882
michael@0 1883 // If the given block contains a goto and nothing interesting before that,
michael@0 1884 // return the goto. Return nullptr otherwise.
michael@0 1885 static LGoto *
michael@0 1886 FindLeadingGoto(LBlock *bb)
michael@0 1887 {
michael@0 1888 for (LInstructionIterator ins(bb->begin()); ins != bb->end(); ins++) {
michael@0 1889 // Ignore labels.
michael@0 1890 if (ins->isLabel())
michael@0 1891 continue;
michael@0 1892 // If we have a goto, we're good to go.
michael@0 1893 if (ins->isGoto())
michael@0 1894 return ins->toGoto();
michael@0 1895 break;
michael@0 1896 }
michael@0 1897 return nullptr;
michael@0 1898 }
michael@0 1899
// Eliminate blocks containing nothing interesting besides gotos. These are
// often created by optimizer, which splits all critical edges. If these
// splits end up being unused after optimization and register allocation,
// fold them back away to avoid unnecessary branching.
bool
jit::UnsplitEdges(LIRGraph *lir)
{
    for (size_t i = 0; i < lir->numBlocks(); i++) {
        LBlock *bb = lir->getBlock(i);
        MBasicBlock *mirBlock = bb->mir();

        // Renumber the MIR blocks as we go, since we may remove some.
        mirBlock->setId(i);

        // Register allocation is done by this point, so we don't need the phis
        // anymore. Clear them to avoid needing to keep them current as we edit
        // the CFG.
        bb->clearPhis();
        mirBlock->discardAllPhis();

        // First make sure the MIR block looks sane. Some of these checks may be
        // over-conservative, but we're attempting to keep everything in MIR
        // current as we modify the LIR, so only proceed if the MIR is simple.
        if (mirBlock->numPredecessors() == 0 || mirBlock->numSuccessors() != 1 ||
            !mirBlock->begin()->isGoto())
        {
            continue;
        }

        // The MIR block is empty, but check the LIR block too (in case the
        // register allocator inserted spill code there, or whatever).
        LGoto *theGoto = FindLeadingGoto(bb);
        if (!theGoto)
            continue;
        MBasicBlock *target = theGoto->target();
        // Never fold a self-loop, and only fold an edge onto the block's
        // sole MIR successor.
        if (target == mirBlock || target != mirBlock->getSuccessor(0))
            continue;

        // If we haven't yet cleared the phis for the successor, do so now so
        // that the CFG manipulation routines don't trip over them.
        if (!target->phisEmpty()) {
            target->discardAllPhis();
            target->lir()->clearPhis();
        }

        // Edit the CFG to remove lir/mirBlock and reconnect all its edges.
        for (size_t j = 0; j < mirBlock->numPredecessors(); j++) {
            MBasicBlock *mirPred = mirBlock->getPredecessor(j);

            // Retarget each MIR predecessor edge to point at |target|.
            for (size_t k = 0; k < mirPred->numSuccessors(); k++) {
                if (mirPred->getSuccessor(k) == mirBlock) {
                    mirPred->replaceSuccessor(k, target);
                    if (!target->addPredecessorWithoutPhis(mirPred))
                        return false;
                }
            }

            // Retarget the predecessor's LIR terminator as well.
            LInstruction *predTerm = *mirPred->lir()->rbegin();
            for (size_t k = 0; k < predTerm->numSuccessors(); k++) {
                if (predTerm->getSuccessor(k) == mirBlock)
                    predTerm->setSuccessor(k, target);
            }
        }
        target->removePredecessor(mirBlock);

        // Zap the block.
        lir->removeBlock(i);
        lir->mir().removeBlock(mirBlock);
        // Compensate for the loop increment: block i was just removed.
        --i;
    }

    return true;
}
michael@0 1973
michael@0 1974 bool
michael@0 1975 LinearSum::multiply(int32_t scale)
michael@0 1976 {
michael@0 1977 for (size_t i = 0; i < terms_.length(); i++) {
michael@0 1978 if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
michael@0 1979 return false;
michael@0 1980 }
michael@0 1981 return SafeMul(scale, constant_, &constant_);
michael@0 1982 }
michael@0 1983
michael@0 1984 bool
michael@0 1985 LinearSum::add(const LinearSum &other)
michael@0 1986 {
michael@0 1987 for (size_t i = 0; i < other.terms_.length(); i++) {
michael@0 1988 if (!add(other.terms_[i].term, other.terms_[i].scale))
michael@0 1989 return false;
michael@0 1990 }
michael@0 1991 return add(other.constant_);
michael@0 1992 }
michael@0 1993
michael@0 1994 bool
michael@0 1995 LinearSum::add(MDefinition *term, int32_t scale)
michael@0 1996 {
michael@0 1997 JS_ASSERT(term);
michael@0 1998
michael@0 1999 if (scale == 0)
michael@0 2000 return true;
michael@0 2001
michael@0 2002 if (term->isConstant()) {
michael@0 2003 int32_t constant = term->toConstant()->value().toInt32();
michael@0 2004 if (!SafeMul(constant, scale, &constant))
michael@0 2005 return false;
michael@0 2006 return add(constant);
michael@0 2007 }
michael@0 2008
michael@0 2009 for (size_t i = 0; i < terms_.length(); i++) {
michael@0 2010 if (term == terms_[i].term) {
michael@0 2011 if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale))
michael@0 2012 return false;
michael@0 2013 if (terms_[i].scale == 0) {
michael@0 2014 terms_[i] = terms_.back();
michael@0 2015 terms_.popBack();
michael@0 2016 }
michael@0 2017 return true;
michael@0 2018 }
michael@0 2019 }
michael@0 2020
michael@0 2021 terms_.append(LinearTerm(term, scale));
michael@0 2022 return true;
michael@0 2023 }
michael@0 2024
// Add a constant to this sum, returning false on int32 overflow.
bool
LinearSum::add(int32_t constant)
{
    return SafeAdd(constant, constant_, &constant_);
}
michael@0 2030
// Print this sum to |sp| as a sequence of scaled terms ('#id' naming a
// definition by id) followed by the constant, e.g. "#3+2*#5-1".
void
LinearSum::print(Sprinter &sp) const
{
    for (size_t i = 0; i < terms_.length(); i++) {
        int32_t scale = terms_[i].scale;
        int32_t id = terms_[i].term->id();
        // add() removes terms whose scale reaches zero, so scale is nonzero.
        JS_ASSERT(scale);
        if (scale > 0) {
            // Positive terms need an explicit '+' separator after the first.
            if (i)
                sp.printf("+");
            if (scale == 1)
                sp.printf("#%d", id);
            else
                sp.printf("%d*#%d", scale, id);
        } else if (scale == -1) {
            sp.printf("-#%d", id);
        } else {
            // Other negative scales carry their own '-' sign via %d.
            sp.printf("%d*#%d", scale, id);
        }
    }
    if (constant_ > 0)
        sp.printf("+%d", constant_);
    else if (constant_ < 0)
        sp.printf("%d", constant_);
}
michael@0 2056
michael@0 2057 void
michael@0 2058 LinearSum::dump(FILE *fp) const
michael@0 2059 {
michael@0 2060 Sprinter sp(GetIonContext()->cx);
michael@0 2061 sp.init();
michael@0 2062 print(sp);
michael@0 2063 fprintf(fp, "%s\n", sp.string());
michael@0 2064 }
michael@0 2065
// Debug helper: dump this sum to stderr.
void
LinearSum::dump() const
{
    dump(stderr);
}
michael@0 2071
// Analyze one use |ins| of the |this| value inside a constructor for |type|.
// A CallSetProperty use may add a definite property to |baseobj| (recording
// an initializer entry); a CallGetProperty use records the property as read;
// other recognized uses are harmless. *phandled is set when the use is
// accounted for, so an unhandled use lets the caller treat |this| as
// escaping. Returns false only on error (e.g. OOM).
static bool
AnalyzePoppedThis(JSContext *cx, types::TypeObject *type,
                  MDefinition *thisValue, MInstruction *ins, bool definitelyExecuted,
                  HandleObject baseobj,
                  Vector<types::TypeNewScript::Initializer> *initializerList,
                  Vector<PropertyName *> *accessedProperties,
                  bool *phandled)
{
    // Determine the effect that a use of the |this| value when calling |new|
    // on a script has on the properties definitely held by the new object.

    if (ins->isCallSetProperty()) {
        MCallSetProperty *setprop = ins->toCallSetProperty();

        // Only writes to |this| itself are of interest here.
        if (setprop->object() != thisValue)
            return true;

        // Don't use GetAtomId here, we need to watch for SETPROP on
        // integer properties and bail out. We can't mark the aggregate
        // JSID_VOID type property as being in a definite slot.
        if (setprop->name() == cx->names().prototype ||
            setprop->name() == cx->names().proto ||
            setprop->name() == cx->names().constructor)
        {
            return true;
        }

        // Ignore assignments to properties that were already written to.
        if (baseobj->nativeLookup(cx, NameToId(setprop->name()))) {
            *phandled = true;
            return true;
        }

        // Don't add definite properties for properties that were already
        // read in the constructor.
        for (size_t i = 0; i < accessedProperties->length(); i++) {
            if ((*accessedProperties)[i] == setprop->name())
                return true;
        }

        // Don't add definite properties to an object which won't fit in its
        // fixed slots.
        if (GetGCKindSlots(gc::GetGCObjectKind(baseobj->slotSpan() + 1)) <= baseobj->slotSpan())
            return true;

        // Assignments to new properties must always execute.
        if (!definitelyExecuted)
            return true;

        RootedId id(cx, NameToId(setprop->name()));
        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
            // The prototype chain already contains a getter/setter for this
            // property, or type information is too imprecise.
            return true;
        }

        // Install the property on |baseobj|; the slot span must grow, and
        // the object must stay out of dictionary mode for the definite-slot
        // bookkeeping to be valid.
        DebugOnly<unsigned> slotSpan = baseobj->slotSpan();
        if (!DefineNativeProperty(cx, baseobj, id, UndefinedHandleValue, nullptr, nullptr,
                                  JSPROP_ENUMERATE))
        {
            return false;
        }
        JS_ASSERT(baseobj->slotSpan() != slotSpan);
        JS_ASSERT(!baseobj->inDictionaryMode());

        // Collect the chain of caller resume points (innermost first) so the
        // SETPROP_FRAME entries below can be emitted outermost first.
        Vector<MResumePoint *> callerResumePoints(cx);
        MBasicBlock *block = ins->block();
        for (MResumePoint *rp = block->callerResumePoint();
             rp;
             block = rp->block(), rp = block->callerResumePoint())
        {
            JSScript *script = rp->block()->info().script();
            // NOTE(review): a failure here bails out of the analysis for
            // this use (returns true, not false) — confirm this matches
            // AddClearDefiniteFunctionUsesInScript's contract.
            if (!types::AddClearDefiniteFunctionUsesInScript(cx, type, script, block->info().script()))
                return true;
            if (!callerResumePoints.append(rp))
                return false;
        }

        for (int i = callerResumePoints.length() - 1; i >= 0; i--) {
            MResumePoint *rp = callerResumePoints[i];
            JSScript *script = rp->block()->info().script();
            types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP_FRAME,
                                                    script->pcToOffset(rp->pc()));
            if (!initializerList->append(entry))
                return false;
        }

        JSScript *script = ins->block()->info().script();
        types::TypeNewScript::Initializer entry(types::TypeNewScript::Initializer::SETPROP,
                                                script->pcToOffset(setprop->resumePoint()->pc()));
        if (!initializerList->append(entry))
            return false;

        *phandled = true;
        return true;
    }

    if (ins->isCallGetProperty()) {
        MCallGetProperty *get = ins->toCallGetProperty();

        /*
         * Properties can be read from the 'this' object if the following hold:
         *
         * - The read is not on a getter along the prototype chain, which
         *   could cause 'this' to escape.
         *
         * - The accessed property is either already a definite property or
         *   is not later added as one. Since the definite properties are
         *   added to the object at the point of its creation, reading a
         *   definite property before it is assigned could incorrectly hit.
         */
        RootedId id(cx, NameToId(get->name()));
        if (!baseobj->nativeLookup(cx, id) && !accessedProperties->append(get->name()))
            return false;

        if (!types::AddClearDefiniteGetterSetterForPrototypeChain(cx, type, id)) {
            // The |this| value can escape if any property reads it does go
            // through a getter.
            return true;
        }

        *phandled = true;
        return true;
    }

    // Post-write barriers on |this| do not let it escape.
    if (ins->isPostWriteBarrier()) {
        *phandled = true;
        return true;
    }

    return true;
}
michael@0 2204
michael@0 2205 static int
michael@0 2206 CmpInstructions(const void *a, const void *b)
michael@0 2207 {
michael@0 2208 return (*static_cast<MInstruction * const *>(a))->id() -
michael@0 2209 (*static_cast<MInstruction * const *>(b))->id();
michael@0 2210 }
michael@0 2211
michael@0 2212 bool
michael@0 2213 jit::AnalyzeNewScriptProperties(JSContext *cx, JSFunction *fun,
michael@0 2214 types::TypeObject *type, HandleObject baseobj,
michael@0 2215 Vector<types::TypeNewScript::Initializer> *initializerList)
michael@0 2216 {
michael@0 2217 JS_ASSERT(cx->compartment()->activeAnalysis);
michael@0 2218
michael@0 2219 // When invoking 'new' on the specified script, try to find some properties
michael@0 2220 // which will definitely be added to the created object before it has a
michael@0 2221 // chance to escape and be accessed elsewhere.
michael@0 2222
michael@0 2223 RootedScript script(cx, fun->getOrCreateScript(cx));
michael@0 2224 if (!script)
michael@0 2225 return false;
michael@0 2226
michael@0 2227 if (!jit::IsIonEnabled(cx) || !jit::IsBaselineEnabled(cx) ||
michael@0 2228 !script->compileAndGo() || !script->canBaselineCompile())
michael@0 2229 {
michael@0 2230 return true;
michael@0 2231 }
michael@0 2232
michael@0 2233 static const uint32_t MAX_SCRIPT_SIZE = 2000;
michael@0 2234 if (script->length() > MAX_SCRIPT_SIZE)
michael@0 2235 return true;
michael@0 2236
michael@0 2237 Vector<PropertyName *> accessedProperties(cx);
michael@0 2238
michael@0 2239 LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
michael@0 2240
michael@0 2241 TempAllocator temp(&alloc);
michael@0 2242 IonContext ictx(cx, &temp);
michael@0 2243
michael@0 2244 if (!cx->compartment()->ensureJitCompartmentExists(cx))
michael@0 2245 return false;
michael@0 2246
michael@0 2247 if (!script->hasBaselineScript()) {
michael@0 2248 MethodStatus status = BaselineCompile(cx, script);
michael@0 2249 if (status == Method_Error)
michael@0 2250 return false;
michael@0 2251 if (status != Method_Compiled)
michael@0 2252 return true;
michael@0 2253 }
michael@0 2254
michael@0 2255 types::TypeScript::SetThis(cx, script, types::Type::ObjectType(type));
michael@0 2256
michael@0 2257 MIRGraph graph(&temp);
michael@0 2258 CompileInfo info(script, fun,
michael@0 2259 /* osrPc = */ nullptr, /* constructing = */ false,
michael@0 2260 DefinitePropertiesAnalysis,
michael@0 2261 script->needsArgsObj());
michael@0 2262
michael@0 2263 AutoTempAllocatorRooter root(cx, &temp);
michael@0 2264
michael@0 2265 const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);
michael@0 2266
michael@0 2267 types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
michael@0 2268 if (!constraints) {
michael@0 2269 js_ReportOutOfMemory(cx);
michael@0 2270 return false;
michael@0 2271 }
michael@0 2272
michael@0 2273 BaselineInspector inspector(script);
michael@0 2274 const JitCompileOptions options(cx);
michael@0 2275
michael@0 2276 IonBuilder builder(cx, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
michael@0 2277 &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
michael@0 2278
michael@0 2279 if (!builder.build()) {
michael@0 2280 if (builder.abortReason() == AbortReason_Alloc)
michael@0 2281 return false;
michael@0 2282 return true;
michael@0 2283 }
michael@0 2284
michael@0 2285 types::FinishDefinitePropertiesAnalysis(cx, constraints);
michael@0 2286
michael@0 2287 if (!SplitCriticalEdges(graph))
michael@0 2288 return false;
michael@0 2289
michael@0 2290 if (!RenumberBlocks(graph))
michael@0 2291 return false;
michael@0 2292
michael@0 2293 if (!BuildDominatorTree(graph))
michael@0 2294 return false;
michael@0 2295
michael@0 2296 if (!EliminatePhis(&builder, graph, AggressiveObservability))
michael@0 2297 return false;
michael@0 2298
michael@0 2299 MDefinition *thisValue = graph.begin()->getSlot(info.thisSlot());
michael@0 2300
michael@0 2301 // Get a list of instructions using the |this| value in the order they
michael@0 2302 // appear in the graph.
michael@0 2303 Vector<MInstruction *> instructions(cx);
michael@0 2304
michael@0 2305 for (MUseDefIterator uses(thisValue); uses; uses++) {
michael@0 2306 MDefinition *use = uses.def();
michael@0 2307
michael@0 2308 // Don't track |this| through assignments to phis.
michael@0 2309 if (!use->isInstruction())
michael@0 2310 return true;
michael@0 2311
michael@0 2312 if (!instructions.append(use->toInstruction()))
michael@0 2313 return false;
michael@0 2314 }
michael@0 2315
michael@0 2316 // Sort the instructions to visit in increasing order.
michael@0 2317 qsort(instructions.begin(), instructions.length(),
michael@0 2318 sizeof(MInstruction *), CmpInstructions);
michael@0 2319
michael@0 2320 // Find all exit blocks in the graph.
michael@0 2321 Vector<MBasicBlock *> exitBlocks(cx);
michael@0 2322 for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
michael@0 2323 if (!block->numSuccessors() && !exitBlocks.append(*block))
michael@0 2324 return false;
michael@0 2325 }
michael@0 2326
michael@0 2327 for (size_t i = 0; i < instructions.length(); i++) {
michael@0 2328 MInstruction *ins = instructions[i];
michael@0 2329
michael@0 2330 // Track whether the use of |this| is in unconditional code, i.e.
michael@0 2331 // the block dominates all graph exits.
michael@0 2332 bool definitelyExecuted = true;
michael@0 2333 for (size_t i = 0; i < exitBlocks.length(); i++) {
michael@0 2334 for (MBasicBlock *exit = exitBlocks[i];
michael@0 2335 exit != ins->block();
michael@0 2336 exit = exit->immediateDominator())
michael@0 2337 {
michael@0 2338 if (exit == exit->immediateDominator()) {
michael@0 2339 definitelyExecuted = false;
michael@0 2340 break;
michael@0 2341 }
michael@0 2342 }
michael@0 2343 }
michael@0 2344
michael@0 2345 // Also check to see if the instruction is inside a loop body. Even if
michael@0 2346 // an access will always execute in the script, if it executes multiple
michael@0 2347 // times then we can get confused when rolling back objects while
michael@0 2348 // clearing the new script information.
michael@0 2349 if (ins->block()->loopDepth() != 0)
michael@0 2350 definitelyExecuted = false;
michael@0 2351
michael@0 2352 bool handled = false;
michael@0 2353 if (!AnalyzePoppedThis(cx, type, thisValue, ins, definitelyExecuted,
michael@0 2354 baseobj, initializerList, &accessedProperties, &handled))
michael@0 2355 {
michael@0 2356 return false;
michael@0 2357 }
michael@0 2358 if (!handled)
michael@0 2359 return true;
michael@0 2360 }
michael@0 2361
michael@0 2362 return true;
michael@0 2363 }
michael@0 2364
michael@0 2365 static bool
michael@0 2366 ArgumentsUseCanBeLazy(JSContext *cx, JSScript *script, MInstruction *ins, size_t index)
michael@0 2367 {
michael@0 2368 // We can read the frame's arguments directly for f.apply(x, arguments).
michael@0 2369 if (ins->isCall()) {
michael@0 2370 if (*ins->toCall()->resumePoint()->pc() == JSOP_FUNAPPLY &&
michael@0 2371 ins->toCall()->numActualArgs() == 2 &&
michael@0 2372 index == MCall::IndexOfArgument(1))
michael@0 2373 {
michael@0 2374 return true;
michael@0 2375 }
michael@0 2376 }
michael@0 2377
michael@0 2378 // arguments[i] can read fp->canonicalActualArg(i) directly.
michael@0 2379 if (ins->isCallGetElement() && index == 0)
michael@0 2380 return true;
michael@0 2381
michael@0 2382 // arguments.length length can read fp->numActualArgs() directly.
michael@0 2383 if (ins->isCallGetProperty() && index == 0 && ins->toCallGetProperty()->name() == cx->names().length)
michael@0 2384 return true;
michael@0 2385
michael@0 2386 return false;
michael@0 2387 }
michael@0 2388
michael@0 2389 bool
michael@0 2390 jit::AnalyzeArgumentsUsage(JSContext *cx, JSScript *scriptArg)
michael@0 2391 {
michael@0 2392 RootedScript script(cx, scriptArg);
michael@0 2393 types::AutoEnterAnalysis enter(cx);
michael@0 2394
michael@0 2395 JS_ASSERT(!script->analyzedArgsUsage());
michael@0 2396
michael@0 2397 // Treat the script as needing an arguments object until we determine it
michael@0 2398 // does not need one. This both allows us to easily see where the arguments
michael@0 2399 // object can escape through assignments to the function's named arguments,
michael@0 2400 // and also simplifies handling of early returns.
michael@0 2401 script->setNeedsArgsObj(true);
michael@0 2402
michael@0 2403 if (!jit::IsIonEnabled(cx) || !script->compileAndGo())
michael@0 2404 return true;
michael@0 2405
michael@0 2406 static const uint32_t MAX_SCRIPT_SIZE = 10000;
michael@0 2407 if (script->length() > MAX_SCRIPT_SIZE)
michael@0 2408 return true;
michael@0 2409
michael@0 2410 if (!script->ensureHasTypes(cx))
michael@0 2411 return false;
michael@0 2412
michael@0 2413 LifoAlloc alloc(types::TypeZone::TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE);
michael@0 2414
michael@0 2415 TempAllocator temp(&alloc);
michael@0 2416 IonContext ictx(cx, &temp);
michael@0 2417
michael@0 2418 if (!cx->compartment()->ensureJitCompartmentExists(cx))
michael@0 2419 return false;
michael@0 2420
michael@0 2421 MIRGraph graph(&temp);
michael@0 2422 CompileInfo info(script, script->functionNonDelazifying(),
michael@0 2423 /* osrPc = */ nullptr, /* constructing = */ false,
michael@0 2424 ArgumentsUsageAnalysis,
michael@0 2425 /* needsArgsObj = */ true);
michael@0 2426
michael@0 2427 AutoTempAllocatorRooter root(cx, &temp);
michael@0 2428
michael@0 2429 const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_Normal);
michael@0 2430
michael@0 2431 types::CompilerConstraintList *constraints = types::NewCompilerConstraintList(temp);
michael@0 2432 if (!constraints)
michael@0 2433 return false;
michael@0 2434
michael@0 2435 BaselineInspector inspector(script);
michael@0 2436 const JitCompileOptions options(cx);
michael@0 2437
michael@0 2438 IonBuilder builder(nullptr, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
michael@0 2439 &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);
michael@0 2440
michael@0 2441 if (!builder.build()) {
michael@0 2442 if (builder.abortReason() == AbortReason_Alloc)
michael@0 2443 return false;
michael@0 2444 return true;
michael@0 2445 }
michael@0 2446
michael@0 2447 if (!SplitCriticalEdges(graph))
michael@0 2448 return false;
michael@0 2449
michael@0 2450 if (!RenumberBlocks(graph))
michael@0 2451 return false;
michael@0 2452
michael@0 2453 if (!BuildDominatorTree(graph))
michael@0 2454 return false;
michael@0 2455
michael@0 2456 if (!EliminatePhis(&builder, graph, AggressiveObservability))
michael@0 2457 return false;
michael@0 2458
michael@0 2459 MDefinition *argumentsValue = graph.begin()->getSlot(info.argsObjSlot());
michael@0 2460
michael@0 2461 for (MUseDefIterator uses(argumentsValue); uses; uses++) {
michael@0 2462 MDefinition *use = uses.def();
michael@0 2463
michael@0 2464 // Don't track |arguments| through assignments to phis.
michael@0 2465 if (!use->isInstruction())
michael@0 2466 return true;
michael@0 2467
michael@0 2468 if (!ArgumentsUseCanBeLazy(cx, script, use->toInstruction(), uses.index()))
michael@0 2469 return true;
michael@0 2470 }
michael@0 2471
michael@0 2472 script->setNeedsArgsObj(false);
michael@0 2473 return true;
michael@0 2474 }

mercurial