js/src/jit/RangeAnalysis.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/RangeAnalysis.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,2677 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "jit/RangeAnalysis.h"
    1.11 +
    1.12 +#include "mozilla/MathAlgorithms.h"
    1.13 +
    1.14 +#include "jsanalyze.h"
    1.15 +
    1.16 +#include "jit/Ion.h"
    1.17 +#include "jit/IonAnalysis.h"
    1.18 +#include "jit/IonSpewer.h"
    1.19 +#include "jit/MIR.h"
    1.20 +#include "jit/MIRGenerator.h"
    1.21 +#include "jit/MIRGraph.h"
    1.22 +#include "vm/NumericConversions.h"
    1.23 +
    1.24 +#include "jsopcodeinlines.h"
    1.25 +
    1.26 +using namespace js;
    1.27 +using namespace js::jit;
    1.28 +
    1.29 +using mozilla::Abs;
    1.30 +using mozilla::CountLeadingZeroes32;
    1.31 +using mozilla::NumberEqualsInt32;
    1.32 +using mozilla::ExponentComponent;
    1.33 +using mozilla::FloorLog2;
    1.34 +using mozilla::IsInfinite;
    1.35 +using mozilla::IsNaN;
    1.36 +using mozilla::IsNegative;
    1.37 +using mozilla::NegativeInfinity;
    1.38 +using mozilla::PositiveInfinity;
    1.39 +using mozilla::Swap;
    1.40 +using JS::GenericNaN;
    1.41 +
    1.42 +// This algorithm is based on the paper "Eliminating Range Checks Using
     1.43 +// Static Single Assignment Form" by Gough and Klaeren.
    1.44 +//
    1.45 +// We associate a range object with each SSA name, and the ranges are consulted
    1.46 +// in order to determine whether overflow is possible for arithmetic
    1.47 +// computations.
    1.48 +//
    1.49 +// An important source of range information that requires care to take
    1.50 +// advantage of is conditional control flow. Consider the code below:
    1.51 +//
    1.52 +// if (x < 0) {
    1.53 +//   y = x + 2000000000;
    1.54 +// } else {
    1.55 +//   if (x < 1000000000) {
    1.56 +//     y = x * 2;
    1.57 +//   } else {
    1.58 +//     y = x - 3000000000;
    1.59 +//   }
    1.60 +// }
    1.61 +//
    1.62 +// The arithmetic operations in this code cannot overflow, but it is not
    1.63 +// sufficient to simply associate each name with a range, since the information
     1.64 +// differs between basic blocks. The traditional dataflow approach would be
     1.65 +// to associate ranges with (name, basic block) pairs. This solution is not
    1.66 +// satisfying, since we lose the benefit of SSA form: in SSA form, each
    1.67 +// definition has a unique name, so there is no need to track information about
    1.68 +// the control flow of the program.
    1.69 +//
    1.70 +// The approach used here is to add a new form of pseudo operation called a
    1.71 +// beta node, which associates range information with a value. These beta
    1.72 +// instructions take one argument and additionally have an auxiliary constant
    1.73 +// range associated with them. Operationally, beta nodes are just copies, but
    1.74 +// the invariant expressed by beta node copies is that the output will fall
    1.75 +// inside the range given by the beta node.  Gough and Klaeren refer to SSA
    1.76 +// extended with these beta nodes as XSA form. The following shows the example
    1.77 +// code transformed into XSA form:
    1.78 +//
    1.79 +// if (x < 0) {
    1.80 +//   x1 = Beta(x, [INT_MIN, -1]);
    1.81 +//   y1 = x1 + 2000000000;
    1.82 +// } else {
    1.83 +//   x2 = Beta(x, [0, INT_MAX]);
    1.84 +//   if (x2 < 1000000000) {
    1.85 +//     x3 = Beta(x2, [INT_MIN, 999999999]);
    1.86 +//     y2 = x3*2;
    1.87 +//   } else {
    1.88 +//     x4 = Beta(x2, [1000000000, INT_MAX]);
    1.89 +//     y3 = x4 - 3000000000;
    1.90 +//   }
    1.91 +//   y4 = Phi(y2, y3);
    1.92 +// }
    1.93 +// y = Phi(y1, y4);
    1.94 +//
    1.95 +// We insert beta nodes for the purposes of range analysis (they might also be
    1.96 +// usefully used for other forms of bounds check elimination) and remove them
    1.97 +// after range analysis is performed. The remaining compiler phases do not ever
    1.98 +// encounter beta nodes.
    1.99 +
   1.100 +static bool
   1.101 +IsDominatedUse(MBasicBlock *block, MUse *use)
   1.102 +{
   1.103 +    MNode *n = use->consumer();
   1.104 +    bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
   1.105 +
   1.106 +    if (isPhi)
   1.107 +        return block->dominates(n->block()->getPredecessor(use->index()));
   1.108 +
   1.109 +    return block->dominates(n->block());
   1.110 +}
   1.111 +
   1.112 +static inline void
   1.113 +SpewRange(MDefinition *def)
   1.114 +{
   1.115 +#ifdef DEBUG
   1.116 +    if (IonSpewEnabled(IonSpew_Range) && def->type() != MIRType_None && def->range()) {
   1.117 +        IonSpewHeader(IonSpew_Range);
   1.118 +        def->printName(IonSpewFile);
   1.119 +        fprintf(IonSpewFile, " has range ");
   1.120 +        def->range()->dump(IonSpewFile);
   1.121 +    }
   1.122 +#endif
   1.123 +}
   1.124 +
// Convenience accessor for the MIR graph's bump allocator, used for all
// transient allocations (ranges, beta nodes) made during range analysis.
TempAllocator &
RangeAnalysis::alloc() const
{
    return graph_.alloc();
}
   1.130 +
// Rewrite every use of |orig| that is dominated by |block| to use |dom|
// instead. |dom|'s own use of |orig| is skipped, since rewriting it would
// make |dom| refer to itself.
void
RangeAnalysis::replaceDominatedUsesWith(MDefinition *orig, MDefinition *dom,
                                            MBasicBlock *block)
{
    for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) {
        // replaceOperand invalidates the current use and returns the next
        // iterator position; only advance manually when nothing changed.
        if (i->consumer() != dom && IsDominatedUse(block, *i))
            i = i->consumer()->replaceOperand(i, dom);
        else
            i++;
    }
}
   1.142 +
// Insert MBeta nodes below conditional branches whose condition is a
// comparison. Each beta node carries the range implied by taking that
// branch and replaces all dominated uses of the compared value, so that
// range propagation can exploit control-flow information (see the XSA
// discussion at the top of this file).
bool
RangeAnalysis::addBetaNodes()
{
    IonSpew(IonSpew_Range, "Adding beta nodes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        IonSpew(IonSpew_Range, "Looking at block %d", block->id());

        // Only blocks whose immediate dominator ends in a test of a compare
        // give us usable range information.
        BranchDirection branch_dir;
        MTest *test = block->immediateDominatorBranch(&branch_dir);

        if (!test || !test->getOperand(0)->isCompare())
            continue;

        MCompare *compare = test->getOperand(0)->toCompare();

        // TODO: support unsigned comparisons
        if (compare->compareType() == MCompare::Compare_UInt32)
            continue;

        MDefinition *left = compare->getOperand(0);
        MDefinition *right = compare->getOperand(1);
        double bound;
        double conservativeLower = NegativeInfinity<double>();
        double conservativeUpper = PositiveInfinity<double>();
        MDefinition *val = nullptr;

        JSOp jsop = compare->jsop();

        if (branch_dir == FALSE_BRANCH) {
            jsop = NegateCompareOp(jsop);
            // A negated comparison also succeeds when an operand is NaN, so
            // the unconstrained side of the range must admit NaN here.
            conservativeLower = GenericNaN();
            conservativeUpper = GenericNaN();
        }

        // Normalize to the form (val <op> constant); if the constant is on
        // the left, flip the comparison so it moves to the right.
        if (left->isConstant() && left->toConstant()->value().isNumber()) {
            bound = left->toConstant()->value().toNumber();
            val = right;
            jsop = ReverseCompareOp(jsop);
        } else if (right->isConstant() && right->toConstant()->value().isNumber()) {
            bound = right->toConstant()->value().toNumber();
            val = left;
        } else if (left->type() == MIRType_Int32 && right->type() == MIRType_Int32) {
            // Neither side is constant, but a strict int32 inequality still
            // excludes one extreme value from each operand's range.
            MDefinition *smaller = nullptr;
            MDefinition *greater = nullptr;
            if (jsop == JSOP_LT) {
                smaller = left;
                greater = right;
            } else if (jsop == JSOP_GT) {
                smaller = right;
                greater = left;
            }
            if (smaller && greater) {
                MBeta *beta;
                beta = MBeta::New(alloc(), smaller,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(smaller, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id());
                beta = MBeta::New(alloc(), greater,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(greater, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id());
            }
            continue;
        } else {
            continue;
        }

        // At this point, one of the operands of the compare is a constant, and
        // val is the other operand.
        JS_ASSERT(val);

        // Translate the (possibly negated/reversed) comparison into a range
        // constraint on val.
        Range comp;
        switch (jsop) {
          case JSOP_LE:
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_LT:
            // For integers, if x < c, the upper bound of x is c-1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_GE:
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_GT:
            // For integers, if x > c, the lower bound of x is c+1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_EQ:
            comp.setDouble(bound, bound);
            break;
          default:
            continue; // well, for neq we could have
                      // [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges.
        }

        if (IonSpewEnabled(IonSpew_Range)) {
            IonSpewHeader(IonSpew_Range);
            fprintf(IonSpewFile, "Adding beta node for %d with range ", val->id());
            comp.dump(IonSpewFile);
        }

        MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(val, beta, block);
    }

    return true;
}
   1.265 +
   1.266 +bool
   1.267 +RangeAnalysis::removeBetaNodes()
   1.268 +{
   1.269 +    IonSpew(IonSpew_Range, "Removing beta nodes");
   1.270 +
   1.271 +    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
   1.272 +        MBasicBlock *block = *i;
   1.273 +        for (MDefinitionIterator iter(*i); iter; ) {
   1.274 +            MDefinition *def = *iter;
   1.275 +            if (def->isBeta()) {
   1.276 +                MDefinition *op = def->getOperand(0);
   1.277 +                IonSpew(IonSpew_Range, "Removing beta node %d for %d",
   1.278 +                        def->id(), op->id());
   1.279 +                def->replaceAllUsesWith(op);
   1.280 +                iter = block->discardDefAt(iter);
   1.281 +            } else {
   1.282 +                // We only place Beta nodes at the beginning of basic
   1.283 +                // blocks, so if we see something else, we can move on
   1.284 +                // to the next block.
   1.285 +                break;
   1.286 +            }
   1.287 +        }
   1.288 +    }
   1.289 +    return true;
   1.290 +}
   1.291 +
// Print this symbolic bound to |sp|, tagging loop-variant bounds with a
// "[loop] " prefix before the linear-sum expression.
void
SymbolicBound::print(Sprinter &sp) const
{
    if (loop)
        sp.printf("[loop] ");
    sum.print(sp);
}
   1.299 +
// Debugging helper: print this symbolic bound to stderr.
void
SymbolicBound::dump() const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(stderr, "%s\n", sp.string());
}
   1.308 +
   1.309 +// Test whether the given range's exponent tells us anything that its lower
   1.310 +// and upper bound values don't.
   1.311 +static bool
   1.312 +IsExponentInteresting(const Range *r)
   1.313 +{
   1.314 +   // If it lacks either a lower or upper bound, the exponent is interesting.
   1.315 +   if (!r->hasInt32Bounds())
   1.316 +       return true;
   1.317 +
   1.318 +   // Otherwise if there's no fractional part, the lower and upper bounds,
   1.319 +   // which are integers, are perfectly precise.
   1.320 +   if (!r->canHaveFractionalPart())
   1.321 +       return false;
   1.322 +
   1.323 +   // Otherwise, if the bounds are conservatively rounded across a power-of-two
   1.324 +   // boundary, the exponent may imply a tighter range.
   1.325 +   return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
   1.326 +}
   1.327 +
   1.328 +void
   1.329 +Range::print(Sprinter &sp) const
   1.330 +{
   1.331 +    assertInvariants();
   1.332 +
   1.333 +    // Floating-point or Integer subset.
   1.334 +    if (canHaveFractionalPart_)
   1.335 +        sp.printf("F");
   1.336 +    else
   1.337 +        sp.printf("I");
   1.338 +
   1.339 +    sp.printf("[");
   1.340 +
   1.341 +    if (!hasInt32LowerBound_)
   1.342 +        sp.printf("?");
   1.343 +    else
   1.344 +        sp.printf("%d", lower_);
   1.345 +    if (symbolicLower_) {
   1.346 +        sp.printf(" {");
   1.347 +        symbolicLower_->print(sp);
   1.348 +        sp.printf("}");
   1.349 +    }
   1.350 +
   1.351 +    sp.printf(", ");
   1.352 +
   1.353 +    if (!hasInt32UpperBound_)
   1.354 +        sp.printf("?");
   1.355 +    else
   1.356 +        sp.printf("%d", upper_);
   1.357 +    if (symbolicUpper_) {
   1.358 +        sp.printf(" {");
   1.359 +        symbolicUpper_->print(sp);
   1.360 +        sp.printf("}");
   1.361 +    }
   1.362 +
   1.363 +    sp.printf("]");
   1.364 +    if (IsExponentInteresting(this)) {
   1.365 +        if (max_exponent_ == IncludesInfinityAndNaN)
   1.366 +            sp.printf(" (U inf U NaN)", max_exponent_);
   1.367 +        else if (max_exponent_ == IncludesInfinity)
   1.368 +            sp.printf(" (U inf)");
   1.369 +        else
   1.370 +            sp.printf(" (< pow(2, %d+1))", max_exponent_);
   1.371 +    }
   1.372 +}
   1.373 +
// Debugging helper: print this range to the given stream.
void
Range::dump(FILE *fp) const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(fp, "%s\n", sp.string());
}
   1.382 +
// Debugging helper: print this range to stderr.
void
Range::dump() const
{
    dump(stderr);
}
   1.388 +
// Compute the intersection of |lhs| and |rhs|, allocated from |alloc|.
// A null input range means "unbounded", and a null result likewise means
// the intersection carries no information. When the two ranges provably
// don't overlap, *emptyRange is set so the caller can treat the dominated
// code as dead.
Range *
Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange)
{
    *emptyRange = false;

    if (!lhs && !rhs)
        return nullptr;

    // With only one constraint, the intersection is a copy of it.
    if (!lhs)
        return new(alloc) Range(*rhs);
    if (!rhs)
        return new(alloc) Range(*lhs);

    int32_t newLower = Max(lhs->lower_, rhs->lower_);
    int32_t newUpper = Min(lhs->upper_, rhs->upper_);

    // :TODO: This information could be used better. If upper < lower, then we
    // have conflicting constraints. Consider:
    //
    // if (x < 0) {
    //   if (x > 0) {
    //     [Some code.]
    //   }
    // }
    //
    // In this case, the block is dead. Right now, we just disregard this fact
    // and make the range unbounded, rather than empty.
    //
    // Instead, we should use it to eliminate the dead block.
    // (Bug 765127)
    if (newUpper < newLower) {
        // If both ranges can be NaN, the result can still be NaN.
        if (!lhs->canBeNaN() || !rhs->canBeNaN())
            *emptyRange = true;
        return nullptr;
    }

    // Each property of the intersection is the stronger of the two inputs'.
    bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
    bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
    bool newFractional = lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_;
    uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);

    // NaN is a special value which is neither greater than infinity or less than
    // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
    // can end up thinking we have both a lower and upper bound, even though NaN
    // is still possible. In this case, just be conservative, since any case where
    // we can have NaN is not especially interesting.
    if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN)
        return nullptr;

    // If one of the ranges has a fractional part and the other doesn't, it's
    // possible that we will have computed a newExponent that's more precise
    // than our newLower and newUpper. This is unusual, so we handle it here
    // instead of in optimize().
    //
    // For example, consider the range F[0,1.5]. Range analysis represents the
    // lower and upper bound as integers, so we'd actually have
    // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
    // more precise upper bound than the integer upper bound.
    //
    // When intersecting such a range with an integer range, the fractional part
    // of the range is dropped. The max exponent of 0 remains valid, so the
    // upper bound needs to be adjusted to 1.
    //
    // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
    // the naive intersection is I[2,2], but since the max exponent tells us
    // that the value is always less than 2, the intersection is actually empty.
    if (lhs->canHaveFractionalPart_ != rhs->canHaveFractionalPart_ ||
        (lhs->canHaveFractionalPart_ &&
         newHasInt32LowerBound && newHasInt32UpperBound &&
         newLower == newUpper))
    {
        refineInt32BoundsByExponent(newExponent, &newLower, &newUpper);

        // If we're intersecting two ranges that don't overlap, this could also
        // push the bounds past each other, since the actual intersection is
        // the empty set.
        if (newLower > newUpper) {
            *emptyRange = true;
            return nullptr;
        }
    }

    return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
                            newFractional, newExponent);
}
   1.475 +
   1.476 +void
   1.477 +Range::unionWith(const Range *other)
   1.478 +{
   1.479 +    int32_t newLower = Min(lower_, other->lower_);
   1.480 +    int32_t newUpper = Max(upper_, other->upper_);
   1.481 +
   1.482 +    bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_;
   1.483 +    bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_;
   1.484 +    bool newFractional = canHaveFractionalPart_ || other->canHaveFractionalPart_;
   1.485 +    uint16_t newExponent = Max(max_exponent_, other->max_exponent_);
   1.486 +
   1.487 +    rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
   1.488 +                  newFractional, newExponent);
   1.489 +}
   1.490 +
// Construct a range describing the possible values of |def|: use def's
// computed range when one is available (adjusted for its MIR type), and
// otherwise fall back to the bounds the type alone guarantees.
Range::Range(const MDefinition *def)
  : symbolicLower_(nullptr),
    symbolicUpper_(nullptr)
{
    if (const Range *other = def->range()) {
        // The instruction has range information; use it.
        *this = *other;

        // Simulate the effect of converting the value to its type.
        switch (def->type()) {
          case MIRType_Int32:
            wrapAroundToInt32();
            break;
          case MIRType_Boolean:
            wrapAroundToBoolean();
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            break;
        }
    } else {
        // Otherwise just use type information. We can trust the type here
        // because we don't care what value the instruction actually produces,
        // but what value we might get after we get past the bailouts.
        switch (def->type()) {
          case MIRType_Int32:
            setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
            break;
          case MIRType_Boolean:
            setInt32(0, 1);
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            // Non-numeric types get an unconstrained range.
            setUnknown();
            break;
        }
    }

    // As a special case, MUrsh is permitted to claim a result type of
    // MIRType_Int32 while actually returning values in [0,UINT32_MAX] without
    // bailouts. If range analysis hasn't ruled out values in
    // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
    // use as either a uint32 or an int32.
    if (!hasInt32UpperBound() && def->isUrsh() && def->toUrsh()->bailoutsDisabled())
        lower_ = INT32_MIN;

    assertInvariants();
}
   1.541 +
   1.542 +static uint16_t
   1.543 +ExponentImpliedByDouble(double d)
   1.544 +{
   1.545 +    // Handle the special values.
   1.546 +    if (IsNaN(d))
   1.547 +        return Range::IncludesInfinityAndNaN;
   1.548 +    if (IsInfinite(d))
   1.549 +        return Range::IncludesInfinity;
   1.550 +
   1.551 +    // Otherwise take the exponent part and clamp it at zero, since the Range
   1.552 +    // class doesn't track fractional ranges.
   1.553 +    return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
   1.554 +}
   1.555 +
// Set this range to conservatively cover the closed double interval [l, h].
// Either endpoint may be infinite or NaN; a NaN endpoint fails both range
// comparisons below and therefore leaves the corresponding int32 bound open.
void
Range::setDouble(double l, double h)
{
    // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
    // floor/ceil round the endpoints outward so the int32 bounds remain
    // conservative for fractional endpoints.
    if (l >= INT32_MIN && l <= INT32_MAX) {
        lower_ = int32_t(floor(l));
        hasInt32LowerBound_ = true;
    } else {
        lower_ = INT32_MIN;
        hasInt32LowerBound_ = false;
    }
    if (h >= INT32_MIN && h <= INT32_MAX) {
        upper_ = int32_t(ceil(h));
        hasInt32UpperBound_ = true;
    } else {
        upper_ = INT32_MAX;
        hasInt32UpperBound_ = false;
    }

    // Infer max_exponent_.
    uint16_t lExp = ExponentImpliedByDouble(l);
    uint16_t hExp = ExponentImpliedByDouble(h);
    max_exponent_ = Max(lExp, hExp);

    // Infer the canHaveFractionalPart_ field. We can have a fractional part
    // if the range crosses through the neighborhood of zero. We won't have a
    // fractional value if the value is always beyond the point at which
    // double precision can't represent fractional values.
    uint16_t minExp = Min(lExp, hExp);
    bool includesNegative = IsNaN(l) || l < 0;
    bool includesPositive = IsNaN(h) || h > 0;
    bool crossesZero = includesNegative && includesPositive;
    canHaveFractionalPart_ = crossesZero || minExp < MaxTruncatableExponent;

    optimize();
}
   1.592 +
   1.593 +static inline bool
   1.594 +MissingAnyInt32Bounds(const Range *lhs, const Range *rhs)
   1.595 +{
   1.596 +    return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
   1.597 +}
   1.598 +
   1.599 +Range *
   1.600 +Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.601 +{
   1.602 +    int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
   1.603 +    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
   1.604 +        l = NoInt32LowerBound;
   1.605 +
   1.606 +    int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
   1.607 +    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
   1.608 +        h = NoInt32UpperBound;
   1.609 +
   1.610 +    // The exponent is at most one greater than the greater of the operands'
   1.611 +    // exponents, except for NaN and infinity cases.
   1.612 +    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
   1.613 +    if (e <= Range::MaxFiniteExponent)
   1.614 +        ++e;
   1.615 +
   1.616 +    // Infinity + -Infinity is NaN.
   1.617 +    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
   1.618 +        e = Range::IncludesInfinityAndNaN;
   1.619 +
   1.620 +    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
   1.621 +}
   1.622 +
   1.623 +Range *
   1.624 +Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.625 +{
   1.626 +    int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
   1.627 +    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
   1.628 +        l = NoInt32LowerBound;
   1.629 +
   1.630 +    int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
   1.631 +    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
   1.632 +        h = NoInt32UpperBound;
   1.633 +
   1.634 +    // The exponent is at most one greater than the greater of the operands'
   1.635 +    // exponents, except for NaN and infinity cases.
   1.636 +    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
   1.637 +    if (e <= Range::MaxFiniteExponent)
   1.638 +        ++e;
   1.639 +
   1.640 +    // Infinity - Infinity is NaN.
   1.641 +    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
   1.642 +        e = Range::IncludesInfinityAndNaN;
   1.643 +
   1.644 +    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
   1.645 +}
   1.646 +
   1.647 +Range *
   1.648 +Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.649 +{
   1.650 +    JS_ASSERT(lhs->isInt32());
   1.651 +    JS_ASSERT(rhs->isInt32());
   1.652 +
   1.653 +    // If both numbers can be negative, result can be negative in the whole range
   1.654 +    if (lhs->lower() < 0 && rhs->lower() < 0)
   1.655 +        return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));
   1.656 +
   1.657 +    // Only one of both numbers can be negative.
   1.658 +    // - result can't be negative
   1.659 +    // - Upper bound is minimum of both upper range,
   1.660 +    int32_t lower = 0;
   1.661 +    int32_t upper = Min(lhs->upper(), rhs->upper());
   1.662 +
   1.663 +    // EXCEPT when upper bound of non negative number is max value,
   1.664 +    // because negative value can return the whole max value.
   1.665 +    // -1 & 5 = 5
   1.666 +    if (lhs->lower() < 0)
   1.667 +       upper = rhs->upper();
   1.668 +    if (rhs->lower() < 0)
   1.669 +        upper = lhs->upper();
   1.670 +
   1.671 +    return Range::NewInt32Range(alloc, lower, upper);
   1.672 +}
   1.673 +
   1.674 +Range *
   1.675 +Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.676 +{
   1.677 +    JS_ASSERT(lhs->isInt32());
   1.678 +    JS_ASSERT(rhs->isInt32());
   1.679 +    // When one operand is always 0 or always -1, it's a special case where we
   1.680 +    // can compute a fully precise result. Handling these up front also
   1.681 +    // protects the code below from calling CountLeadingZeroes32 with a zero
   1.682 +    // operand or from shifting an int32_t by 32.
   1.683 +    if (lhs->lower() == lhs->upper()) {
   1.684 +        if (lhs->lower() == 0)
   1.685 +            return new(alloc) Range(*rhs);
   1.686 +        if (lhs->lower() == -1)
   1.687 +            return new(alloc) Range(*lhs);;
   1.688 +    }
   1.689 +    if (rhs->lower() == rhs->upper()) {
   1.690 +        if (rhs->lower() == 0)
   1.691 +            return new(alloc) Range(*lhs);
   1.692 +        if (rhs->lower() == -1)
   1.693 +            return new(alloc) Range(*rhs);;
   1.694 +    }
   1.695 +
   1.696 +    // The code below uses CountLeadingZeroes32, which has undefined behavior
   1.697 +    // if its operand is 0. We rely on the code above to protect it.
   1.698 +    JS_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
   1.699 +    JS_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
   1.700 +    JS_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
   1.701 +    JS_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
   1.702 +
   1.703 +    int32_t lower = INT32_MIN;
   1.704 +    int32_t upper = INT32_MAX;
   1.705 +
   1.706 +    if (lhs->lower() >= 0 && rhs->lower() >= 0) {
   1.707 +        // Both operands are non-negative, so the result won't be less than either.
   1.708 +        lower = Max(lhs->lower(), rhs->lower());
   1.709 +        // The result will have leading zeros where both operands have leading zeros.
   1.710 +        // CountLeadingZeroes32 of a non-negative int32 will at least be 1 to account
   1.711 +        // for the bit of sign.
   1.712 +        upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
   1.713 +                                          CountLeadingZeroes32(rhs->upper())));
   1.714 +    } else {
   1.715 +        // The result will have leading ones where either operand has leading ones.
   1.716 +        if (lhs->upper() < 0) {
   1.717 +            unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
   1.718 +            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
   1.719 +            upper = -1;
   1.720 +        }
   1.721 +        if (rhs->upper() < 0) {
   1.722 +            unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
   1.723 +            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
   1.724 +            upper = -1;
   1.725 +        }
   1.726 +    }
   1.727 +
   1.728 +    return Range::NewInt32Range(alloc, lower, upper);
   1.729 +}
   1.730 +
// Compute a range for the int32 bitwise-xor of two int32-bounded ranges.
// The operands are normalized to be non-negative (via bitwise negation)
// before the bounds are estimated with leading-zero counts.
Range *
Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    int32_t lhsLower = lhs->lower();
    int32_t lhsUpper = lhs->upper();
    int32_t rhsLower = rhs->lower();
    int32_t rhsUpper = rhs->upper();
    bool invertAfter = false;

    // If either operand is negative, bitwise-negate it, and arrange to negate
    // the result; ~((~x)^y) == x^y. If both are negative the negations on the
    // result cancel each other out; effectively this is (~x)^(~y) == x^y.
    // These transformations reduce the number of cases we have to handle below.
    if (lhsUpper < 0) {
        lhsLower = ~lhsLower;
        lhsUpper = ~lhsUpper;
        // Bitwise negation reverses the ordering of the bounds, so swap them.
        Swap(lhsLower, lhsUpper);
        invertAfter = !invertAfter;
    }
    if (rhsUpper < 0) {
        rhsLower = ~rhsLower;
        rhsUpper = ~rhsUpper;
        Swap(rhsLower, rhsUpper);
        invertAfter = !invertAfter;
    }

    // Handle cases where lhs or rhs is always zero specially, because they're
    // easy cases where we can be perfectly precise, and because it protects the
    // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
    // undefined behavior.
    int32_t lower = INT32_MIN;
    int32_t upper = INT32_MAX;
    if (lhsLower == 0 && lhsUpper == 0) {
        // x ^ 0 == x.
        upper = rhsUpper;
        lower = rhsLower;
    } else if (rhsLower == 0 && rhsUpper == 0) {
        upper = lhsUpper;
        lower = lhsLower;
    } else if (lhsLower >= 0 && rhsLower >= 0) {
        // Both operands are non-negative. The result will be non-negative.
        lower = 0;
        // To compute the upper value, take each operand's upper value and
        // set all bits that don't correspond to leading zero bits in the
        // other to one. For each one, this gives an upper bound for the
        // result, so we can take the minimum between the two.
        unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
        unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
        upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                    lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
    }
    // Otherwise (an operand's range spans zero after normalization) we keep
    // the conservative full int32 range.

    // If we bitwise-negated one (but not both) of the operands above, apply the
    // bitwise-negate to the result, completing ~((~x)^y) == x^y.
    if (invertAfter) {
        lower = ~lower;
        upper = ~upper;
        Swap(lower, upper);
    }

    return Range::NewInt32Range(alloc, lower, upper);
}
   1.794 +
   1.795 +Range *
   1.796 +Range::not_(TempAllocator &alloc, const Range *op)
   1.797 +{
   1.798 +    JS_ASSERT(op->isInt32());
   1.799 +    return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
   1.800 +}
   1.801 +
// Compute a range for the product of two ranges, tracking fractional-part,
// infinity, and NaN possibilities in addition to the int32 bounds.
Range *
Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // A product can have a fractional part only if one of the factors can.
    bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart();

    uint16_t exponent;
    if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
        // Two finite values.
        // The significant bits of a product are bounded by the sum of the
        // operands' significant bits (minus one).
        exponent = lhs->numBits() + rhs->numBits() - 1;
        if (exponent > Range::MaxFiniteExponent)
            exponent = Range::IncludesInfinity;
    } else if (!lhs->canBeNaN() &&
               !rhs->canBeNaN() &&
               !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
               !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN()))
    {
        // Two values that multiplied together won't produce a NaN.
        // (0 * Infinity is the NaN-producing case excluded above.)
        exponent = Range::IncludesInfinity;
    } else {
        // Could be anything.
        exponent = Range::IncludesInfinityAndNaN;
    }

    if (MissingAnyInt32Bounds(lhs, rhs))
        return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
    // Evaluate all four corner products in 64 bits so they can't overflow;
    // the extremes of the product range are among these four values.
    int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
    int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
    int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
    int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
    return new(alloc) Range(
        Min( Min(a, b), Min(c, d) ),
        Max( Max(a, b), Max(c, d) ),
        fractional, exponent);
}
   1.836 +
// Compute a range for an int32 left shift by a constant count.
Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    // Only the low five bits of the count participate, as in the lsh operator.
    int32_t shift = c & 0x1f;

    // If the shift doesn't lose bits or shift bits into the sign bit, we
    // can simply compute the correct range by shifting.
    // (Shifting left then arithmetic-shifting back, with one extra bit to
    // cover the sign, must recover the original value.)
    if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
        (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
    {
        return Range::NewInt32Range(alloc,
            uint32_t(lhs->lower()) << shift,
            uint32_t(lhs->upper()) << shift);
    }

    // Otherwise the shift can wrap, so any int32 is possible.
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
   1.855 +
   1.856 +Range *
   1.857 +Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
   1.858 +{
   1.859 +    JS_ASSERT(lhs->isInt32());
   1.860 +    int32_t shift = c & 0x1f;
   1.861 +    return Range::NewInt32Range(alloc,
   1.862 +        lhs->lower() >> shift,
   1.863 +        lhs->upper() >> shift);
   1.864 +}
   1.865 +
// Compute a range for an unsigned right shift by a constant count.
Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());

    // Only the low five bits of the count participate.
    int32_t shift = c & 0x1f;

    // If the value is always non-negative or always negative, we can simply
    // compute the correct range by shifting.
    // (In either case the uint32 reinterpretations of the endpoints keep
    // their ordering, so endpoint shifting is exact.)
    if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
        return Range::NewUInt32Range(alloc,
            uint32_t(lhs->lower()) >> shift,
            uint32_t(lhs->upper()) >> shift);
    }

    // Otherwise return the most general range after the shift.
    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}
   1.887 +
// Compute a range for an int32 left shift by a variable count. With an
// unknown count we don't try to track bits, so any int32 is possible.
Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
   1.895 +
   1.896 +Range *
   1.897 +Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.898 +{
   1.899 +    JS_ASSERT(lhs->isInt32());
   1.900 +    JS_ASSERT(rhs->isInt32());
   1.901 +    return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
   1.902 +}
   1.903 +
   1.904 +Range *
   1.905 +Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
   1.906 +{
   1.907 +    // ursh's left operand is uint32, not int32, but for range analysis we
   1.908 +    // currently approximate it as int32. We assume here that the range has
   1.909 +    // already been adjusted accordingly by our callers.
   1.910 +    JS_ASSERT(lhs->isInt32());
   1.911 +    JS_ASSERT(rhs->isInt32());
   1.912 +    return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
   1.913 +}
   1.914 +
// Compute a range for the absolute value of |op|.
Range *
Range::abs(TempAllocator &alloc, const Range *op)
{
    int32_t l = op->lower_;
    int32_t u = op->upper_;

    // The new lower bound is max(0, l, -u) and the new upper bound is
    // max(0, u, -l). Negating INT32_MIN would overflow, so it saturates to
    // INT32_MAX; correspondingly, the result only keeps an int32 upper bound
    // when the input had int32 bounds and its lower bound wasn't INT32_MIN.
    return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
                            true,
                            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
                            op->hasInt32Bounds() && l != INT32_MIN,
                            op->canHaveFractionalPart_,
                            op->max_exponent_);
}
   1.928 +
// Compute a range for the minimum of two ranges, or return nullptr when the
// result could be NaN (no useful range).
Range *
Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    // The min has an int32 lower bound only if both operands do (either one
    // being unbounded below drags the min down), but has an int32 upper
    // bound if either operand does (the min can't exceed either upper bound).
    return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                            Min(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}
   1.943 +
// Compute a range for the maximum of two ranges, or return nullptr when the
// result could be NaN (no useful range). Mirror image of Range::min.
Range *
Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    // The max has an int32 lower bound if either operand does, but an int32
    // upper bound only if both operands do.
    return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                            Max(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}
   1.958 +
   1.959 +bool
   1.960 +Range::negativeZeroMul(const Range *lhs, const Range *rhs)
   1.961 +{
   1.962 +    // The result can only be negative zero if both sides are finite and they
   1.963 +    // have differing signs.
   1.964 +    return (lhs->canBeFiniteNegative() && rhs->canBeFiniteNonNegative()) ||
   1.965 +           (rhs->canBeFiniteNegative() && lhs->canBeFiniteNonNegative());
   1.966 +}
   1.967 +
   1.968 +bool
   1.969 +Range::update(const Range *other)
   1.970 +{
   1.971 +    bool changed =
   1.972 +        lower_ != other->lower_ ||
   1.973 +        hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
   1.974 +        upper_ != other->upper_ ||
   1.975 +        hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
   1.976 +        canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
   1.977 +        max_exponent_ != other->max_exponent_;
   1.978 +    if (changed) {
   1.979 +        lower_ = other->lower_;
   1.980 +        hasInt32LowerBound_ = other->hasInt32LowerBound_;
   1.981 +        upper_ = other->upper_;
   1.982 +        hasInt32UpperBound_ = other->hasInt32UpperBound_;
   1.983 +        canHaveFractionalPart_ = other->canHaveFractionalPart_;
   1.984 +        max_exponent_ = other->max_exponent_;
   1.985 +        assertInvariants();
   1.986 +    }
   1.987 +
   1.988 +    return changed;
   1.989 +}
   1.990 +
   1.991 +///////////////////////////////////////////////////////////////////////////////
   1.992 +// Range Computation for MIR Nodes
   1.993 +///////////////////////////////////////////////////////////////////////////////
   1.994 +
// A phi's range is the union of the ranges of its reachable operands.
void
MPhi::computeRange(TempAllocator &alloc)
{
    // Only int32 and double phis are tracked.
    if (type() != MIRType_Int32 && type() != MIRType_Double)
        return;

    Range *range = nullptr;
    for (size_t i = 0, e = numOperands(); i < e; i++) {
        // Operands from unreachable predecessors can't contribute values.
        if (getOperand(i)->block()->unreachable()) {
            IonSpew(IonSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
            continue;
        }

        // Peek at the pre-bailout range so we can take a short-cut; if any of
        // the operands has an unknown range, this phi has an unknown range.
        if (!getOperand(i)->range())
            return;

        Range input(getOperand(i));

        if (range)
            range->unionWith(&input);
        else
            range = new(alloc) Range(input);
    }

    // Note: if every operand was unreachable, range is still nullptr here.
    setRange(range);
}
  1.1023 +
// A beta node refines its operand's range with the comparison that holds on
// this control-flow edge. An empty intersection proves the edge is dead.
void
MBeta::computeRange(TempAllocator &alloc)
{
    bool emptyRange = false;

    Range opRange(getOperand(0));
    Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
    if (emptyRange) {
        // No value satisfies both the operand range and the comparison, so
        // this block can never execute.
        IonSpew(IonSpew_Range, "Marking block for inst %d unreachable", id());
        block()->setUnreachable();
    } else {
        setRange(range);
    }
}
  1.1038 +
  1.1039 +void
  1.1040 +MConstant::computeRange(TempAllocator &alloc)
  1.1041 +{
  1.1042 +    if (value().isNumber()) {
  1.1043 +        double d = value().toNumber();
  1.1044 +        setRange(Range::NewDoubleRange(alloc, d, d));
  1.1045 +    } else if (value().isBoolean()) {
  1.1046 +        bool b = value().toBoolean();
  1.1047 +        setRange(Range::NewInt32Range(alloc, b, b));
  1.1048 +    }
  1.1049 +}
  1.1050 +
  1.1051 +void
  1.1052 +MCharCodeAt::computeRange(TempAllocator &alloc)
  1.1053 +{
  1.1054 +    // ECMA 262 says that the integer will be non-negative and at most 65535.
  1.1055 +    setRange(Range::NewInt32Range(alloc, 0, 65535));
  1.1056 +}
  1.1057 +
  1.1058 +void
  1.1059 +MClampToUint8::computeRange(TempAllocator &alloc)
  1.1060 +{
  1.1061 +    setRange(Range::NewUInt32Range(alloc, 0, 255));
  1.1062 +}
  1.1063 +
  1.1064 +void
  1.1065 +MBitAnd::computeRange(TempAllocator &alloc)
  1.1066 +{
  1.1067 +    Range left(getOperand(0));
  1.1068 +    Range right(getOperand(1));
  1.1069 +    left.wrapAroundToInt32();
  1.1070 +    right.wrapAroundToInt32();
  1.1071 +
  1.1072 +    setRange(Range::and_(alloc, &left, &right));
  1.1073 +}
  1.1074 +
  1.1075 +void
  1.1076 +MBitOr::computeRange(TempAllocator &alloc)
  1.1077 +{
  1.1078 +    Range left(getOperand(0));
  1.1079 +    Range right(getOperand(1));
  1.1080 +    left.wrapAroundToInt32();
  1.1081 +    right.wrapAroundToInt32();
  1.1082 +
  1.1083 +    setRange(Range::or_(alloc, &left, &right));
  1.1084 +}
  1.1085 +
  1.1086 +void
  1.1087 +MBitXor::computeRange(TempAllocator &alloc)
  1.1088 +{
  1.1089 +    Range left(getOperand(0));
  1.1090 +    Range right(getOperand(1));
  1.1091 +    left.wrapAroundToInt32();
  1.1092 +    right.wrapAroundToInt32();
  1.1093 +
  1.1094 +    setRange(Range::xor_(alloc, &left, &right));
  1.1095 +}
  1.1096 +
  1.1097 +void
  1.1098 +MBitNot::computeRange(TempAllocator &alloc)
  1.1099 +{
  1.1100 +    Range op(getOperand(0));
  1.1101 +    op.wrapAroundToInt32();
  1.1102 +
  1.1103 +    setRange(Range::not_(alloc, &op));
  1.1104 +}
  1.1105 +
  1.1106 +void
  1.1107 +MLsh::computeRange(TempAllocator &alloc)
  1.1108 +{
  1.1109 +    Range left(getOperand(0));
  1.1110 +    Range right(getOperand(1));
  1.1111 +    left.wrapAroundToInt32();
  1.1112 +
  1.1113 +    MDefinition *rhs = getOperand(1);
  1.1114 +    if (!rhs->isConstant()) {
  1.1115 +        right.wrapAroundToShiftCount();
  1.1116 +        setRange(Range::lsh(alloc, &left, &right));
  1.1117 +        return;
  1.1118 +    }
  1.1119 +
  1.1120 +    int32_t c = rhs->toConstant()->value().toInt32();
  1.1121 +    setRange(Range::lsh(alloc, &left, c));
  1.1122 +}
  1.1123 +
  1.1124 +void
  1.1125 +MRsh::computeRange(TempAllocator &alloc)
  1.1126 +{
  1.1127 +    Range left(getOperand(0));
  1.1128 +    Range right(getOperand(1));
  1.1129 +    left.wrapAroundToInt32();
  1.1130 +
  1.1131 +    MDefinition *rhs = getOperand(1);
  1.1132 +    if (!rhs->isConstant()) {
  1.1133 +        right.wrapAroundToShiftCount();
  1.1134 +        setRange(Range::rsh(alloc, &left, &right));
  1.1135 +        return;
  1.1136 +    }
  1.1137 +
  1.1138 +    int32_t c = rhs->toConstant()->value().toInt32();
  1.1139 +    setRange(Range::rsh(alloc, &left, c));
  1.1140 +}
  1.1141 +
  1.1142 +void
  1.1143 +MUrsh::computeRange(TempAllocator &alloc)
  1.1144 +{
  1.1145 +    Range left(getOperand(0));
  1.1146 +    Range right(getOperand(1));
  1.1147 +
  1.1148 +    // ursh can be thought of as converting its left operand to uint32, or it
  1.1149 +    // can be thought of as converting its left operand to int32, and then
  1.1150 +    // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  1.1151 +    // the same result. Since we lack support for full uint32 ranges, we use
  1.1152 +    // the second interpretation, though it does cause us to be conservative.
  1.1153 +    left.wrapAroundToInt32();
  1.1154 +    right.wrapAroundToShiftCount();
  1.1155 +
  1.1156 +    MDefinition *rhs = getOperand(1);
  1.1157 +    if (!rhs->isConstant()) {
  1.1158 +        setRange(Range::ursh(alloc, &left, &right));
  1.1159 +    } else {
  1.1160 +        int32_t c = rhs->toConstant()->value().toInt32();
  1.1161 +        setRange(Range::ursh(alloc, &left, c));
  1.1162 +    }
  1.1163 +
  1.1164 +    JS_ASSERT(range()->lower() >= 0);
  1.1165 +}
  1.1166 +
  1.1167 +void
  1.1168 +MAbs::computeRange(TempAllocator &alloc)
  1.1169 +{
  1.1170 +    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
  1.1171 +        return;
  1.1172 +
  1.1173 +    Range other(getOperand(0));
  1.1174 +    Range *next = Range::abs(alloc, &other);
  1.1175 +    if (implicitTruncate_)
  1.1176 +        next->wrapAroundToInt32();
  1.1177 +    setRange(next);
  1.1178 +}
  1.1179 +
  1.1180 +void
  1.1181 +MMinMax::computeRange(TempAllocator &alloc)
  1.1182 +{
  1.1183 +    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
  1.1184 +        return;
  1.1185 +
  1.1186 +    Range left(getOperand(0));
  1.1187 +    Range right(getOperand(1));
  1.1188 +    setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
  1.1189 +}
  1.1190 +
  1.1191 +void
  1.1192 +MAdd::computeRange(TempAllocator &alloc)
  1.1193 +{
  1.1194 +    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
  1.1195 +        return;
  1.1196 +    Range left(getOperand(0));
  1.1197 +    Range right(getOperand(1));
  1.1198 +    Range *next = Range::add(alloc, &left, &right);
  1.1199 +    if (isTruncated())
  1.1200 +        next->wrapAroundToInt32();
  1.1201 +    setRange(next);
  1.1202 +}
  1.1203 +
  1.1204 +void
  1.1205 +MSub::computeRange(TempAllocator &alloc)
  1.1206 +{
  1.1207 +    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
  1.1208 +        return;
  1.1209 +    Range left(getOperand(0));
  1.1210 +    Range right(getOperand(1));
  1.1211 +    Range *next = Range::sub(alloc, &left, &right);
  1.1212 +    if (isTruncated())
  1.1213 +        next->wrapAroundToInt32();
  1.1214 +    setRange(next);
  1.1215 +}
  1.1216 +
  1.1217 +void
  1.1218 +MMul::computeRange(TempAllocator &alloc)
  1.1219 +{
  1.1220 +    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
  1.1221 +        return;
  1.1222 +    Range left(getOperand(0));
  1.1223 +    Range right(getOperand(1));
  1.1224 +    if (canBeNegativeZero())
  1.1225 +        canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  1.1226 +    Range *next = Range::mul(alloc, &left, &right);
  1.1227 +    // Truncated multiplications could overflow in both directions
  1.1228 +    if (isTruncated())
  1.1229 +        next->wrapAroundToInt32();
  1.1230 +    setRange(next);
  1.1231 +}
  1.1232 +
// Compute a range for the modulus operator, handling both the unsigned
// fast path and the general signed/fractional case.
void
MMod::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range lhs(getOperand(0));
    Range rhs(getOperand(1));

    // If either operand is a NaN, the result is NaN. This also conservatively
    // handles Infinity cases.
    if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
        return;

    // If RHS can be zero, the result can be NaN.
    if (rhs.lower() <= 0 && rhs.upper() >= 0)
        return;

    // If both operands are non-negative integers, we can optimize this to an
    // unsigned mod.
    if (specialization() == MIRType_Int32 && lhs.lower() >= 0 && rhs.lower() > 0 &&
        !lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
    {
        unsigned_ = true;
    }

    // For unsigned mod, we have to convert both operands to unsigned.
    // Note that we handled the case of a zero rhs above.
    if (unsigned_) {
        // The result of an unsigned mod will never be unsigned-greater than
        // either operand.
        uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper());
        uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper());

        // If either range crosses through -1 as a signed value, it could be
        // the maximum unsigned value when interpreted as unsigned. If the range
        // doesn't include -1, then the simple max value we computed above is
        // correct.
        if (lhs.lower() <= -1 && lhs.upper() >= -1)
            lhsBound = UINT32_MAX;
        if (rhs.lower() <= -1 && rhs.upper() >= -1)
            rhsBound = UINT32_MAX;

        // The result will never be equal to the rhs, and we shouldn't have
        // any rounding to worry about.
        JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
        --rhsBound;

        // This gives us two upper bounds, so we can take the best one.
        setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
        return;
    }

    // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
    // First, the absolute value of the result will always be less than the
    // absolute value of rhs. (And if rhs is zero, the result is NaN).
    // Use 64-bit absolute values so Abs(INT32_MIN) doesn't overflow.
    int64_t a = Abs<int64_t>(rhs.lower());
    int64_t b = Abs<int64_t>(rhs.upper());
    if (a == 0 && b == 0)
        return;
    int64_t rhsAbsBound = Max(a, b);

    // If the value is known to be integer, less-than abs(rhs) is equivalent
    // to less-than-or-equal abs(rhs)-1. This is important for being able to
    // say that the result of x%256 is an 8-bit unsigned number.
    if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
        --rhsAbsBound;

    // Next, the absolute value of the result will never be greater than the
    // absolute value of lhs.
    int64_t lhsAbsBound = Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

    // This gives us two upper bounds, so we can take the best one.
    int64_t absBound = Min(lhsAbsBound, rhsAbsBound);

    // Now consider the sign of the result.
    // If lhs is non-negative, the result will be non-negative.
    // If lhs is non-positive, the result will be non-positive.
    int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
    int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

    setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
                              Min(lhs.exponent(), rhs.exponent())));
}
  1.1316 +
// Compute a range for division. Only two easy cases are handled; all other
// divisions are left with no range.
void
MDiv::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range lhs(getOperand(0));
    Range rhs(getOperand(1));

    // If either operand is a NaN, the result is NaN. This also conservatively
    // handles Infinity cases.
    if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
        return;

    // Something simple for now: When dividing by a positive rhs, the result
    // won't be further from zero than lhs.
    if (lhs.lower() >= 0 && rhs.lower() >= 1) {
        setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent()));
    } else if (unsigned_ && rhs.lower() >= 1) {
        // We shouldn't set the unsigned flag if the inputs can have
        // fractional parts.
        JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
        // Unsigned division by a non-zero rhs will return a uint32 value.
        setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
    }
}
  1.1342 +
  1.1343 +void
  1.1344 +MSqrt::computeRange(TempAllocator &alloc)
  1.1345 +{
  1.1346 +    Range input(getOperand(0));
  1.1347 +
  1.1348 +    // If either operand is a NaN, the result is NaN. This also conservatively
  1.1349 +    // handles Infinity cases.
  1.1350 +    if (!input.hasInt32Bounds())
  1.1351 +        return;
  1.1352 +
  1.1353 +    // Sqrt of a negative non-zero value is NaN.
  1.1354 +    if (input.lower() < 0)
  1.1355 +        return;
  1.1356 +
  1.1357 +    // Something simple for now: When taking the sqrt of a positive value, the
  1.1358 +    // result won't be further from zero than the input.
  1.1359 +    setRange(new(alloc) Range(0, input.upper(), true, input.exponent()));
  1.1360 +}
  1.1361 +
  1.1362 +void
  1.1363 +MToDouble::computeRange(TempAllocator &alloc)
  1.1364 +{
  1.1365 +    setRange(new(alloc) Range(getOperand(0)));
  1.1366 +}
  1.1367 +
void
MToFloat32::computeRange(TempAllocator &alloc)
{
    // Intentionally empty: no range is computed for float32 conversions
    // here. (Presumably because float32 rounding could move the value
    // outside the operand's double range — TODO confirm.)
}
  1.1372 +
  1.1373 +void
  1.1374 +MTruncateToInt32::computeRange(TempAllocator &alloc)
  1.1375 +{
  1.1376 +    Range *output = new(alloc) Range(getOperand(0));
  1.1377 +    output->wrapAroundToInt32();
  1.1378 +    setRange(output);
  1.1379 +}
  1.1380 +
  1.1381 +void
  1.1382 +MToInt32::computeRange(TempAllocator &alloc)
  1.1383 +{
  1.1384 +    Range *output = new(alloc) Range(getOperand(0));
  1.1385 +    output->clampToInt32();
  1.1386 +    setRange(output);
  1.1387 +}
  1.1388 +
  1.1389 +static Range *GetTypedArrayRange(TempAllocator &alloc, int type)
  1.1390 +{
  1.1391 +    switch (type) {
  1.1392 +      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
  1.1393 +      case ScalarTypeDescr::TYPE_UINT8:
  1.1394 +        return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
  1.1395 +      case ScalarTypeDescr::TYPE_UINT16:
  1.1396 +        return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
  1.1397 +      case ScalarTypeDescr::TYPE_UINT32:
  1.1398 +        return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
  1.1399 +
  1.1400 +      case ScalarTypeDescr::TYPE_INT8:
  1.1401 +        return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
  1.1402 +      case ScalarTypeDescr::TYPE_INT16:
  1.1403 +        return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
  1.1404 +      case ScalarTypeDescr::TYPE_INT32:
  1.1405 +        return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
  1.1406 +
  1.1407 +      case ScalarTypeDescr::TYPE_FLOAT32:
  1.1408 +      case ScalarTypeDescr::TYPE_FLOAT64:
  1.1409 +        break;
  1.1410 +    }
  1.1411 +
  1.1412 +  return nullptr;
  1.1413 +}
  1.1414 +
  1.1415 +void
  1.1416 +MLoadTypedArrayElement::computeRange(TempAllocator &alloc)
  1.1417 +{
  1.1418 +    // We have an Int32 type and if this is a UInt32 load it may produce a value
  1.1419 +    // outside of our range, but we have a bailout to handle those cases.
  1.1420 +    setRange(GetTypedArrayRange(alloc, arrayType()));
  1.1421 +}
  1.1422 +
  1.1423 +void
  1.1424 +MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc)
  1.1425 +{
  1.1426 +    // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
  1.1427 +    // don't have to worry about it returning a value outside our type.
  1.1428 +    JS_ASSERT(typedArray_->type() != ScalarTypeDescr::TYPE_UINT32);
  1.1429 +
  1.1430 +    setRange(GetTypedArrayRange(alloc, typedArray_->type()));
  1.1431 +}
  1.1432 +
void
MArrayLength::computeRange(TempAllocator &alloc)
{
    // An array length is a non-negative integer, so [0, INT32_MAX] covers it.
    // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
    // nodes when the value is known to be int32 (see the
    // OBJECT_FLAG_LENGTH_OVERFLOW flag).
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}
  1.1441 +
void
MInitializedLength::computeRange(TempAllocator &alloc)
{
    // The initialized length is bounded by the maximum number of dense
    // elements an object may have.
    setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT));
}
  1.1447 +
void
MTypedArrayLength::computeRange(TempAllocator &alloc)
{
    // Typed array lengths are modeled as fitting in int32: [0, INT32_MAX].
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}
  1.1453 +
void
MStringLength::computeRange(TempAllocator &alloc)
{
    // A string length is in [0, JSString::MAX_LENGTH].
    static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}
  1.1461 +
void
MArgumentsLength::computeRange(TempAllocator &alloc)
{
    // This is a conservative upper bound on what |TooManyArguments| checks.
    // If exceeded, Ion will not be entered in the first place.
    static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS));
}
  1.1471 +
// A bounds check passes its index through, so its range is the index's range.
void
MBoundsCheck::computeRange(TempAllocator &alloc)
{
    // Just transfer the incoming index range to the output. The length() is
    // also interesting, but it is handled as a bailout check, and we're
    // computing a pre-bailout range here.
    setRange(new(alloc) Range(index()));
}
  1.1480 +
  1.1481 +void
  1.1482 +MArrayPush::computeRange(TempAllocator &alloc)
  1.1483 +{
  1.1484 +    // MArrayPush returns the new array length.
  1.1485 +    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
  1.1486 +}
  1.1487 +
  1.1488 +void
  1.1489 +MMathFunction::computeRange(TempAllocator &alloc)
  1.1490 +{
  1.1491 +    Range opRange(getOperand(0));
  1.1492 +    switch (function()) {
  1.1493 +      case Sin:
  1.1494 +      case Cos:
  1.1495 +        if (!opRange.canBeInfiniteOrNaN())
  1.1496 +            setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
  1.1497 +        break;
  1.1498 +      case Sign:
  1.1499 +        if (!opRange.canBeNaN()) {
  1.1500 +            // Note that Math.sign(-0) is -0, and we treat -0 as equal to 0.
  1.1501 +            int32_t lower = -1;
  1.1502 +            int32_t upper = 1;
  1.1503 +            if (opRange.hasInt32LowerBound() && opRange.lower() >= 0)
  1.1504 +                lower = 0;
  1.1505 +            if (opRange.hasInt32UpperBound() && opRange.upper() <= 0)
  1.1506 +                upper = 0;
  1.1507 +            setRange(Range::NewInt32Range(alloc, lower, upper));
  1.1508 +        }
  1.1509 +        break;
  1.1510 +    default:
  1.1511 +        break;
  1.1512 +    }
  1.1513 +}
  1.1514 +
  1.1515 +void
  1.1516 +MRandom::computeRange(TempAllocator &alloc)
  1.1517 +{
  1.1518 +    setRange(Range::NewDoubleRange(alloc, 0.0, 1.0));
  1.1519 +}
  1.1520 +
  1.1521 +///////////////////////////////////////////////////////////////////////////////
  1.1522 +// Range Analysis
  1.1523 +///////////////////////////////////////////////////////////////////////////////
  1.1524 +
  1.1525 +bool
  1.1526 +RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge)
  1.1527 +{
  1.1528 +    Vector<MBasicBlock *, 16, IonAllocPolicy> worklist(alloc());
  1.1529 +
  1.1530 +    // Mark the header as being in the loop. This terminates the walk.
  1.1531 +    header->mark();
  1.1532 +
  1.1533 +    backedge->mark();
  1.1534 +    if (!worklist.append(backedge))
  1.1535 +        return false;
  1.1536 +
  1.1537 +    // If we haven't reached the loop header yet, walk up the predecessors
  1.1538 +    // we haven't seen already.
  1.1539 +    while (!worklist.empty()) {
  1.1540 +        MBasicBlock *current = worklist.popCopy();
  1.1541 +        for (size_t i = 0; i < current->numPredecessors(); i++) {
  1.1542 +            MBasicBlock *pred = current->getPredecessor(i);
  1.1543 +
  1.1544 +            if (pred->isMarked())
  1.1545 +                continue;
  1.1546 +
  1.1547 +            pred->mark();
  1.1548 +            if (!worklist.append(pred))
  1.1549 +                return false;
  1.1550 +        }
  1.1551 +    }
  1.1552 +
  1.1553 +    return true;
  1.1554 +}
  1.1555 +
bool
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
    JS_ASSERT(header->hasUniqueBackedge());

    // Try to compute an upper bound on the number of times the loop backedge
    // will be taken. Look for tests that dominate the backedge and which have
    // an edge leaving the loop body.
    MBasicBlock *backedge = header->backedge();

    // Ignore trivial infinite loops.
    if (backedge == header)
        return true;

    // Mark all blocks in the loop body so later walks can distinguish
    // loop blocks from loop-invariant ones. The marks are cleared via
    // unmarkBlocks() on every exit path below.
    if (!markBlocksInLoopBody(header, backedge))
        return false;

    LoopIterationBound *iterationBound = nullptr;

    // Walk up the dominator chain from the backedge. Any dominating branch
    // with a successor outside the loop body (unmarked) is a candidate test
    // bounding the number of backedge executions.
    MBasicBlock *block = backedge;
    do {
        BranchDirection direction;
        MTest *branch = block->immediateDominatorBranch(&direction);

        if (block == block->immediateDominator())
            break;

        block = block->immediateDominator();

        if (branch) {
            // |direction| reaches |block|; the negated direction is the edge
            // that would leave the loop.
            direction = NegateBranchDirection(direction);
            MBasicBlock *otherBlock = branch->branchSuccessor(direction);
            if (!otherBlock->isMarked()) {
                iterationBound =
                    analyzeLoopIterationCount(header, branch, direction);
                if (iterationBound)
                    break;
            }
        }
    } while (block != header);

    if (!iterationBound) {
        graph_.unmarkBlocks();
        return true;
    }

#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range)) {
        Sprinter sp(GetIonContext()->cx);
        sp.init();
        iterationBound->sum.print(sp);
        IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
                sp.string());
    }
#endif

    // Try to compute symbolic bounds for the phi nodes at the head of this
    // loop, expressed in terms of the iteration bound just computed.

    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
        analyzeLoopPhi(header, iterationBound, *iter);

    if (!mir->compilingAsmJS()) {
        // Try to hoist any bounds checks from the loop using symbolic bounds.

        Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks(alloc());

        // Scan every marked (loop-body) block for movable bounds checks that
        // can be replaced by loop-invariant checks in the preheader.
        for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
            MBasicBlock *block = *iter;
            if (!block->isMarked())
                continue;

            for (MDefinitionIterator iter(block); iter; iter++) {
                MDefinition *def = *iter;
                if (def->isBoundsCheck() && def->isMovable()) {
                    if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
                        if (!hoistedChecks.append(def->toBoundsCheck()))
                            return false;
                    }
                }
            }
        }

        // Note: replace all uses of the original bounds check with the
        // actual index. This is usually done during bounds check elimination,
        // but in this case it's safe to do it here since the load/store is
        // definitely not loop-invariant, so we will never move it before
        // one of the bounds checks we just added.
        for (size_t i = 0; i < hoistedChecks.length(); i++) {
            MBoundsCheck *ins = hoistedChecks[i];
            ins->replaceAllUsesWith(ins->index());
            ins->block()->discard(ins);
        }
    }

    graph_.unmarkBlocks();
    return true;
}
  1.1654 +
// Given a branch |test| that dominates the loop backedge and exits the loop
// in |direction|, try to compute a LinearSum bounding how many times the
// backedge executes. Returns nullptr when the test is not of a supported
// 'induction variable vs. loop-invariant limit' form.
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(nullptr, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return nullptr;

    // Ensure the rhs is a loop invariant term.
    if (rhs && rhs->block()->isMarked()) {
        // The rhs varies inside the loop; if the lhs is invariant, swap the
        // two sides (negating the constant and flipping the comparison) so
        // the varying term ends up on the left.
        if (lhs.term && lhs.term->block()->isMarked())
            return nullptr;
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return nullptr;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return nullptr;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.

    if (lhs.term->toPhi()->numOperands() != 2)
        return nullptr;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return nullptr;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return nullptr;
    if (!lhsWrite->block()->isMarked())
        return nullptr;
    // Walk the dominator chain from the backedge to confirm the add/sub's
    // block dominates the backedge (executes every iteration).
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return nullptr;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return nullptr;

    LinearSum bound(alloc());

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhsN - initial(lhs) - lhsN

        if (rhs) {
            if (!bound.add(rhs, 1))
                return nullptr;
        }
        if (!bound.add(lhsInitial, -1))
            return nullptr;

        int32_t lhsConstant;
        if (!SafeSub(0, lhs.constant, &lhsConstant))
            return nullptr;
        if (!bound.add(lhsConstant))
            return nullptr;
    } else if (lhsModified.constant == -1 && lessEqual) {
        // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
        // case, an upper bound on the number of backedges executed is:
        //
        // initial(lhs) - iterCount + lhsN == rhs
        // iterCount == initial(lhs) - rhs + lhsN

        if (!bound.add(lhsInitial, 1))
            return nullptr;
        if (rhs) {
            if (!bound.add(rhs, -1))
                return nullptr;
        }
        if (!bound.add(lhs.constant))
            return nullptr;
    } else {
        // Only unit increments/decrements paired with the matching comparison
        // direction are supported.
        return nullptr;
    }

    return new(alloc()) LoopIterationBound(header, test, bound);
}
  1.1767 +
void
RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
{
    // Given a bound on the number of backedges taken, compute an upper and
    // lower bound for a phi node that may change by a constant amount each
    // iteration. Unlike for the case when computing the iteration bound
    // itself, the phi does not need to change the same amount every iteration,
    // but is required to change at most N and be either nondecreasing or
    // nonincreasing.

    JS_ASSERT(phi->numOperands() == 2);

    MBasicBlock *preLoop = header->loopPredecessor();
    JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header);

    MBasicBlock *backedge = header->backedge();
    JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header);

    // The phi's entry value must be loop invariant for initial(phi) to be
    // meaningful.
    MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor());
    if (initial->block()->isMarked())
        return;

    // The backedge value must be 'phi + constant' with a nonzero constant,
    // i.e. the phi changes by a fixed maximum amount N per iteration.
    SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));

    if (modified.term != phi || modified.constant == 0)
        return;

    if (!phi->range())
        phi->setRange(new(alloc()) Range());

    // initialSum represents initial(phi) as a linear sum.
    LinearSum initialSum(alloc());
    if (!initialSum.add(initial, 1))
        return;

    // The phi may change by N each iteration, and is either nondecreasing or
    // nonincreasing. initial(phi) is either a lower or upper bound for the
    // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
    // at all points within the loop, provided that loopBound >= 0.
    //
    // We are more interested, however, in the bound for phi at points
    // dominated by the loop bound's test; if the test dominates e.g. a bounds
    // check we want to hoist from the loop, using the value of the phi at the
    // head of the loop for this will usually be too imprecise to hoist the
    // check. These points will execute only if the backedge executes at least
    // one more time (as the test passed and the test dominates the backedge),
    // so we know both that loopBound >= 1 and that the phi's value has changed
    // at most loopBound - 1 times. Thus, another upper or lower bound for the
    // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
    // ensure that loopBound >= 0.

    // limitSum = loopBound * N + initial(phi) ...
    LinearSum limitSum(loopBound->sum);
    if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
        return;

    // ... - N, yielding initial(phi) + (loopBound - 1) * N.
    int32_t negativeConstant;
    if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
        return;

    // Attach the symbolic bounds: for an increasing phi, initial(phi) is the
    // lower bound and limitSum the upper; for a decreasing phi, the reverse.
    Range *initRange = initial->range();
    if (modified.constant > 0) {
        if (initRange && initRange->hasInt32LowerBound())
            phi->range()->refineLower(initRange->lower());
        phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
        phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
    } else {
        if (initRange && initRange->hasInt32UpperBound())
            phi->range()->refineUpper(initRange->upper());
        phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
        phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
    }

    IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
    SpewRange(phi);
}
  1.1842 +
  1.1843 +// Whether bound is valid at the specified bounds check instruction in a loop,
  1.1844 +// and may be used to hoist ins.
  1.1845 +static inline bool
  1.1846 +SymbolicBoundIsValid(MBasicBlock *header, MBoundsCheck *ins, const SymbolicBound *bound)
  1.1847 +{
  1.1848 +    if (!bound->loop)
  1.1849 +        return true;
  1.1850 +    if (ins->block() == header)
  1.1851 +        return false;
  1.1852 +    MBasicBlock *bb = ins->block()->immediateDominator();
  1.1853 +    while (bb != header && bb != bound->loop->test->block())
  1.1854 +        bb = bb->immediateDominator();
  1.1855 +    return bb == bound->loop->test->block();
  1.1856 +}
  1.1857 +
// Convert all components of a linear sum *except* its constant to a definition,
// adding any necessary instructions to the end of block.
static inline MDefinition *
ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
{
    // Accumulates the partial sum as terms are folded in; remains nullptr
    // until the first term contributes a definition.
    MDefinition *def = nullptr;

    for (size_t i = 0; i < sum.numTerms(); i++) {
        LinearTerm term = sum.term(i);
        JS_ASSERT(!term.term->isConstant());
        if (term.scale == 1) {
            // A scale of 1 needs no multiply: either add the term to the
            // running sum or use it directly as the first contribution.
            if (def) {
                def = MAdd::New(alloc, def, term.term);
                def->toAdd()->setInt32();
                block->insertBefore(block->lastIns(), def->toInstruction());
                def->computeRange(alloc);
            } else {
                def = term.term;
            }
        } else if (term.scale == -1) {
            // A scale of -1 becomes a subtraction; if this is the first term,
            // subtract from an explicit zero constant.
            if (!def) {
                def = MConstant::New(alloc, Int32Value(0));
                block->insertBefore(block->lastIns(), def->toInstruction());
                def->computeRange(alloc);
            }
            def = MSub::New(alloc, def, term.term);
            def->toSub()->setInt32();
            block->insertBefore(block->lastIns(), def->toInstruction());
            def->computeRange(alloc);
        } else {
            // General case: materialize the scale as a constant, multiply,
            // then add the product into the running sum.
            JS_ASSERT(term.scale != 0);
            MConstant *factor = MConstant::New(alloc, Int32Value(term.scale));
            block->insertBefore(block->lastIns(), factor);
            MMul *mul = MMul::New(alloc, term.term, factor);
            mul->setInt32();
            block->insertBefore(block->lastIns(), mul);
            mul->computeRange(alloc);
            if (def) {
                def = MAdd::New(alloc, def, mul);
                def->toAdd()->setInt32();
                block->insertBefore(block->lastIns(), def->toInstruction());
                def->computeRange(alloc);
            } else {
                def = mul;
            }
        }
    }

    // An empty sum (no non-constant terms) converts to the constant zero.
    if (!def) {
        def = MConstant::New(alloc, Int32Value(0));
        block->insertBefore(block->lastIns(), def->toInstruction());
        def->computeRange(alloc);
    }

    return def;
}
  1.1914 +
// Try to replace the loop-body bounds check |ins| with equivalent
// loop-invariant lower/upper checks inserted into the loop preheader.
// Returns true if the hoisted checks were emitted (the caller then removes
// |ins|); false leaves the graph untouched.
bool
RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins)
{
    // The bounds check's length must be loop invariant.
    if (ins->length()->block()->isMarked())
        return false;

    // The bounds check's index should not be loop invariant (else we would
    // already have hoisted it during LICM).
    SimpleLinearSum index = ExtractLinearSum(ins->index());
    if (!index.term || !index.term->block()->isMarked())
        return false;

    // Check for a symbolic lower and upper bound on the index. If either
    // condition depends on an iteration bound for the loop, only hoist if
    // the bounds check is dominated by the iteration bound's test.
    if (!index.term->range())
        return false;
    const SymbolicBound *lower = index.term->range()->symbolicLower();
    if (!lower || !SymbolicBoundIsValid(header, ins, lower))
        return false;
    const SymbolicBound *upper = index.term->range()->symbolicUpper();
    if (!upper || !SymbolicBoundIsValid(header, ins, upper))
        return false;

    MBasicBlock *preLoop = header->loopPredecessor();
    JS_ASSERT(!preLoop->isMarked());

    // Materialize the non-constant parts of both symbolic bounds as
    // instructions at the end of the preheader.
    MDefinition *lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
    if (!lowerTerm)
        return false;

    MDefinition *upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
    if (!upperTerm)
        return false;

    // We are checking that index + indexConstant >= 0, and know that
    // index >= lowerTerm + lowerConstant. Thus, check that:
    //
    // lowerTerm + lowerConstant + indexConstant >= 0
    // lowerTerm >= -lowerConstant - indexConstant

    // All constant arithmetic uses SafeSub/SafeAdd: overflow aborts the hoist
    // rather than producing a wrong check.
    int32_t lowerConstant = 0;
    if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
        return false;
    if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
        return false;

    // We are checking that index < boundsLength, and know that
    // index <= upperTerm + upperConstant. Thus, check that:
    //
    // upperTerm + upperConstant < boundsLength

    int32_t upperConstant = index.constant;
    if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
        return false;

    MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
    lowerCheck->setMinimum(lowerConstant);

    MBoundsCheck *upperCheck = MBoundsCheck::New(alloc(), upperTerm, ins->length());
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);

    // Hoist the loop invariant upper and lower bounds checks.
    preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
    preLoop->insertBefore(preLoop->lastIns(), upperCheck);

    return true;
}
  1.1985 +
  1.1986 +bool
  1.1987 +RangeAnalysis::analyze()
  1.1988 +{
  1.1989 +    IonSpew(IonSpew_Range, "Doing range propagation");
  1.1990 +
  1.1991 +    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
  1.1992 +        MBasicBlock *block = *iter;
  1.1993 +
  1.1994 +        if (block->unreachable())
  1.1995 +            continue;
  1.1996 +
  1.1997 +        for (MDefinitionIterator iter(block); iter; iter++) {
  1.1998 +            MDefinition *def = *iter;
  1.1999 +
  1.2000 +            def->computeRange(alloc());
  1.2001 +            IonSpew(IonSpew_Range, "computing range on %d", def->id());
  1.2002 +            SpewRange(def);
  1.2003 +        }
  1.2004 +
  1.2005 +        if (block->isLoopHeader()) {
  1.2006 +            if (!analyzeLoop(block))
  1.2007 +                return false;
  1.2008 +        }
  1.2009 +
  1.2010 +        // First pass at collecting range info - while the beta nodes are still
  1.2011 +        // around and before truncation.
  1.2012 +        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
  1.2013 +            iter->collectRangeInfoPreTrunc();
  1.2014 +
  1.2015 +            // Would have been nice to implement this using collectRangeInfoPreTrunc()
  1.2016 +            // methods but it needs the minAsmJSHeapLength().
  1.2017 +            if (mir->compilingAsmJS()) {
  1.2018 +                uint32_t minHeapLength = mir->minAsmJSHeapLength();
  1.2019 +                if (iter->isAsmJSLoadHeap()) {
  1.2020 +                    MAsmJSLoadHeap *ins = iter->toAsmJSLoadHeap();
  1.2021 +                    Range *range = ins->ptr()->range();
  1.2022 +                    if (range && range->hasInt32LowerBound() && range->lower() >= 0 &&
  1.2023 +                        range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) {
  1.2024 +                        ins->setSkipBoundsCheck(true);
  1.2025 +                    }
  1.2026 +                } else if (iter->isAsmJSStoreHeap()) {
  1.2027 +                    MAsmJSStoreHeap *ins = iter->toAsmJSStoreHeap();
  1.2028 +                    Range *range = ins->ptr()->range();
  1.2029 +                    if (range && range->hasInt32LowerBound() && range->lower() >= 0 &&
  1.2030 +                        range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) {
  1.2031 +                        ins->setSkipBoundsCheck(true);
  1.2032 +                    }
  1.2033 +                }
  1.2034 +            }
  1.2035 +        }
  1.2036 +    }
  1.2037 +
  1.2038 +    return true;
  1.2039 +}
  1.2040 +
bool
RangeAnalysis::addRangeAssertions()
{
    if (!js_JitOptions.checkRangeAnalysis)
        return true;

    // Check the computed range for this instruction, if the option is set. Note
    // that this code is quite invasive; it adds numerous additional
    // instructions for each MInstruction with a computed range, and it uses
    // registers, so it also affects register allocation.
    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
        MBasicBlock *block = *iter;

        for (MDefinitionIterator iter(block); iter; iter++) {
            MDefinition *ins = *iter;

            // Perform range checking for all numeric and numeric-like types.
            if (!IsNumberType(ins->type()) &&
                ins->type() != MIRType_Boolean &&
                ins->type() != MIRType_Value)
            {
                continue;
            }

            Range r(ins);

            // Don't insert assertions if there's nothing interesting to assert.
            if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32()))
                continue;

            MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));

            // Beta nodes and interrupt checks are required to be located at the
            // beginnings of basic blocks, so we must insert range assertions
            // after any such instructions.
            MInstructionIterator insertIter = ins->isPhi()
                                            ? block->begin()
                                            : block->begin(ins->toInstruction());
            while (insertIter->isBeta() ||
                   insertIter->isInterruptCheck() ||
                   insertIter->isInterruptCheckPar() ||
                   insertIter->isConstant())
            {
                insertIter++;
            }

            // If the skip-ahead landed back on the instruction being guarded,
            // the assertion must go after it; otherwise it goes before the
            // instruction the iterator stopped at.
            if (*insertIter == *iter)
                block->insertAfter(*insertIter,  guard);
            else
                block->insertBefore(*insertIter, guard);
        }
    }

    return true;
}
  1.2096 +
  1.2097 +///////////////////////////////////////////////////////////////////////////////
  1.2098 +// Range based Truncation
  1.2099 +///////////////////////////////////////////////////////////////////////////////
  1.2100 +
  1.2101 +void
  1.2102 +Range::clampToInt32()
  1.2103 +{
  1.2104 +    if (isInt32())
  1.2105 +        return;
  1.2106 +    int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
  1.2107 +    int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
  1.2108 +    setInt32(l, h);
  1.2109 +}
  1.2110 +
void
Range::wrapAroundToInt32()
{
    // Update this range to reflect an int32 wraparound conversion of the
    // value it describes.
    if (!hasInt32Bounds()) {
        // A bound lies outside int32, so after wraparound any int32 value is
        // possible.
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
    } else if (canHaveFractionalPart()) {
        // Bounds already fit in int32; conversion only drops the fraction.
        canHaveFractionalPart_ = false;

        // Clearing the fractional field may provide an opportunity to refine
        // lower_ or upper_.
        refineInt32BoundsByExponent(max_exponent_, &lower_, &upper_);

        assertInvariants();
    }
}
  1.2126 +
  1.2127 +void
  1.2128 +Range::wrapAroundToShiftCount()
  1.2129 +{
  1.2130 +    wrapAroundToInt32();
  1.2131 +    if (lower() < 0 || upper() >= 32)
  1.2132 +        setInt32(0, 31);
  1.2133 +}
  1.2134 +
  1.2135 +void
  1.2136 +Range::wrapAroundToBoolean()
  1.2137 +{
  1.2138 +    wrapAroundToInt32();
  1.2139 +    if (!isBoolean())
  1.2140 +        setInt32(0, 1);
  1.2141 +}
  1.2142 +
bool
MDefinition::truncate()
{
    // Default implementation for MIR nodes without a truncation procedure:
    // report failure so callers leave the instruction unchanged.
    // No procedure defined for truncating this instruction.
    return false;
}
  1.2149 +
  1.2150 +bool
  1.2151 +MConstant::truncate()
  1.2152 +{
  1.2153 +    if (!value_.isDouble())
  1.2154 +        return false;
  1.2155 +
  1.2156 +    // Truncate the double to int, since all uses truncates it.
  1.2157 +    int32_t res = ToInt32(value_.toDouble());
  1.2158 +    value_.setInt32(res);
  1.2159 +    setResultType(MIRType_Int32);
  1.2160 +    if (range())
  1.2161 +        range()->setInt32(res, res);
  1.2162 +    return true;
  1.2163 +}
  1.2164 +
  1.2165 +bool
  1.2166 +MAdd::truncate()
  1.2167 +{
  1.2168 +    // Remember analysis, needed for fallible checks.
  1.2169 +    setTruncated(true);
  1.2170 +
  1.2171 +    if (type() == MIRType_Double || type() == MIRType_Int32) {
  1.2172 +        specialization_ = MIRType_Int32;
  1.2173 +        setResultType(MIRType_Int32);
  1.2174 +        if (range())
  1.2175 +            range()->wrapAroundToInt32();
  1.2176 +        return true;
  1.2177 +    }
  1.2178 +
  1.2179 +    return false;
  1.2180 +}
  1.2181 +
  1.2182 +bool
  1.2183 +MSub::truncate()
  1.2184 +{
  1.2185 +    // Remember analysis, needed for fallible checks.
  1.2186 +    setTruncated(true);
  1.2187 +
  1.2188 +    if (type() == MIRType_Double || type() == MIRType_Int32) {
  1.2189 +        specialization_ = MIRType_Int32;
  1.2190 +        setResultType(MIRType_Int32);
  1.2191 +        if (range())
  1.2192 +            range()->wrapAroundToInt32();
  1.2193 +        return true;
  1.2194 +    }
  1.2195 +
  1.2196 +    return false;
  1.2197 +}
  1.2198 +
  1.2199 +bool
  1.2200 +MMul::truncate()
  1.2201 +{
  1.2202 +    // Remember analysis, needed to remove negative zero checks.
  1.2203 +    setTruncated(true);
  1.2204 +
  1.2205 +    if (type() == MIRType_Double || type() == MIRType_Int32) {
  1.2206 +        specialization_ = MIRType_Int32;
  1.2207 +        setResultType(MIRType_Int32);
  1.2208 +        setCanBeNegativeZero(false);
  1.2209 +        if (range())
  1.2210 +            range()->wrapAroundToInt32();
  1.2211 +        return true;
  1.2212 +    }
  1.2213 +
  1.2214 +    return false;
  1.2215 +}
  1.2216 +
  1.2217 +bool
  1.2218 +MDiv::truncate()
  1.2219 +{
  1.2220 +    // Remember analysis, needed to remove negative zero checks.
  1.2221 +    setTruncatedIndirectly(true);
  1.2222 +
  1.2223 +    // Check if this division only flows in bitwise instructions.
  1.2224 +    if (!isTruncated()) {
  1.2225 +        bool allUsesExplictlyTruncate = true;
  1.2226 +        for (MUseDefIterator use(this); allUsesExplictlyTruncate && use; use++) {
  1.2227 +            switch (use.def()->op()) {
  1.2228 +              case MDefinition::Op_BitAnd:
  1.2229 +              case MDefinition::Op_BitOr:
  1.2230 +              case MDefinition::Op_BitXor:
  1.2231 +              case MDefinition::Op_Lsh:
  1.2232 +              case MDefinition::Op_Rsh:
  1.2233 +              case MDefinition::Op_Ursh:
  1.2234 +                break;
  1.2235 +              default:
  1.2236 +                allUsesExplictlyTruncate = false;
  1.2237 +            }
  1.2238 +        }
  1.2239 +
  1.2240 +        if (allUsesExplictlyTruncate)
  1.2241 +            setTruncated(true);
  1.2242 +    }
  1.2243 +
  1.2244 +    // Divisions where the lhs and rhs are unsigned and the result is
  1.2245 +    // truncated can be lowered more efficiently.
  1.2246 +    if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) {
  1.2247 +        unsigned_ = true;
  1.2248 +        return true;
  1.2249 +    }
  1.2250 +
  1.2251 +    // No modifications.
  1.2252 +    return false;
  1.2253 +}
  1.2254 +
  1.2255 +bool
  1.2256 +MMod::truncate()
  1.2257 +{
  1.2258 +    // Remember analysis, needed to remove negative zero checks.
  1.2259 +    setTruncated(true);
  1.2260 +
  1.2261 +    // As for division, handle unsigned modulus with a truncated result.
  1.2262 +    if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) {
  1.2263 +        unsigned_ = true;
  1.2264 +        return true;
  1.2265 +    }
  1.2266 +
  1.2267 +    // No modifications.
  1.2268 +    return false;
  1.2269 +}
  1.2270 +
bool
MToDouble::truncate()
{
    JS_ASSERT(type() == MIRType_Double);

    // We use the return type to flag that this MToDouble should be replaced by
    // a MTruncateToInt32 when modifying the graph (the actual replacement is
    // performed later, when truncated inputs are adjusted).
    setResultType(MIRType_Int32);
    if (range())
        range()->wrapAroundToInt32();

    return true;
}
  1.2284 +
bool
MLoadTypedArrayElementStatic::truncate()
{
    // The load itself is not rewritten (hence the false return); being
    // reached here only marks it infallible — presumably because all uses
    // truncating the result makes the bailout path unnecessary. TODO confirm.
    setInfallible();
    return false;
}
  1.2291 +
bool
MDefinition::isOperandTruncated(size_t index) const
{
    // By default an instruction does not accept truncated operands.
    return false;
}
  1.2297 +
bool
MTruncateToInt32::isOperandTruncated(size_t index) const
{
    // A truncation explicitly truncates its (only) operand.
    return true;
}
  1.2303 +
bool
MBinaryBitwiseInstruction::isOperandTruncated(size_t index) const
{
    // Bitwise instructions coerce both operands to int32, so truncated
    // inputs are always acceptable.
    return true;
}
  1.2309 +
bool
MAdd::isOperandTruncated(size_t index) const
{
    // Once the add itself is truncated (wrapping int32), its operands may
    // be handed to it truncated as well.
    return isTruncated();
}
  1.2315 +
bool
MSub::isOperandTruncated(size_t index) const
{
    // Once the sub itself is truncated (wrapping int32), its operands may
    // be handed to it truncated as well.
    return isTruncated();
}
  1.2321 +
bool
MMul::isOperandTruncated(size_t index) const
{
    // Once the mul itself is truncated (wrapping int32), its operands may
    // be handed to it truncated as well.
    return isTruncated();
}
  1.2327 +
bool
MToDouble::isOperandTruncated(size_t index) const
{
    // The return type is used to flag that we are replacing this Double by a
    // Truncate of its operand if needed (see MToDouble::truncate).
    return type() == MIRType_Int32;
}
  1.2335 +
bool
MStoreTypedArrayElement::isOperandTruncated(size_t index) const
{
    // Operand 2 (presumably the stored value) is implicitly truncated when
    // written to a non-float typed array.
    return index == 2 && !isFloatArray();
}
  1.2341 +
bool
MStoreTypedArrayElementHole::isOperandTruncated(size_t index) const
{
    // Operand 3 (presumably the stored value) is implicitly truncated when
    // written to a non-float typed array.
    return index == 3 && !isFloatArray();
}
  1.2347 +
bool
MStoreTypedArrayElementStatic::isOperandTruncated(size_t index) const
{
    // Operand 1 (presumably the stored value) is implicitly truncated when
    // written to a non-float typed array.
    return index == 1 && !isFloatArray();
}
  1.2353 +
  1.2354 +bool
  1.2355 +MCompare::truncate()
  1.2356 +{
  1.2357 +    if (!isDoubleComparison())
  1.2358 +        return false;
  1.2359 +
  1.2360 +    // If both operands are naturally in the int32 range, we can convert from
  1.2361 +    // a double comparison to being an int32 comparison.
  1.2362 +    if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32())
  1.2363 +        return false;
  1.2364 +
  1.2365 +    compareType_ = Compare_Int32;
  1.2366 +
  1.2367 +    // Truncating the operands won't change their value, but it will change
  1.2368 +    // their type, which we need because we now expect integer inputs.
  1.2369 +    truncateOperands_ = true;
  1.2370 +
  1.2371 +    return true;
  1.2372 +}
  1.2373 +
bool
MCompare::isOperandTruncated(size_t index) const
{
    // If we're doing an int32 comparison on operands which were previously
    // floating-point, convert them! The flag is only ever set together with
    // the demotion to Compare_Int32 (see MCompare::truncate).
    JS_ASSERT_IF(truncateOperands_, isInt32Comparison());
    return truncateOperands_;
}
  1.2382 +
  1.2383 +// Ensure that all observables uses can work with a truncated
  1.2384 +// version of the |candidate|'s result.
  1.2385 +static bool
  1.2386 +AllUsesTruncate(MInstruction *candidate)
  1.2387 +{
  1.2388 +    // If the value naturally produces an int32 value (before bailout checks)
  1.2389 +    // that needs no conversion, we don't have to worry about resume points
  1.2390 +    // seeing truncated values.
  1.2391 +    bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
  1.2392 +
  1.2393 +    for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) {
  1.2394 +        if (!use->consumer()->isDefinition()) {
  1.2395 +            // We can only skip testing resume points, if all original uses are
  1.2396 +            // still present, or if the value does not need conversion.
  1.2397 +            // Otherwise a branch removed by UCE might rely on the non-truncated
  1.2398 +            // value, and any bailout with a truncated value might lead an
  1.2399 +            // incorrect value.
  1.2400 +            if (candidate->isUseRemoved() && needsConversion)
  1.2401 +                return false;
  1.2402 +            continue;
  1.2403 +        }
  1.2404 +
  1.2405 +        if (!use->consumer()->toDefinition()->isOperandTruncated(use->index()))
  1.2406 +            return false;
  1.2407 +    }
  1.2408 +
  1.2409 +    return true;
  1.2410 +}
  1.2411 +
  1.2412 +static bool
  1.2413 +CanTruncate(MInstruction *candidate)
  1.2414 +{
  1.2415 +    // Compare operations might coerce its inputs to int32 if the ranges are
  1.2416 +    // correct.  So we do not need to check if all uses are coerced.
  1.2417 +    if (candidate->isCompare())
  1.2418 +        return true;
  1.2419 +
  1.2420 +    // Set truncated flag if range analysis ensure that it has no
  1.2421 +    // rounding errors and no fractional part. Note that we can't use
  1.2422 +    // the MDefinition Range constructor, because we need to know if
  1.2423 +    // the value will have rounding errors before any bailout checks.
  1.2424 +    const Range *r = candidate->range();
  1.2425 +    bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
  1.2426 +
  1.2427 +    // Special case integer division: the result of a/b can be infinite
  1.2428 +    // but cannot actually have rounding errors induced by truncation.
  1.2429 +    if (candidate->isDiv() && candidate->toDiv()->specialization() == MIRType_Int32)
  1.2430 +        canHaveRoundingErrors = false;
  1.2431 +
  1.2432 +    if (canHaveRoundingErrors)
  1.2433 +        return false;
  1.2434 +
  1.2435 +    // Ensure all observable uses are truncated.
  1.2436 +    return AllUsesTruncate(candidate);
  1.2437 +}
  1.2438 +
  1.2439 +static void
  1.2440 +RemoveTruncatesOnOutput(MInstruction *truncated)
  1.2441 +{
  1.2442 +    // Compare returns a boolean so it doen't have any output truncates.
  1.2443 +    if (truncated->isCompare())
  1.2444 +        return;
  1.2445 +
  1.2446 +    JS_ASSERT(truncated->type() == MIRType_Int32);
  1.2447 +    JS_ASSERT(Range(truncated).isInt32());
  1.2448 +
  1.2449 +    for (MUseDefIterator use(truncated); use; use++) {
  1.2450 +        MDefinition *def = use.def();
  1.2451 +        if (!def->isTruncateToInt32() || !def->isToInt32())
  1.2452 +            continue;
  1.2453 +
  1.2454 +        def->replaceAllUsesWith(truncated);
  1.2455 +    }
  1.2456 +}
  1.2457 +
  1.2458 +static void
  1.2459 +AdjustTruncatedInputs(TempAllocator &alloc, MInstruction *truncated)
  1.2460 +{
  1.2461 +    MBasicBlock *block = truncated->block();
  1.2462 +    for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
  1.2463 +        if (!truncated->isOperandTruncated(i))
  1.2464 +            continue;
  1.2465 +
  1.2466 +        MDefinition *input = truncated->getOperand(i);
  1.2467 +        if (input->type() == MIRType_Int32)
  1.2468 +            continue;
  1.2469 +
  1.2470 +        if (input->isToDouble() && input->getOperand(0)->type() == MIRType_Int32) {
  1.2471 +            JS_ASSERT(input->range()->isInt32());
  1.2472 +            truncated->replaceOperand(i, input->getOperand(0));
  1.2473 +        } else {
  1.2474 +            MTruncateToInt32 *op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
  1.2475 +            block->insertBefore(truncated, op);
  1.2476 +            truncated->replaceOperand(i, op);
  1.2477 +        }
  1.2478 +    }
  1.2479 +
  1.2480 +    if (truncated->isToDouble()) {
  1.2481 +        truncated->replaceAllUsesWith(truncated->getOperand(0));
  1.2482 +        block->discard(truncated);
  1.2483 +    }
  1.2484 +}
  1.2485 +
// Iterate backward over all instructions and attempt to truncate operations
// for each instruction which respects the following list of predicates: it
// has been analyzed by range analysis, the range has no rounding errors, and
// all use cases truncate the result.
//
// If the truncation of the operation is successful, then the instruction is
// queued for later updating of the graph, to restore type correctness by
// converting the operands that need to be truncated.
//
// We iterate backward because it is likely that a truncated operation
// truncates some of its operands.
bool
RangeAnalysis::truncate()
{
    IonSpew(IonSpew_Range, "Do range-base truncation (backward loop)");

    // Instructions whose truncate() succeeded; their inputs/outputs are
    // fixed up in a second pass.
    Vector<MInstruction *, 16, SystemAllocPolicy> worklist;
    // All bitwise instructions seen, kept for a final folding pass.
    Vector<MBinaryBitwiseInstruction *, 16, SystemAllocPolicy> bitops;

    for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
        for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
            // Skip instructions without a meaningful result type.
            if (iter->type() == MIRType_None)
                continue;

            // Remember all bitop instructions for folding after range analysis.
            switch (iter->op()) {
              case MDefinition::Op_BitAnd:
              case MDefinition::Op_BitOr:
              case MDefinition::Op_BitXor:
              case MDefinition::Op_Lsh:
              case MDefinition::Op_Rsh:
              case MDefinition::Op_Ursh:
                if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter)))
                    return false;
              default:;
            }

            if (!CanTruncate(*iter))
                continue;

            // Truncate this instruction if possible.
            if (!iter->truncate())
                continue;

            // Delay updates of inputs/outputs to avoid creating node which
            // would be removed by the truncation of the next operations.
            iter->setInWorklist();
            if (!worklist.append(*iter))
                return false;
        }
    }

    // Update inputs/outputs of truncated instructions.
    IonSpew(IonSpew_Range, "Do graph type fixup (dequeue)");
    while (!worklist.empty()) {
        MInstruction *ins = worklist.popCopy();
        ins->setNotInWorklist();
        RemoveTruncatesOnOutput(ins);
        AdjustTruncatedInputs(alloc(), ins);
    }

    // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
    // input. This is done after range analysis rather than during GVN as the
    // presence of the bitop can change which instructions are truncated.
    for (size_t i = 0; i < bitops.length(); i++) {
        MBinaryBitwiseInstruction *ins = bitops[i];
        MDefinition *folded = ins->foldUnnecessaryBitop();
        if (folded != ins)
            ins->replaceAllUsesWith(folded);
    }

    return true;
}
  1.2559 +
  1.2560 +///////////////////////////////////////////////////////////////////////////////
  1.2561 +// Collect Range information of operands
  1.2562 +///////////////////////////////////////////////////////////////////////////////
  1.2563 +
  1.2564 +void
  1.2565 +MInArray::collectRangeInfoPreTrunc()
  1.2566 +{
  1.2567 +    Range indexRange(index());
  1.2568 +    if (indexRange.isFiniteNonNegative())
  1.2569 +        needsNegativeIntCheck_ = false;
  1.2570 +}
  1.2571 +
  1.2572 +void
  1.2573 +MLoadElementHole::collectRangeInfoPreTrunc()
  1.2574 +{
  1.2575 +    Range indexRange(index());
  1.2576 +    if (indexRange.isFiniteNonNegative())
  1.2577 +        needsNegativeIntCheck_ = false;
  1.2578 +}
  1.2579 +
  1.2580 +void
  1.2581 +MDiv::collectRangeInfoPreTrunc()
  1.2582 +{
  1.2583 +    Range lhsRange(lhs());
  1.2584 +    if (lhsRange.isFiniteNonNegative())
  1.2585 +        canBeNegativeDividend_ = false;
  1.2586 +}
  1.2587 +
  1.2588 +void
  1.2589 +MMod::collectRangeInfoPreTrunc()
  1.2590 +{
  1.2591 +    Range lhsRange(lhs());
  1.2592 +    if (lhsRange.isFiniteNonNegative())
  1.2593 +        canBeNegativeDividend_ = false;
  1.2594 +}
  1.2595 +
  1.2596 +void
  1.2597 +MBoundsCheckLower::collectRangeInfoPreTrunc()
  1.2598 +{
  1.2599 +    Range indexRange(index());
  1.2600 +    if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_)
  1.2601 +        fallible_ = false;
  1.2602 +}
  1.2603 +
  1.2604 +void
  1.2605 +MCompare::collectRangeInfoPreTrunc()
  1.2606 +{
  1.2607 +    if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN())
  1.2608 +        operandsAreNeverNaN_ = true;
  1.2609 +}
  1.2610 +
  1.2611 +void
  1.2612 +MNot::collectRangeInfoPreTrunc()
  1.2613 +{
  1.2614 +    if (!Range(operand()).canBeNaN())
  1.2615 +        operandIsNeverNaN_ = true;
  1.2616 +}
  1.2617 +
  1.2618 +void
  1.2619 +MPowHalf::collectRangeInfoPreTrunc()
  1.2620 +{
  1.2621 +    Range inputRange(input());
  1.2622 +    if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
  1.2623 +        operandIsNeverNegativeInfinity_ = true;
  1.2624 +    if (!inputRange.canBeZero())
  1.2625 +        operandIsNeverNegativeZero_ = true;
  1.2626 +    if (!inputRange.canBeNaN())
  1.2627 +        operandIsNeverNaN_ = true;
  1.2628 +}
  1.2629 +
  1.2630 +void
  1.2631 +MUrsh::collectRangeInfoPreTrunc()
  1.2632 +{
  1.2633 +    Range lhsRange(lhs()), rhsRange(rhs());
  1.2634 +
  1.2635 +    // As in MUrsh::computeRange(), convert the inputs.
  1.2636 +    lhsRange.wrapAroundToInt32();
  1.2637 +    rhsRange.wrapAroundToShiftCount();
  1.2638 +
  1.2639 +    // If the most significant bit of our result is always going to be zero,
  1.2640 +    // we can optimize by disabling bailout checks for enforcing an int32 range.
  1.2641 +    if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1)
  1.2642 +        bailoutsDisabled_ = true;
  1.2643 +}
  1.2644 +
  1.2645 +bool
  1.2646 +RangeAnalysis::prepareForUCE(bool *shouldRemoveDeadCode)
  1.2647 +{
  1.2648 +    *shouldRemoveDeadCode = false;
  1.2649 +
  1.2650 +    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
  1.2651 +        MBasicBlock *block = *iter;
  1.2652 +
  1.2653 +        if (!block->unreachable())
  1.2654 +            continue;
  1.2655 +
  1.2656 +        MControlInstruction *cond = block->getPredecessor(0)->lastIns();
  1.2657 +        if (!cond->isTest())
  1.2658 +            continue;
  1.2659 +
  1.2660 +        // Replace the condition of the test control instruction by a constant
  1.2661 +        // chosen based which of the successors has the unreachable flag which is
  1.2662 +        // added by MBeta::computeRange on its own block.
  1.2663 +        MTest *test = cond->toTest();
  1.2664 +        MConstant *constant = nullptr;
  1.2665 +        if (block == test->ifTrue()) {
  1.2666 +            constant = MConstant::New(alloc(), BooleanValue(false));
  1.2667 +        } else {
  1.2668 +            JS_ASSERT(block == test->ifFalse());
  1.2669 +            constant = MConstant::New(alloc(), BooleanValue(true));
  1.2670 +        }
  1.2671 +        test->block()->insertBefore(test, constant);
  1.2672 +        test->replaceOperand(0, constant);
  1.2673 +        IonSpew(IonSpew_Range, "Update condition of %d to reflect unreachable branches.",
  1.2674 +                test->id());
  1.2675 +
  1.2676 +        *shouldRemoveDeadCode = true;
  1.2677 +    }
  1.2678 +
  1.2679 +    return true;
  1.2680 +}

mercurial