michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "jit/RangeAnalysis.h" michael@0: michael@0: #include "mozilla/MathAlgorithms.h" michael@0: michael@0: #include "jsanalyze.h" michael@0: michael@0: #include "jit/Ion.h" michael@0: #include "jit/IonAnalysis.h" michael@0: #include "jit/IonSpewer.h" michael@0: #include "jit/MIR.h" michael@0: #include "jit/MIRGenerator.h" michael@0: #include "jit/MIRGraph.h" michael@0: #include "vm/NumericConversions.h" michael@0: michael@0: #include "jsopcodeinlines.h" michael@0: michael@0: using namespace js; michael@0: using namespace js::jit; michael@0: michael@0: using mozilla::Abs; michael@0: using mozilla::CountLeadingZeroes32; michael@0: using mozilla::NumberEqualsInt32; michael@0: using mozilla::ExponentComponent; michael@0: using mozilla::FloorLog2; michael@0: using mozilla::IsInfinite; michael@0: using mozilla::IsNaN; michael@0: using mozilla::IsNegative; michael@0: using mozilla::NegativeInfinity; michael@0: using mozilla::PositiveInfinity; michael@0: using mozilla::Swap; michael@0: using JS::GenericNaN; michael@0: michael@0: // This algorithm is based on the paper "Eliminating Range Checks Using michael@0: // Static Single Assignment Form" by Gough and Klaren. michael@0: // michael@0: // We associate a range object with each SSA name, and the ranges are consulted michael@0: // in order to determine whether overflow is possible for arithmetic michael@0: // computations. michael@0: // michael@0: // An important source of range information that requires care to take michael@0: // advantage of is conditional control flow. 
Consider the code below: michael@0: // michael@0: // if (x < 0) { michael@0: // y = x + 2000000000; michael@0: // } else { michael@0: // if (x < 1000000000) { michael@0: // y = x * 2; michael@0: // } else { michael@0: // y = x - 3000000000; michael@0: // } michael@0: // } michael@0: // michael@0: // The arithmetic operations in this code cannot overflow, but it is not michael@0: // sufficient to simply associate each name with a range, since the information michael@0: // differs between basic blocks. The traditional dataflow approach would be michael@0: // associate ranges with (name, basic block) pairs. This solution is not michael@0: // satisfying, since we lose the benefit of SSA form: in SSA form, each michael@0: // definition has a unique name, so there is no need to track information about michael@0: // the control flow of the program. michael@0: // michael@0: // The approach used here is to add a new form of pseudo operation called a michael@0: // beta node, which associates range information with a value. These beta michael@0: // instructions take one argument and additionally have an auxiliary constant michael@0: // range associated with them. Operationally, beta nodes are just copies, but michael@0: // the invariant expressed by beta node copies is that the output will fall michael@0: // inside the range given by the beta node. Gough and Klaeren refer to SSA michael@0: // extended with these beta nodes as XSA form. 
The following shows the example michael@0: // code transformed into XSA form: michael@0: // michael@0: // if (x < 0) { michael@0: // x1 = Beta(x, [INT_MIN, -1]); michael@0: // y1 = x1 + 2000000000; michael@0: // } else { michael@0: // x2 = Beta(x, [0, INT_MAX]); michael@0: // if (x2 < 1000000000) { michael@0: // x3 = Beta(x2, [INT_MIN, 999999999]); michael@0: // y2 = x3*2; michael@0: // } else { michael@0: // x4 = Beta(x2, [1000000000, INT_MAX]); michael@0: // y3 = x4 - 3000000000; michael@0: // } michael@0: // y4 = Phi(y2, y3); michael@0: // } michael@0: // y = Phi(y1, y4); michael@0: // michael@0: // We insert beta nodes for the purposes of range analysis (they might also be michael@0: // usefully used for other forms of bounds check elimination) and remove them michael@0: // after range analysis is performed. The remaining compiler phases do not ever michael@0: // encounter beta nodes. michael@0: michael@0: static bool michael@0: IsDominatedUse(MBasicBlock *block, MUse *use) michael@0: { michael@0: MNode *n = use->consumer(); michael@0: bool isPhi = n->isDefinition() && n->toDefinition()->isPhi(); michael@0: michael@0: if (isPhi) michael@0: return block->dominates(n->block()->getPredecessor(use->index())); michael@0: michael@0: return block->dominates(n->block()); michael@0: } michael@0: michael@0: static inline void michael@0: SpewRange(MDefinition *def) michael@0: { michael@0: #ifdef DEBUG michael@0: if (IonSpewEnabled(IonSpew_Range) && def->type() != MIRType_None && def->range()) { michael@0: IonSpewHeader(IonSpew_Range); michael@0: def->printName(IonSpewFile); michael@0: fprintf(IonSpewFile, " has range "); michael@0: def->range()->dump(IonSpewFile); michael@0: } michael@0: #endif michael@0: } michael@0: michael@0: TempAllocator & michael@0: RangeAnalysis::alloc() const michael@0: { michael@0: return graph_.alloc(); michael@0: } michael@0: michael@0: void michael@0: RangeAnalysis::replaceDominatedUsesWith(MDefinition *orig, MDefinition *dom, michael@0: 
MBasicBlock *block) michael@0: { michael@0: for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) { michael@0: if (i->consumer() != dom && IsDominatedUse(block, *i)) michael@0: i = i->consumer()->replaceOperand(i, dom); michael@0: else michael@0: i++; michael@0: } michael@0: } michael@0: michael@0: bool michael@0: RangeAnalysis::addBetaNodes() michael@0: { michael@0: IonSpew(IonSpew_Range, "Adding beta nodes"); michael@0: michael@0: for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { michael@0: MBasicBlock *block = *i; michael@0: IonSpew(IonSpew_Range, "Looking at block %d", block->id()); michael@0: michael@0: BranchDirection branch_dir; michael@0: MTest *test = block->immediateDominatorBranch(&branch_dir); michael@0: michael@0: if (!test || !test->getOperand(0)->isCompare()) michael@0: continue; michael@0: michael@0: MCompare *compare = test->getOperand(0)->toCompare(); michael@0: michael@0: // TODO: support unsigned comparisons michael@0: if (compare->compareType() == MCompare::Compare_UInt32) michael@0: continue; michael@0: michael@0: MDefinition *left = compare->getOperand(0); michael@0: MDefinition *right = compare->getOperand(1); michael@0: double bound; michael@0: double conservativeLower = NegativeInfinity(); michael@0: double conservativeUpper = PositiveInfinity(); michael@0: MDefinition *val = nullptr; michael@0: michael@0: JSOp jsop = compare->jsop(); michael@0: michael@0: if (branch_dir == FALSE_BRANCH) { michael@0: jsop = NegateCompareOp(jsop); michael@0: conservativeLower = GenericNaN(); michael@0: conservativeUpper = GenericNaN(); michael@0: } michael@0: michael@0: if (left->isConstant() && left->toConstant()->value().isNumber()) { michael@0: bound = left->toConstant()->value().toNumber(); michael@0: val = right; michael@0: jsop = ReverseCompareOp(jsop); michael@0: } else if (right->isConstant() && right->toConstant()->value().isNumber()) { michael@0: bound = right->toConstant()->value().toNumber(); michael@0: val = 
left; michael@0: } else if (left->type() == MIRType_Int32 && right->type() == MIRType_Int32) { michael@0: MDefinition *smaller = nullptr; michael@0: MDefinition *greater = nullptr; michael@0: if (jsop == JSOP_LT) { michael@0: smaller = left; michael@0: greater = right; michael@0: } else if (jsop == JSOP_GT) { michael@0: smaller = right; michael@0: greater = left; michael@0: } michael@0: if (smaller && greater) { michael@0: MBeta *beta; michael@0: beta = MBeta::New(alloc(), smaller, michael@0: Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1)); michael@0: block->insertBefore(*block->begin(), beta); michael@0: replaceDominatedUsesWith(smaller, beta, block); michael@0: IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id()); michael@0: beta = MBeta::New(alloc(), greater, michael@0: Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX)); michael@0: block->insertBefore(*block->begin(), beta); michael@0: replaceDominatedUsesWith(greater, beta, block); michael@0: IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id()); michael@0: } michael@0: continue; michael@0: } else { michael@0: continue; michael@0: } michael@0: michael@0: // At this point, one of the operands if the compare is a constant, and michael@0: // val is the other operand. michael@0: JS_ASSERT(val); michael@0: michael@0: Range comp; michael@0: switch (jsop) { michael@0: case JSOP_LE: michael@0: comp.setDouble(conservativeLower, bound); michael@0: break; michael@0: case JSOP_LT: michael@0: // For integers, if x < c, the upper bound of x is c-1. 
michael@0: if (val->type() == MIRType_Int32) { michael@0: int32_t intbound; michael@0: if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound)) michael@0: bound = intbound; michael@0: } michael@0: comp.setDouble(conservativeLower, bound); michael@0: break; michael@0: case JSOP_GE: michael@0: comp.setDouble(bound, conservativeUpper); michael@0: break; michael@0: case JSOP_GT: michael@0: // For integers, if x > c, the lower bound of x is c+1. michael@0: if (val->type() == MIRType_Int32) { michael@0: int32_t intbound; michael@0: if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound)) michael@0: bound = intbound; michael@0: } michael@0: comp.setDouble(bound, conservativeUpper); michael@0: break; michael@0: case JSOP_EQ: michael@0: comp.setDouble(bound, bound); michael@0: break; michael@0: default: michael@0: continue; // well, for neq we could have michael@0: // [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges. michael@0: } michael@0: michael@0: if (IonSpewEnabled(IonSpew_Range)) { michael@0: IonSpewHeader(IonSpew_Range); michael@0: fprintf(IonSpewFile, "Adding beta node for %d with range ", val->id()); michael@0: comp.dump(IonSpewFile); michael@0: } michael@0: michael@0: MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp)); michael@0: block->insertBefore(*block->begin(), beta); michael@0: replaceDominatedUsesWith(val, beta, block); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: RangeAnalysis::removeBetaNodes() michael@0: { michael@0: IonSpew(IonSpew_Range, "Removing beta nodes"); michael@0: michael@0: for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) { michael@0: MBasicBlock *block = *i; michael@0: for (MDefinitionIterator iter(*i); iter; ) { michael@0: MDefinition *def = *iter; michael@0: if (def->isBeta()) { michael@0: MDefinition *op = def->getOperand(0); michael@0: IonSpew(IonSpew_Range, "Removing beta node %d for %d", 
michael@0: def->id(), op->id()); michael@0: def->replaceAllUsesWith(op); michael@0: iter = block->discardDefAt(iter); michael@0: } else { michael@0: // We only place Beta nodes at the beginning of basic michael@0: // blocks, so if we see something else, we can move on michael@0: // to the next block. michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: SymbolicBound::print(Sprinter &sp) const michael@0: { michael@0: if (loop) michael@0: sp.printf("[loop] "); michael@0: sum.print(sp); michael@0: } michael@0: michael@0: void michael@0: SymbolicBound::dump() const michael@0: { michael@0: Sprinter sp(GetIonContext()->cx); michael@0: sp.init(); michael@0: print(sp); michael@0: fprintf(stderr, "%s\n", sp.string()); michael@0: } michael@0: michael@0: // Test whether the given range's exponent tells us anything that its lower michael@0: // and upper bound values don't. michael@0: static bool michael@0: IsExponentInteresting(const Range *r) michael@0: { michael@0: // If it lacks either a lower or upper bound, the exponent is interesting. michael@0: if (!r->hasInt32Bounds()) michael@0: return true; michael@0: michael@0: // Otherwise if there's no fractional part, the lower and upper bounds, michael@0: // which are integers, are perfectly precise. michael@0: if (!r->canHaveFractionalPart()) michael@0: return false; michael@0: michael@0: // Otherwise, if the bounds are conservatively rounded across a power-of-two michael@0: // boundary, the exponent may imply a tighter range. michael@0: return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent(); michael@0: } michael@0: michael@0: void michael@0: Range::print(Sprinter &sp) const michael@0: { michael@0: assertInvariants(); michael@0: michael@0: // Floating-point or Integer subset. 
michael@0: if (canHaveFractionalPart_) michael@0: sp.printf("F"); michael@0: else michael@0: sp.printf("I"); michael@0: michael@0: sp.printf("["); michael@0: michael@0: if (!hasInt32LowerBound_) michael@0: sp.printf("?"); michael@0: else michael@0: sp.printf("%d", lower_); michael@0: if (symbolicLower_) { michael@0: sp.printf(" {"); michael@0: symbolicLower_->print(sp); michael@0: sp.printf("}"); michael@0: } michael@0: michael@0: sp.printf(", "); michael@0: michael@0: if (!hasInt32UpperBound_) michael@0: sp.printf("?"); michael@0: else michael@0: sp.printf("%d", upper_); michael@0: if (symbolicUpper_) { michael@0: sp.printf(" {"); michael@0: symbolicUpper_->print(sp); michael@0: sp.printf("}"); michael@0: } michael@0: michael@0: sp.printf("]"); michael@0: if (IsExponentInteresting(this)) { michael@0: if (max_exponent_ == IncludesInfinityAndNaN) michael@0: sp.printf(" (U inf U NaN)", max_exponent_); michael@0: else if (max_exponent_ == IncludesInfinity) michael@0: sp.printf(" (U inf)"); michael@0: else michael@0: sp.printf(" (< pow(2, %d+1))", max_exponent_); michael@0: } michael@0: } michael@0: michael@0: void michael@0: Range::dump(FILE *fp) const michael@0: { michael@0: Sprinter sp(GetIonContext()->cx); michael@0: sp.init(); michael@0: print(sp); michael@0: fprintf(fp, "%s\n", sp.string()); michael@0: } michael@0: michael@0: void michael@0: Range::dump() const michael@0: { michael@0: dump(stderr); michael@0: } michael@0: michael@0: Range * michael@0: Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange) michael@0: { michael@0: *emptyRange = false; michael@0: michael@0: if (!lhs && !rhs) michael@0: return nullptr; michael@0: michael@0: if (!lhs) michael@0: return new(alloc) Range(*rhs); michael@0: if (!rhs) michael@0: return new(alloc) Range(*lhs); michael@0: michael@0: int32_t newLower = Max(lhs->lower_, rhs->lower_); michael@0: int32_t newUpper = Min(lhs->upper_, rhs->upper_); michael@0: michael@0: // :TODO: This 
information could be used better. If upper < lower, then we michael@0: // have conflicting constraints. Consider: michael@0: // michael@0: // if (x < 0) { michael@0: // if (x > 0) { michael@0: // [Some code.] michael@0: // } michael@0: // } michael@0: // michael@0: // In this case, the block is dead. Right now, we just disregard this fact michael@0: // and make the range unbounded, rather than empty. michael@0: // michael@0: // Instead, we should use it to eliminate the dead block. michael@0: // (Bug 765127) michael@0: if (newUpper < newLower) { michael@0: // If both ranges can be NaN, the result can still be NaN. michael@0: if (!lhs->canBeNaN() || !rhs->canBeNaN()) michael@0: *emptyRange = true; michael@0: return nullptr; michael@0: } michael@0: michael@0: bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_; michael@0: bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_; michael@0: bool newFractional = lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_; michael@0: uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_); michael@0: michael@0: // NaN is a special value which is neither greater than infinity or less than michael@0: // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we michael@0: // can end up thinking we have both a lower and upper bound, even though NaN michael@0: // is still possible. In this case, just be conservative, since any case where michael@0: // we can have NaN is not especially interesting. michael@0: if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN) michael@0: return nullptr; michael@0: michael@0: // If one of the ranges has a fractional part and the other doesn't, it's michael@0: // possible that we will have computed a newExponent that's more precise michael@0: // than our newLower and newUpper. This is unusual, so we handle it here michael@0: // instead of in optimize(). 
michael@0: // michael@0: // For example, consider the range F[0,1.5]. Range analysis represents the michael@0: // lower and upper bound as integers, so we'd actually have michael@0: // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly michael@0: // more precise upper bound than the integer upper bound. michael@0: // michael@0: // When intersecting such a range with an integer range, the fractional part michael@0: // of the range is dropped. The max exponent of 0 remains valid, so the michael@0: // upper bound needs to be adjusted to 1. michael@0: // michael@0: // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4], michael@0: // the naive intersection is I[2,2], but since the max exponent tells us michael@0: // that the value is always less than 2, the intersection is actually empty. michael@0: if (lhs->canHaveFractionalPart_ != rhs->canHaveFractionalPart_ || michael@0: (lhs->canHaveFractionalPart_ && michael@0: newHasInt32LowerBound && newHasInt32UpperBound && michael@0: newLower == newUpper)) michael@0: { michael@0: refineInt32BoundsByExponent(newExponent, &newLower, &newUpper); michael@0: michael@0: // If we're intersecting two ranges that don't overlap, this could also michael@0: // push the bounds past each other, since the actual intersection is michael@0: // the empty set. 
michael@0: if (newLower > newUpper) { michael@0: *emptyRange = true; michael@0: return nullptr; michael@0: } michael@0: } michael@0: michael@0: return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound, michael@0: newFractional, newExponent); michael@0: } michael@0: michael@0: void michael@0: Range::unionWith(const Range *other) michael@0: { michael@0: int32_t newLower = Min(lower_, other->lower_); michael@0: int32_t newUpper = Max(upper_, other->upper_); michael@0: michael@0: bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_; michael@0: bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_; michael@0: bool newFractional = canHaveFractionalPart_ || other->canHaveFractionalPart_; michael@0: uint16_t newExponent = Max(max_exponent_, other->max_exponent_); michael@0: michael@0: rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound, michael@0: newFractional, newExponent); michael@0: } michael@0: michael@0: Range::Range(const MDefinition *def) michael@0: : symbolicLower_(nullptr), michael@0: symbolicUpper_(nullptr) michael@0: { michael@0: if (const Range *other = def->range()) { michael@0: // The instruction has range information; use it. michael@0: *this = *other; michael@0: michael@0: // Simulate the effect of converting the value to its type. michael@0: switch (def->type()) { michael@0: case MIRType_Int32: michael@0: wrapAroundToInt32(); michael@0: break; michael@0: case MIRType_Boolean: michael@0: wrapAroundToBoolean(); michael@0: break; michael@0: case MIRType_None: michael@0: MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value"); michael@0: default: michael@0: break; michael@0: } michael@0: } else { michael@0: // Otherwise just use type information. We can trust the type here michael@0: // because we don't care what value the instruction actually produces, michael@0: // but what value we might get after we get past the bailouts. 
michael@0: switch (def->type()) { michael@0: case MIRType_Int32: michael@0: setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX); michael@0: break; michael@0: case MIRType_Boolean: michael@0: setInt32(0, 1); michael@0: break; michael@0: case MIRType_None: michael@0: MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value"); michael@0: default: michael@0: setUnknown(); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: // As a special case, MUrsh is permitted to claim a result type of michael@0: // MIRType_Int32 while actually returning values in [0,UINT32_MAX] without michael@0: // bailouts. If range analysis hasn't ruled out values in michael@0: // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for michael@0: // use as either a uint32 or an int32. michael@0: if (!hasInt32UpperBound() && def->isUrsh() && def->toUrsh()->bailoutsDisabled()) michael@0: lower_ = INT32_MIN; michael@0: michael@0: assertInvariants(); michael@0: } michael@0: michael@0: static uint16_t michael@0: ExponentImpliedByDouble(double d) michael@0: { michael@0: // Handle the special values. michael@0: if (IsNaN(d)) michael@0: return Range::IncludesInfinityAndNaN; michael@0: if (IsInfinite(d)) michael@0: return Range::IncludesInfinity; michael@0: michael@0: // Otherwise take the exponent part and clamp it at zero, since the Range michael@0: // class doesn't track fractional ranges. michael@0: return uint16_t(Max(int_fast16_t(0), ExponentComponent(d))); michael@0: } michael@0: michael@0: void michael@0: Range::setDouble(double l, double h) michael@0: { michael@0: // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_. 
michael@0: if (l >= INT32_MIN && l <= INT32_MAX) { michael@0: lower_ = int32_t(floor(l)); michael@0: hasInt32LowerBound_ = true; michael@0: } else { michael@0: lower_ = INT32_MIN; michael@0: hasInt32LowerBound_ = false; michael@0: } michael@0: if (h >= INT32_MIN && h <= INT32_MAX) { michael@0: upper_ = int32_t(ceil(h)); michael@0: hasInt32UpperBound_ = true; michael@0: } else { michael@0: upper_ = INT32_MAX; michael@0: hasInt32UpperBound_ = false; michael@0: } michael@0: michael@0: // Infer max_exponent_. michael@0: uint16_t lExp = ExponentImpliedByDouble(l); michael@0: uint16_t hExp = ExponentImpliedByDouble(h); michael@0: max_exponent_ = Max(lExp, hExp); michael@0: michael@0: // Infer the canHaveFractionalPart_ field. We can have a fractional part michael@0: // if the range crosses through the neighborhood of zero. We won't have a michael@0: // fractional value if the value is always beyond the point at which michael@0: // double precision can't represent fractional values. michael@0: uint16_t minExp = Min(lExp, hExp); michael@0: bool includesNegative = IsNaN(l) || l < 0; michael@0: bool includesPositive = IsNaN(h) || h > 0; michael@0: bool crossesZero = includesNegative && includesPositive; michael@0: canHaveFractionalPart_ = crossesZero || minExp < MaxTruncatableExponent; michael@0: michael@0: optimize(); michael@0: } michael@0: michael@0: static inline bool michael@0: MissingAnyInt32Bounds(const Range *lhs, const Range *rhs) michael@0: { michael@0: return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds(); michael@0: } michael@0: michael@0: Range * michael@0: Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_; michael@0: if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) michael@0: l = NoInt32LowerBound; michael@0: michael@0: int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_; michael@0: if (!lhs->hasInt32UpperBound() || 
!rhs->hasInt32UpperBound()) michael@0: h = NoInt32UpperBound; michael@0: michael@0: // The exponent is at most one greater than the greater of the operands' michael@0: // exponents, except for NaN and infinity cases. michael@0: uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_); michael@0: if (e <= Range::MaxFiniteExponent) michael@0: ++e; michael@0: michael@0: // Infinity + -Infinity is NaN. michael@0: if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) michael@0: e = Range::IncludesInfinityAndNaN; michael@0: michael@0: return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e); michael@0: } michael@0: michael@0: Range * michael@0: Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_; michael@0: if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) michael@0: l = NoInt32LowerBound; michael@0: michael@0: int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_; michael@0: if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) michael@0: h = NoInt32UpperBound; michael@0: michael@0: // The exponent is at most one greater than the greater of the operands' michael@0: // exponents, except for NaN and infinity cases. michael@0: uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_); michael@0: if (e <= Range::MaxFiniteExponent) michael@0: ++e; michael@0: michael@0: // Infinity - Infinity is NaN. 
michael@0: if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) michael@0: e = Range::IncludesInfinityAndNaN; michael@0: michael@0: return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e); michael@0: } michael@0: michael@0: Range * michael@0: Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: JS_ASSERT(lhs->isInt32()); michael@0: JS_ASSERT(rhs->isInt32()); michael@0: michael@0: // If both numbers can be negative, result can be negative in the whole range michael@0: if (lhs->lower() < 0 && rhs->lower() < 0) michael@0: return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper())); michael@0: michael@0: // Only one of both numbers can be negative. michael@0: // - result can't be negative michael@0: // - Upper bound is minimum of both upper range, michael@0: int32_t lower = 0; michael@0: int32_t upper = Min(lhs->upper(), rhs->upper()); michael@0: michael@0: // EXCEPT when upper bound of non negative number is max value, michael@0: // because negative value can return the whole max value. michael@0: // -1 & 5 = 5 michael@0: if (lhs->lower() < 0) michael@0: upper = rhs->upper(); michael@0: if (rhs->lower() < 0) michael@0: upper = lhs->upper(); michael@0: michael@0: return Range::NewInt32Range(alloc, lower, upper); michael@0: } michael@0: michael@0: Range * michael@0: Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: JS_ASSERT(lhs->isInt32()); michael@0: JS_ASSERT(rhs->isInt32()); michael@0: // When one operand is always 0 or always -1, it's a special case where we michael@0: // can compute a fully precise result. Handling these up front also michael@0: // protects the code below from calling CountLeadingZeroes32 with a zero michael@0: // operand or from shifting an int32_t by 32. 
michael@0: if (lhs->lower() == lhs->upper()) { michael@0: if (lhs->lower() == 0) michael@0: return new(alloc) Range(*rhs); michael@0: if (lhs->lower() == -1) michael@0: return new(alloc) Range(*lhs);; michael@0: } michael@0: if (rhs->lower() == rhs->upper()) { michael@0: if (rhs->lower() == 0) michael@0: return new(alloc) Range(*lhs); michael@0: if (rhs->lower() == -1) michael@0: return new(alloc) Range(*rhs);; michael@0: } michael@0: michael@0: // The code below uses CountLeadingZeroes32, which has undefined behavior michael@0: // if its operand is 0. We rely on the code above to protect it. michael@0: JS_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0); michael@0: JS_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0); michael@0: JS_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1); michael@0: JS_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1); michael@0: michael@0: int32_t lower = INT32_MIN; michael@0: int32_t upper = INT32_MAX; michael@0: michael@0: if (lhs->lower() >= 0 && rhs->lower() >= 0) { michael@0: // Both operands are non-negative, so the result won't be less than either. michael@0: lower = Max(lhs->lower(), rhs->lower()); michael@0: // The result will have leading zeros where both operands have leading zeros. michael@0: // CountLeadingZeroes32 of a non-negative int32 will at least be 1 to account michael@0: // for the bit of sign. michael@0: upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()), michael@0: CountLeadingZeroes32(rhs->upper()))); michael@0: } else { michael@0: // The result will have leading ones where either operand has leading ones. 
michael@0: if (lhs->upper() < 0) { michael@0: unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower()); michael@0: lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes)); michael@0: upper = -1; michael@0: } michael@0: if (rhs->upper() < 0) { michael@0: unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower()); michael@0: lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes)); michael@0: upper = -1; michael@0: } michael@0: } michael@0: michael@0: return Range::NewInt32Range(alloc, lower, upper); michael@0: } michael@0: michael@0: Range * michael@0: Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: JS_ASSERT(lhs->isInt32()); michael@0: JS_ASSERT(rhs->isInt32()); michael@0: int32_t lhsLower = lhs->lower(); michael@0: int32_t lhsUpper = lhs->upper(); michael@0: int32_t rhsLower = rhs->lower(); michael@0: int32_t rhsUpper = rhs->upper(); michael@0: bool invertAfter = false; michael@0: michael@0: // If either operand is negative, bitwise-negate it, and arrange to negate michael@0: // the result; ~((~x)^y) == x^y. If both are negative the negations on the michael@0: // result cancel each other out; effectively this is (~x)^(~y) == x^y. michael@0: // These transformations reduce the number of cases we have to handle below. michael@0: if (lhsUpper < 0) { michael@0: lhsLower = ~lhsLower; michael@0: lhsUpper = ~lhsUpper; michael@0: Swap(lhsLower, lhsUpper); michael@0: invertAfter = !invertAfter; michael@0: } michael@0: if (rhsUpper < 0) { michael@0: rhsLower = ~rhsLower; michael@0: rhsUpper = ~rhsUpper; michael@0: Swap(rhsLower, rhsUpper); michael@0: invertAfter = !invertAfter; michael@0: } michael@0: michael@0: // Handle cases where lhs or rhs is always zero specially, because they're michael@0: // easy cases where we can be perfectly precise, and because it protects the michael@0: // CountLeadingZeroes32 calls below from seeing 0 operands, which would be michael@0: // undefined behavior. 
michael@0: int32_t lower = INT32_MIN; michael@0: int32_t upper = INT32_MAX; michael@0: if (lhsLower == 0 && lhsUpper == 0) { michael@0: upper = rhsUpper; michael@0: lower = rhsLower; michael@0: } else if (rhsLower == 0 && rhsUpper == 0) { michael@0: upper = lhsUpper; michael@0: lower = lhsLower; michael@0: } else if (lhsLower >= 0 && rhsLower >= 0) { michael@0: // Both operands are non-negative. The result will be non-negative. michael@0: lower = 0; michael@0: // To compute the upper value, take each operand's upper value and michael@0: // set all bits that don't correspond to leading zero bits in the michael@0: // other to one. For each one, this gives an upper bound for the michael@0: // result, so we can take the minimum between the two. michael@0: unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper); michael@0: unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper); michael@0: upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros), michael@0: lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros)); michael@0: } michael@0: michael@0: // If we bitwise-negated one (but not both) of the operands above, apply the michael@0: // bitwise-negate to the result, completing ~((~x)^y) == x^y. 
michael@0: if (invertAfter) { michael@0: lower = ~lower; michael@0: upper = ~upper; michael@0: Swap(lower, upper); michael@0: } michael@0: michael@0: return Range::NewInt32Range(alloc, lower, upper); michael@0: } michael@0: michael@0: Range * michael@0: Range::not_(TempAllocator &alloc, const Range *op) michael@0: { michael@0: JS_ASSERT(op->isInt32()); michael@0: return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower()); michael@0: } michael@0: michael@0: Range * michael@0: Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs) michael@0: { michael@0: bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(); michael@0: michael@0: uint16_t exponent; michael@0: if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) { michael@0: // Two finite values. michael@0: exponent = lhs->numBits() + rhs->numBits() - 1; michael@0: if (exponent > Range::MaxFiniteExponent) michael@0: exponent = Range::IncludesInfinity; michael@0: } else if (!lhs->canBeNaN() && michael@0: !rhs->canBeNaN() && michael@0: !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) && michael@0: !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) michael@0: { michael@0: // Two values that multiplied together won't produce a NaN. michael@0: exponent = Range::IncludesInfinity; michael@0: } else { michael@0: // Could be anything. 
// --- continuation of Range::mul (head in the previous chunk) ---
        exponent = Range::IncludesInfinityAndNaN;
    }

    // Without full int32 bounds on both operands no concrete product bounds
    // can be computed; return an unbounded range carrying the fractional and
    // exponent information computed above.
    if (MissingAnyInt32Bounds(lhs, rhs))
        return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
    // Evaluate all four corner products in 64 bits (int32*int32 cannot
    // overflow int64) and take the extremes.
    int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
    int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
    int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
    int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
    return new(alloc) Range(
        Min( Min(a, b), Min(c, d) ),
        Max( Max(a, b), Max(c, d) ),
        fractional, exponent);
}

// Range of lhs << c for a constant shift count.
Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    // JS shift semantics use only the low five bits of the shift count.
    int32_t shift = c & 0x1f;

    // If the shift doesn't lose bits or shift bits into the sign bit, we
    // can simply compute the correct range by shifting.
    if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
        (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
    {
        return Range::NewInt32Range(alloc,
                                    uint32_t(lhs->lower()) << shift,
                                    uint32_t(lhs->upper()) << shift);
    }

    // Otherwise the shift may wrap around; any int32 value is possible.
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

// Range of lhs >> c (sign-propagating right shift) for a constant count.
Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    int32_t shift = c & 0x1f;
    // Arithmetic right shift preserves ordering, so shifting the bounds is
    // exact.
    return Range::NewInt32Range(alloc,
                                lhs->lower() >> shift,
                                lhs->upper() >> shift);
}

// Range of lhs >>> c (zero-fill right shift) for a constant count.
Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());

    int32_t shift = c & 0x1f;

    // If the value is always non-negative or always negative, we can simply
    // compute the correct range by shifting.
    if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
        return Range::NewUInt32Range(alloc,
                                     uint32_t(lhs->lower()) >> shift,
                                     uint32_t(lhs->upper()) >> shift);
    }

    // Otherwise return the most general range after the shift.
// --- continuation of Range::ursh(lhs, c) (head in the previous chunk) ---
    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

// Range of lhs << rhs with a variable shift count: nothing useful can be
// concluded, so return the full int32 range.
Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

// Range of lhs >> rhs with a variable shift count. A sign-propagating shift
// moves the value toward zero, so the result lies between lhs's bounds and 0.
Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
}

// Range of lhs >>> rhs with a variable shift count.
Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    // A zero-fill shift cannot make a non-negative value larger; otherwise
    // anything up to UINT32_MAX is possible.
    return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

// Range of Math.abs(op). abs(INT32_MIN) does not fit in int32, so in that
// case the bound saturates at INT32_MAX and the int32-bounds flag is cleared.
Range *
Range::abs(TempAllocator &alloc, const Range *op)
{
    int32_t l = op->lower_;
    int32_t u = op->upper_;

    return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
                            true,
                            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
                            op->hasInt32Bounds() && l != INT32_MIN,
                            op->canHaveFractionalPart_,
                            op->max_exponent_);
}

// Range of Math.min(lhs, rhs). Returns nullptr (meaning "unknown range")
// when the result could be NaN.
Range *
Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    // The lower bound is known only if both inputs have one; the upper bound
    // is known if either input has one, since min cannot exceed it.
    return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                            Min(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

// Range of Math.max(lhs, rhs). Returns nullptr (meaning "unknown range")
// when the result could be NaN. Mirror image of Range::min.
Range *
Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                            Max(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

// Whether lhs * rhs could produce negative zero.
bool
Range::negativeZeroMul(const Range *lhs, const Range *rhs)
{
    // The result can only be negative zero if both sides are finite and they
    // have differing signs.
// --- continuation of Range::negativeZeroMul (head in the previous chunk) ---
    return (lhs->canBeFiniteNegative() && rhs->canBeFiniteNonNegative()) ||
           (rhs->canBeFiniteNegative() && lhs->canBeFiniteNonNegative());
}

// Copy |other|'s state into this range. Returns true if anything changed,
// which callers can use as a fixed-point termination signal.
bool
Range::update(const Range *other)
{
    bool changed =
        lower_ != other->lower_ ||
        hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
        upper_ != other->upper_ ||
        hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
        canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
        max_exponent_ != other->max_exponent_;
    if (changed) {
        lower_ = other->lower_;
        hasInt32LowerBound_ = other->hasInt32LowerBound_;
        upper_ = other->upper_;
        hasInt32UpperBound_ = other->hasInt32UpperBound_;
        canHaveFractionalPart_ = other->canHaveFractionalPart_;
        max_exponent_ = other->max_exponent_;
        // Invariants only need re-checking when the state actually changed.
        assertInvariants();
    }

    return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

// A phi's range is the union of the ranges of its reachable operands.
void
MPhi::computeRange(TempAllocator &alloc)
{
    // Only numeric phis get a range.
    if (type() != MIRType_Int32 && type() != MIRType_Double)
        return;

    Range *range = nullptr;
    for (size_t i = 0, e = numOperands(); i < e; i++) {
        // Inputs flowing in from unreachable blocks can't contribute values.
        if (getOperand(i)->block()->unreachable()) {
            IonSpew(IonSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
            continue;
        }

        // Peek at the pre-bailout range so we can take a short-cut; if any of
        // the operands has an unknown range, this phi has an unknown range.
        if (!getOperand(i)->range())
            return;

        Range input(getOperand(i));

        if (range)
            range->unionWith(&input);
        else
            range = new(alloc) Range(input);
    }

    setRange(range);
}

// A beta node's range is its operand's range intersected with the branch
// condition it encodes; an empty intersection proves the block unreachable.
void
MBeta::computeRange(TempAllocator &alloc)
{
    bool emptyRange = false;

    Range opRange(getOperand(0));
    Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
    if (emptyRange) {
        IonSpew(IonSpew_Range, "Marking block for inst %d unreachable", id());
        block()->setUnreachable();
    } else {
        setRange(range);
    }
}

// A numeric or boolean constant has a singleton range.
void
MConstant::computeRange(TempAllocator &alloc)
{
    if (value().isNumber()) {
        double d = value().toNumber();
        setRange(Range::NewDoubleRange(alloc, d, d));
    } else if (value().isBoolean()) {
        bool b = value().toBoolean();
        setRange(Range::NewInt32Range(alloc, b, b));
    }
}

void
MCharCodeAt::computeRange(TempAllocator &alloc)
{
    // ECMA 262 says that the integer will be non-negative and at most 65535.
// --- continuation of MCharCodeAt::computeRange (head in the previous chunk) ---
    setRange(Range::NewInt32Range(alloc, 0, 65535));
}

// ClampToUint8 always produces a value in [0, 255].
void
MClampToUint8::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewUInt32Range(alloc, 0, 255));
}

// The bitwise ops operate on int32 values, so both operand ranges are
// wrapped around to int32 before being combined.
void
MBitAnd::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::and_(alloc, &left, &right));
}

void
MBitOr::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::or_(alloc, &left, &right));
}

void
MBitXor::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::xor_(alloc, &left, &right));
}

void
MBitNot::computeRange(TempAllocator &alloc)
{
    Range op(getOperand(0));
    op.wrapAroundToInt32();

    setRange(Range::not_(alloc, &op));
}

// Left shift: dispatch to the constant-shift or variable-shift range
// computation depending on the rhs.
void
MLsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        // Only the low five bits of a variable shift count are used.
        right.wrapAroundToShiftCount();
        setRange(Range::lsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::lsh(alloc, &left, c));
}

// Signed right shift: same constant/variable dispatch as MLsh.
void
MRsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        right.wrapAroundToShiftCount();
        setRange(Range::rsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::rsh(alloc, &left, c));
}

void
MUrsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));

    // ursh can be thought of as converting its left operand to uint32, or it
    // can be thought of as converting its left operand to int32, and then
    // reinterpreting the int32 bits as a uint32 value. Both approaches yield
    // the same result. Since we lack support for full uint32 ranges, we use
    // the second interpretation, though it does cause us to be conservative.
// --- continuation of MUrsh::computeRange (head in the previous chunk) ---
    left.wrapAroundToInt32();
    right.wrapAroundToShiftCount();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        setRange(Range::ursh(alloc, &left, &right));
    } else {
        int32_t c = rhs->toConstant()->value().toInt32();
        setRange(Range::ursh(alloc, &left, c));
    }

    // Both Range::ursh overloads produce a non-negative lower bound.
    JS_ASSERT(range()->lower() >= 0);
}

// Math.abs: delegate to Range::abs, wrapping to int32 when the result is
// implicitly truncated.
void
MAbs::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range other(getOperand(0));
    Range *next = Range::abs(alloc, &other);
    if (implicitTruncate_)
        next->wrapAroundToInt32();
    setRange(next);
}

// Math.min/Math.max: delegate to Range::min/Range::max.
void
MMinMax::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range left(getOperand(0));
    Range right(getOperand(1));
    setRange(isMax() ? Range::max(alloc, &left, &right)
                     : Range::min(alloc, &left, &right));
}

void
MAdd::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    Range *next = Range::add(alloc, &left, &right);
    // A truncated add wraps around to int32 on overflow.
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MSub::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    Range *next = Range::sub(alloc, &left, &right);
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMul::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    // Refine the negative-zero flag using the operand ranges.
    if (canBeNegativeZero())
        canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
    Range *next = Range::mul(alloc, &left, &right);
    // Truncated multiplications could overflow in both directions
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMod::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range lhs(getOperand(0));
    Range rhs(getOperand(1));

    // If either operand is a NaN, the result is NaN.
This also conservatively michael@0: // handles Infinity cases. michael@0: if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) michael@0: return; michael@0: michael@0: // If RHS can be zero, the result can be NaN. michael@0: if (rhs.lower() <= 0 && rhs.upper() >= 0) michael@0: return; michael@0: michael@0: // If both operands are non-negative integers, we can optimize this to an michael@0: // unsigned mod. michael@0: if (specialization() == MIRType_Int32 && lhs.lower() >= 0 && rhs.lower() > 0 && michael@0: !lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) michael@0: { michael@0: unsigned_ = true; michael@0: } michael@0: michael@0: // For unsigned mod, we have to convert both operands to unsigned. michael@0: // Note that we handled the case of a zero rhs above. michael@0: if (unsigned_) { michael@0: // The result of an unsigned mod will never be unsigned-greater than michael@0: // either operand. michael@0: uint32_t lhsBound = Max(lhs.lower(), lhs.upper()); michael@0: uint32_t rhsBound = Max(rhs.lower(), rhs.upper()); michael@0: michael@0: // If either range crosses through -1 as a signed value, it could be michael@0: // the maximum unsigned value when interpreted as unsigned. If the range michael@0: // doesn't include -1, then the simple max value we computed above is michael@0: // correct. michael@0: if (lhs.lower() <= -1 && lhs.upper() >= -1) michael@0: lhsBound = UINT32_MAX; michael@0: if (rhs.lower() <= -1 && rhs.upper() >= -1) michael@0: rhsBound = UINT32_MAX; michael@0: michael@0: // The result will never be equal to the rhs, and we shouldn't have michael@0: // any rounding to worry about. michael@0: JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()); michael@0: --rhsBound; michael@0: michael@0: // This gives us two upper bounds, so we can take the best one. 
michael@0: setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound))); michael@0: return; michael@0: } michael@0: michael@0: // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs). michael@0: // First, the absolute value of the result will always be less than the michael@0: // absolute value of rhs. (And if rhs is zero, the result is NaN). michael@0: int64_t a = Abs(rhs.lower()); michael@0: int64_t b = Abs(rhs.upper()); michael@0: if (a == 0 && b == 0) michael@0: return; michael@0: int64_t rhsAbsBound = Max(a, b); michael@0: michael@0: // If the value is known to be integer, less-than abs(rhs) is equivalent michael@0: // to less-than-or-equal abs(rhs)-1. This is important for being able to michael@0: // say that the result of x%256 is an 8-bit unsigned number. michael@0: if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) michael@0: --rhsAbsBound; michael@0: michael@0: // Next, the absolute value of the result will never be greater than the michael@0: // absolute value of lhs. michael@0: int64_t lhsAbsBound = Max(Abs(lhs.lower()), Abs(lhs.upper())); michael@0: michael@0: // This gives us two upper bounds, so we can take the best one. michael@0: int64_t absBound = Min(lhsAbsBound, rhsAbsBound); michael@0: michael@0: // Now consider the sign of the result. michael@0: // If lhs is non-negative, the result will be non-negative. michael@0: // If lhs is non-positive, the result will be non-positive. michael@0: int64_t lower = lhs.lower() >= 0 ? 0 : -absBound; michael@0: int64_t upper = lhs.upper() <= 0 ? 
0 : absBound; michael@0: michael@0: setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(), michael@0: Min(lhs.exponent(), rhs.exponent()))); michael@0: } michael@0: michael@0: void michael@0: MDiv::computeRange(TempAllocator &alloc) michael@0: { michael@0: if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) michael@0: return; michael@0: Range lhs(getOperand(0)); michael@0: Range rhs(getOperand(1)); michael@0: michael@0: // If either operand is a NaN, the result is NaN. This also conservatively michael@0: // handles Infinity cases. michael@0: if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) michael@0: return; michael@0: michael@0: // Something simple for now: When dividing by a positive rhs, the result michael@0: // won't be further from zero than lhs. michael@0: if (lhs.lower() >= 0 && rhs.lower() >= 1) { michael@0: setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent())); michael@0: } else if (unsigned_ && rhs.lower() >= 1) { michael@0: // We shouldn't set the unsigned flag if the inputs can have michael@0: // fractional parts. michael@0: JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()); michael@0: // Unsigned division by a non-zero rhs will return a uint32 value. michael@0: setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX)); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MSqrt::computeRange(TempAllocator &alloc) michael@0: { michael@0: Range input(getOperand(0)); michael@0: michael@0: // If either operand is a NaN, the result is NaN. This also conservatively michael@0: // handles Infinity cases. michael@0: if (!input.hasInt32Bounds()) michael@0: return; michael@0: michael@0: // Sqrt of a negative non-zero value is NaN. michael@0: if (input.lower() < 0) michael@0: return; michael@0: michael@0: // Something simple for now: When taking the sqrt of a positive value, the michael@0: // result won't be further from zero than the input. 
// --- continuation of MSqrt::computeRange (head in the previous chunk) ---
    setRange(new(alloc) Range(0, input.upper(), true, input.exponent()));
}

// Conversion to double is exact for the values tracked here; copy the
// operand's range unchanged.
void
MToDouble::computeRange(TempAllocator &alloc)
{
    setRange(new(alloc) Range(getOperand(0)));
}

// No range is computed for float32 conversions.
void
MToFloat32::computeRange(TempAllocator &alloc)
{
}

// JS truncating conversion: the operand's range wrapped around to int32.
void
MTruncateToInt32::computeRange(TempAllocator &alloc)
{
    Range *output = new(alloc) Range(getOperand(0));
    output->wrapAroundToInt32();
    setRange(output);
}

// Checked int32 conversion: clamp rather than wrap, since out-of-range
// inputs don't produce wrapped results here.
void
MToInt32::computeRange(TempAllocator &alloc)
{
    Range *output = new(alloc) Range(getOperand(0));
    output->clampToInt32();
    setRange(output);
}

// The value range representable by an element of the given typed-array
// element type, or nullptr for the floating-point types.
static Range *GetTypedArrayRange(TempAllocator &alloc, int type)
{
    switch (type) {
      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
      case ScalarTypeDescr::TYPE_UINT8:
        return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
      case ScalarTypeDescr::TYPE_UINT16:
        return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
      case ScalarTypeDescr::TYPE_UINT32:
        return Range::NewUInt32Range(alloc, 0, UINT32_MAX);

      case ScalarTypeDescr::TYPE_INT8:
        return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
      case ScalarTypeDescr::TYPE_INT16:
        return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
      case ScalarTypeDescr::TYPE_INT32:
        return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);

      case ScalarTypeDescr::TYPE_FLOAT32:
      case ScalarTypeDescr::TYPE_FLOAT64:
        break;
    }

    return nullptr;
}

void
MLoadTypedArrayElement::computeRange(TempAllocator &alloc)
{
    // We have an Int32 type and if this is a UInt32 load it may produce a value
    // outside of our range, but we have a bailout to handle those cases.
    setRange(GetTypedArrayRange(alloc, arrayType()));
}

void
MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc)
{
    // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
    // don't have to worry about it returning a value outside our type.
    JS_ASSERT(typedArray_->type() != ScalarTypeDescr::TYPE_UINT32);

    setRange(GetTypedArrayRange(alloc, typedArray_->type()));
}

void
MArrayLength::computeRange(TempAllocator &alloc)
{
    // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
    // nodes when the value is known to be int32 (see the
    // OBJECT_FLAG_LENGTH_OVERFLOW flag).
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void
MInitializedLength::computeRange(TempAllocator &alloc)
{
    // Initialized length is bounded by the maximum element count.
    setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT));
}

void
MTypedArrayLength::computeRange(TempAllocator &alloc)
{
    // Bounded above by INT32_MAX, matching MArrayLength.
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void
MStringLength::computeRange(TempAllocator &alloc)
{
    static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}

void
MArgumentsLength::computeRange(TempAllocator &alloc)
{
    // This is a conservative upper bound on what |TooManyArguments| checks.
// --- continuation of MArgumentsLength::computeRange (head in the previous chunk) ---
    // If exceeded, Ion will not be entered in the first place.
    static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS));
}

void
MBoundsCheck::computeRange(TempAllocator &alloc)
{
    // Just transfer the incoming index range to the output. The length() is
    // also interesting, but it is handled as a bailout check, and we're
    // computing a pre-bailout range here.
    setRange(new(alloc) Range(index()));
}

void
MArrayPush::computeRange(TempAllocator &alloc)
{
    // MArrayPush returns the new array length.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
}

// Ranges for the math functions whose results are easily bounded.
void
MMathFunction::computeRange(TempAllocator &alloc)
{
    Range opRange(getOperand(0));
    switch (function()) {
      case Sin:
      case Cos:
        // sin/cos of a finite value lies in [-1, 1].
        if (!opRange.canBeInfiniteOrNaN())
            setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
        break;
      case Sign:
        if (!opRange.canBeNaN()) {
            // Note that Math.sign(-0) is -0, and we treat -0 as equal to 0.
            int32_t lower = -1;
            int32_t upper = 1;
            // Tighten the sign range when the operand's sign is known.
            if (opRange.hasInt32LowerBound() && opRange.lower() >= 0)
                lower = 0;
            if (opRange.hasInt32UpperBound() && opRange.upper() <= 0)
                upper = 0;
            setRange(Range::NewInt32Range(alloc, lower, upper));
        }
        break;
      default:
        break;
    }
}

// Math.random() produces a double in the unit interval.
void
MRandom::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewDoubleRange(alloc, 0.0, 1.0));
}

///////////////////////////////////////////////////////////////////////////////
// Range Analysis
///////////////////////////////////////////////////////////////////////////////

// Mark every block in the loop body delimited by |header| and |backedge| by
// walking predecessors from the backedge until the (already-marked) header
// terminates the walk. Returns false on OOM.
bool
RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge)
{
    // NOTE(review): Vector's template arguments appear to have been stripped
    // during extraction (upstream likely reads
    // Vector<MBasicBlock *, N, IonAllocPolicy>); verify against the original.
    Vector worklist(alloc());

    // Mark the header as being in the loop. This terminates the walk.
    header->mark();

    backedge->mark();
    if (!worklist.append(backedge))
        return false;

    // If we haven't reached the loop header yet, walk up the predecessors
    // we haven't seen already.
// --- continuation of RangeAnalysis::markBlocksInLoopBody (head in the previous chunk) ---
    while (!worklist.empty()) {
        MBasicBlock *current = worklist.popCopy();
        for (size_t i = 0; i < current->numPredecessors(); i++) {
            MBasicBlock *pred = current->getPredecessor(i);

            if (pred->isMarked())
                continue;

            pred->mark();
            if (!worklist.append(pred))
                return false;
        }
    }

    return true;
}

// Analyze one loop: compute an upper bound on the number of backedge
// executions, derive symbolic bounds for the loop phis, and (outside asm.js)
// hoist bounds checks using those bounds. Returns false on OOM.
bool
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
    JS_ASSERT(header->hasUniqueBackedge());

    // Try to compute an upper bound on the number of times the loop backedge
    // will be taken. Look for tests that dominate the backedge and which have
    // an edge leaving the loop body.
    MBasicBlock *backedge = header->backedge();

    // Ignore trivial infinite loops.
    if (backedge == header)
        return true;

    if (!markBlocksInLoopBody(header, backedge))
        return false;

    LoopIterationBound *iterationBound = nullptr;

    // Walk up the dominator chain from the backedge looking for a dominating
    // branch whose other successor leaves the loop.
    MBasicBlock *block = backedge;
    do {
        BranchDirection direction;
        MTest *branch = block->immediateDominatorBranch(&direction);

        if (block == block->immediateDominator())
            break;

        block = block->immediateDominator();

        if (branch) {
            // The negated direction is the loop-exit edge.
            direction = NegateBranchDirection(direction);
            MBasicBlock *otherBlock = branch->branchSuccessor(direction);
            if (!otherBlock->isMarked()) {
                iterationBound =
                    analyzeLoopIterationCount(header, branch, direction);
                if (iterationBound)
                    break;
            }
        }
    } while (block != header);

    if (!iterationBound) {
        graph_.unmarkBlocks();
        return true;
    }

#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range)) {
        Sprinter sp(GetIonContext()->cx);
        sp.init();
        iterationBound->sum.print(sp);
        IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
                sp.string());
    }
#endif

    // Try to compute symbolic bounds for the phi nodes at the head of this
    // loop, expressed in terms of the iteration bound just computed.

    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
        analyzeLoopPhi(header, iterationBound, *iter);

    if (!mir->compilingAsmJS()) {
        // Try to hoist any bounds checks from the loop using symbolic bounds.

        // NOTE(review): Vector's template arguments appear to have been
        // stripped during extraction (upstream likely reads
        // Vector<MBoundsCheck *, N, IonAllocPolicy>); verify against the
        // original.
        Vector hoistedChecks(alloc());

        for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
            MBasicBlock *block = *iter;
            if (!block->isMarked())
                continue;

            for (MDefinitionIterator iter(block); iter; iter++) {
                MDefinition *def = *iter;
                if (def->isBoundsCheck() && def->isMovable()) {
                    if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
                        if (!hoistedChecks.append(def->toBoundsCheck()))
                            return false;
                    }
                }
            }
        }

        // Note: replace all uses of the original bounds check with the
        // actual index. This is usually done during bounds check elimination,
        // but in this case it's safe to do it here since the load/store is
        // definitely not loop-invariant, so we will never move it before
        // one of the bounds checks we just added.
        for (size_t i = 0; i < hoistedChecks.length(); i++) {
            MBoundsCheck *ins = hoistedChecks[i];
            ins->replaceAllUsesWith(ins->index());
            ins->block()->discard(ins);
        }
    }

    graph_.unmarkBlocks();
    return true;
}

// Try to derive a LoopIterationBound — a symbolic upper bound on the number
// of backedges taken — from the loop-exiting test |test|, which exits the
// loop when taken in |direction|. Returns nullptr when no bound can be
// established.
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(nullptr, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return nullptr;

    // Ensure the rhs is a loop invariant term.
    if (rhs && rhs->block()->isMarked()) {
        if (lhs.term && lhs.term->block()->isMarked())
            return nullptr;
        // Swap the inequality's sides so the loop-varying term is on the
        // left; negate the constant and flip the comparison to compensate.
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return nullptr;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return nullptr;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.
    if (lhs.term->toPhi()->numOperands() != 2)
        return nullptr;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return nullptr;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return nullptr;
    if (!lhsWrite->block()->isMarked())
        return nullptr;
    // Walk the dominator chain from the backedge to confirm the write's
    // block dominates it.
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return nullptr;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return nullptr;

    LinearSum bound(alloc());

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhsN - initial(lhs) - lhsN

        if (rhs) {
            if (!bound.add(rhs, 1))
                return nullptr;
        }
        if (!bound.add(lhsInitial, -1))
            return nullptr;

        int32_t lhsConstant;
        if (!SafeSub(0, lhs.constant, &lhsConstant))
            return nullptr;
        if (!bound.add(lhsConstant))
            return nullptr;
    } else if (lhsModified.constant == -1 && lessEqual) {
        // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
        // case, an upper bound on the number of backedges executed is:
        //
        // initial(lhs) - iterCount + lhsN == rhs
        // iterCount == initial(lhs) - rhs + lhsN

        if (!bound.add(lhsInitial, 1))
            return nullptr;
        if (rhs) {
            if (!bound.add(rhs, -1))
                return nullptr;
        }
        if (!bound.add(lhs.constant))
            return nullptr;
    } else {
        // Only unit increments/decrements are recognized.
        return nullptr;
    }

    return new(alloc()) LoopIterationBound(header, test, bound);
}

void
RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
{
    // Given a bound on the number of backedges taken, compute an upper and
    // lower bound for a phi node that may change by a constant amount each
    // iteration.
Unlike for the case when computing the iteration bound michael@0: // itself, the phi does not need to change the same amount every iteration, michael@0: // but is required to change at most N and be either nondecreasing or michael@0: // nonincreasing. michael@0: michael@0: JS_ASSERT(phi->numOperands() == 2); michael@0: michael@0: MBasicBlock *preLoop = header->loopPredecessor(); michael@0: JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header); michael@0: michael@0: MBasicBlock *backedge = header->backedge(); michael@0: JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header); michael@0: michael@0: MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor()); michael@0: if (initial->block()->isMarked()) michael@0: return; michael@0: michael@0: SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor())); michael@0: michael@0: if (modified.term != phi || modified.constant == 0) michael@0: return; michael@0: michael@0: if (!phi->range()) michael@0: phi->setRange(new(alloc()) Range()); michael@0: michael@0: LinearSum initialSum(alloc()); michael@0: if (!initialSum.add(initial, 1)) michael@0: return; michael@0: michael@0: // The phi may change by N each iteration, and is either nondecreasing or michael@0: // nonincreasing. initial(phi) is either a lower or upper bound for the michael@0: // phi, and initial(phi) + loopBound * N is either an upper or lower bound, michael@0: // at all points within the loop, provided that loopBound >= 0. michael@0: // michael@0: // We are more interested, however, in the bound for phi at points michael@0: // dominated by the loop bound's test; if the test dominates e.g. a bounds michael@0: // check we want to hoist from the loop, using the value of the phi at the michael@0: // head of the loop for this will usually be too imprecise to hoist the michael@0: // check. 
These points will execute only if the backedge executes at least michael@0: // one more time (as the test passed and the test dominates the backedge), michael@0: // so we know both that loopBound >= 1 and that the phi's value has changed michael@0: // at most loopBound - 1 times. Thus, another upper or lower bound for the michael@0: // phi is initial(phi) + (loopBound - 1) * N, without requiring us to michael@0: // ensure that loopBound >= 0. michael@0: michael@0: LinearSum limitSum(loopBound->sum); michael@0: if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) michael@0: return; michael@0: michael@0: int32_t negativeConstant; michael@0: if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant)) michael@0: return; michael@0: michael@0: Range *initRange = initial->range(); michael@0: if (modified.constant > 0) { michael@0: if (initRange && initRange->hasInt32LowerBound()) michael@0: phi->range()->refineLower(initRange->lower()); michael@0: phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum)); michael@0: phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum)); michael@0: } else { michael@0: if (initRange && initRange->hasInt32UpperBound()) michael@0: phi->range()->refineUpper(initRange->upper()); michael@0: phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum)); michael@0: phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum)); michael@0: } michael@0: michael@0: IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id()); michael@0: SpewRange(phi); michael@0: } michael@0: michael@0: // Whether bound is valid at the specified bounds check instruction in a loop, michael@0: // and may be used to hoist ins. 
michael@0: static inline bool michael@0: SymbolicBoundIsValid(MBasicBlock *header, MBoundsCheck *ins, const SymbolicBound *bound) michael@0: { michael@0: if (!bound->loop) michael@0: return true; michael@0: if (ins->block() == header) michael@0: return false; michael@0: MBasicBlock *bb = ins->block()->immediateDominator(); michael@0: while (bb != header && bb != bound->loop->test->block()) michael@0: bb = bb->immediateDominator(); michael@0: return bb == bound->loop->test->block(); michael@0: } michael@0: michael@0: // Convert all components of a linear sum *except* its constant to a definition, michael@0: // adding any necessary instructions to the end of block. michael@0: static inline MDefinition * michael@0: ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum) michael@0: { michael@0: MDefinition *def = nullptr; michael@0: michael@0: for (size_t i = 0; i < sum.numTerms(); i++) { michael@0: LinearTerm term = sum.term(i); michael@0: JS_ASSERT(!term.term->isConstant()); michael@0: if (term.scale == 1) { michael@0: if (def) { michael@0: def = MAdd::New(alloc, def, term.term); michael@0: def->toAdd()->setInt32(); michael@0: block->insertBefore(block->lastIns(), def->toInstruction()); michael@0: def->computeRange(alloc); michael@0: } else { michael@0: def = term.term; michael@0: } michael@0: } else if (term.scale == -1) { michael@0: if (!def) { michael@0: def = MConstant::New(alloc, Int32Value(0)); michael@0: block->insertBefore(block->lastIns(), def->toInstruction()); michael@0: def->computeRange(alloc); michael@0: } michael@0: def = MSub::New(alloc, def, term.term); michael@0: def->toSub()->setInt32(); michael@0: block->insertBefore(block->lastIns(), def->toInstruction()); michael@0: def->computeRange(alloc); michael@0: } else { michael@0: JS_ASSERT(term.scale != 0); michael@0: MConstant *factor = MConstant::New(alloc, Int32Value(term.scale)); michael@0: block->insertBefore(block->lastIns(), factor); michael@0: MMul *mul = 
MMul::New(alloc, term.term, factor); michael@0: mul->setInt32(); michael@0: block->insertBefore(block->lastIns(), mul); michael@0: mul->computeRange(alloc); michael@0: if (def) { michael@0: def = MAdd::New(alloc, def, mul); michael@0: def->toAdd()->setInt32(); michael@0: block->insertBefore(block->lastIns(), def->toInstruction()); michael@0: def->computeRange(alloc); michael@0: } else { michael@0: def = mul; michael@0: } michael@0: } michael@0: } michael@0: michael@0: if (!def) { michael@0: def = MConstant::New(alloc, Int32Value(0)); michael@0: block->insertBefore(block->lastIns(), def->toInstruction()); michael@0: def->computeRange(alloc); michael@0: } michael@0: michael@0: return def; michael@0: } michael@0: michael@0: bool michael@0: RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins) michael@0: { michael@0: // The bounds check's length must be loop invariant. michael@0: if (ins->length()->block()->isMarked()) michael@0: return false; michael@0: michael@0: // The bounds check's index should not be loop invariant (else we would michael@0: // already have hoisted it during LICM). michael@0: SimpleLinearSum index = ExtractLinearSum(ins->index()); michael@0: if (!index.term || !index.term->block()->isMarked()) michael@0: return false; michael@0: michael@0: // Check for a symbolic lower and upper bound on the index. If either michael@0: // condition depends on an iteration bound for the loop, only hoist if michael@0: // the bounds check is dominated by the iteration bound's test. 
michael@0: if (!index.term->range()) michael@0: return false; michael@0: const SymbolicBound *lower = index.term->range()->symbolicLower(); michael@0: if (!lower || !SymbolicBoundIsValid(header, ins, lower)) michael@0: return false; michael@0: const SymbolicBound *upper = index.term->range()->symbolicUpper(); michael@0: if (!upper || !SymbolicBoundIsValid(header, ins, upper)) michael@0: return false; michael@0: michael@0: MBasicBlock *preLoop = header->loopPredecessor(); michael@0: JS_ASSERT(!preLoop->isMarked()); michael@0: michael@0: MDefinition *lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum); michael@0: if (!lowerTerm) michael@0: return false; michael@0: michael@0: MDefinition *upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum); michael@0: if (!upperTerm) michael@0: return false; michael@0: michael@0: // We are checking that index + indexConstant >= 0, and know that michael@0: // index >= lowerTerm + lowerConstant. Thus, check that: michael@0: // michael@0: // lowerTerm + lowerConstant + indexConstant >= 0 michael@0: // lowerTerm >= -lowerConstant - indexConstant michael@0: michael@0: int32_t lowerConstant = 0; michael@0: if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) michael@0: return false; michael@0: if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) michael@0: return false; michael@0: michael@0: // We are checking that index < boundsLength, and know that michael@0: // index <= upperTerm + upperConstant. 
Thus, check that: michael@0: // michael@0: // upperTerm + upperConstant < boundsLength michael@0: michael@0: int32_t upperConstant = index.constant; michael@0: if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) michael@0: return false; michael@0: michael@0: MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm); michael@0: lowerCheck->setMinimum(lowerConstant); michael@0: michael@0: MBoundsCheck *upperCheck = MBoundsCheck::New(alloc(), upperTerm, ins->length()); michael@0: upperCheck->setMinimum(upperConstant); michael@0: upperCheck->setMaximum(upperConstant); michael@0: michael@0: // Hoist the loop invariant upper and lower bounds checks. michael@0: preLoop->insertBefore(preLoop->lastIns(), lowerCheck); michael@0: preLoop->insertBefore(preLoop->lastIns(), upperCheck); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: RangeAnalysis::analyze() michael@0: { michael@0: IonSpew(IonSpew_Range, "Doing range propagation"); michael@0: michael@0: for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { michael@0: MBasicBlock *block = *iter; michael@0: michael@0: if (block->unreachable()) michael@0: continue; michael@0: michael@0: for (MDefinitionIterator iter(block); iter; iter++) { michael@0: MDefinition *def = *iter; michael@0: michael@0: def->computeRange(alloc()); michael@0: IonSpew(IonSpew_Range, "computing range on %d", def->id()); michael@0: SpewRange(def); michael@0: } michael@0: michael@0: if (block->isLoopHeader()) { michael@0: if (!analyzeLoop(block)) michael@0: return false; michael@0: } michael@0: michael@0: // First pass at collecting range info - while the beta nodes are still michael@0: // around and before truncation. 
michael@0: for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) { michael@0: iter->collectRangeInfoPreTrunc(); michael@0: michael@0: // Would have been nice to implement this using collectRangeInfoPreTrunc() michael@0: // methods but it needs the minAsmJSHeapLength(). michael@0: if (mir->compilingAsmJS()) { michael@0: uint32_t minHeapLength = mir->minAsmJSHeapLength(); michael@0: if (iter->isAsmJSLoadHeap()) { michael@0: MAsmJSLoadHeap *ins = iter->toAsmJSLoadHeap(); michael@0: Range *range = ins->ptr()->range(); michael@0: if (range && range->hasInt32LowerBound() && range->lower() >= 0 && michael@0: range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) { michael@0: ins->setSkipBoundsCheck(true); michael@0: } michael@0: } else if (iter->isAsmJSStoreHeap()) { michael@0: MAsmJSStoreHeap *ins = iter->toAsmJSStoreHeap(); michael@0: Range *range = ins->ptr()->range(); michael@0: if (range && range->hasInt32LowerBound() && range->lower() >= 0 && michael@0: range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) { michael@0: ins->setSkipBoundsCheck(true); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: RangeAnalysis::addRangeAssertions() michael@0: { michael@0: if (!js_JitOptions.checkRangeAnalysis) michael@0: return true; michael@0: michael@0: // Check the computed range for this instruction, if the option is set. Note michael@0: // that this code is quite invasive; it adds numerous additional michael@0: // instructions for each MInstruction with a computed range, and it uses michael@0: // registers, so it also affects register allocation. 
michael@0: for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { michael@0: MBasicBlock *block = *iter; michael@0: michael@0: for (MDefinitionIterator iter(block); iter; iter++) { michael@0: MDefinition *ins = *iter; michael@0: michael@0: // Perform range checking for all numeric and numeric-like types. michael@0: if (!IsNumberType(ins->type()) && michael@0: ins->type() != MIRType_Boolean && michael@0: ins->type() != MIRType_Value) michael@0: { michael@0: continue; michael@0: } michael@0: michael@0: Range r(ins); michael@0: michael@0: // Don't insert assertions if there's nothing interesting to assert. michael@0: if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32())) michael@0: continue; michael@0: michael@0: MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r)); michael@0: michael@0: // Beta nodes and interrupt checks are required to be located at the michael@0: // beginnings of basic blocks, so we must insert range assertions michael@0: // after any such instructions. michael@0: MInstructionIterator insertIter = ins->isPhi() michael@0: ? 
block->begin() michael@0: : block->begin(ins->toInstruction()); michael@0: while (insertIter->isBeta() || michael@0: insertIter->isInterruptCheck() || michael@0: insertIter->isInterruptCheckPar() || michael@0: insertIter->isConstant()) michael@0: { michael@0: insertIter++; michael@0: } michael@0: michael@0: if (*insertIter == *iter) michael@0: block->insertAfter(*insertIter, guard); michael@0: else michael@0: block->insertBefore(*insertIter, guard); michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: // Range based Truncation michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: void michael@0: Range::clampToInt32() michael@0: { michael@0: if (isInt32()) michael@0: return; michael@0: int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN; michael@0: int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX; michael@0: setInt32(l, h); michael@0: } michael@0: michael@0: void michael@0: Range::wrapAroundToInt32() michael@0: { michael@0: if (!hasInt32Bounds()) { michael@0: setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX); michael@0: } else if (canHaveFractionalPart()) { michael@0: canHaveFractionalPart_ = false; michael@0: michael@0: // Clearing the fractional field may provide an opportunity to refine michael@0: // lower_ or upper_. 
michael@0: refineInt32BoundsByExponent(max_exponent_, &lower_, &upper_); michael@0: michael@0: assertInvariants(); michael@0: } michael@0: } michael@0: michael@0: void michael@0: Range::wrapAroundToShiftCount() michael@0: { michael@0: wrapAroundToInt32(); michael@0: if (lower() < 0 || upper() >= 32) michael@0: setInt32(0, 31); michael@0: } michael@0: michael@0: void michael@0: Range::wrapAroundToBoolean() michael@0: { michael@0: wrapAroundToInt32(); michael@0: if (!isBoolean()) michael@0: setInt32(0, 1); michael@0: } michael@0: michael@0: bool michael@0: MDefinition::truncate() michael@0: { michael@0: // No procedure defined for truncating this instruction. michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MConstant::truncate() michael@0: { michael@0: if (!value_.isDouble()) michael@0: return false; michael@0: michael@0: // Truncate the double to int, since all uses truncates it. michael@0: int32_t res = ToInt32(value_.toDouble()); michael@0: value_.setInt32(res); michael@0: setResultType(MIRType_Int32); michael@0: if (range()) michael@0: range()->setInt32(res, res); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MAdd::truncate() michael@0: { michael@0: // Remember analysis, needed for fallible checks. michael@0: setTruncated(true); michael@0: michael@0: if (type() == MIRType_Double || type() == MIRType_Int32) { michael@0: specialization_ = MIRType_Int32; michael@0: setResultType(MIRType_Int32); michael@0: if (range()) michael@0: range()->wrapAroundToInt32(); michael@0: return true; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MSub::truncate() michael@0: { michael@0: // Remember analysis, needed for fallible checks. 
michael@0: setTruncated(true); michael@0: michael@0: if (type() == MIRType_Double || type() == MIRType_Int32) { michael@0: specialization_ = MIRType_Int32; michael@0: setResultType(MIRType_Int32); michael@0: if (range()) michael@0: range()->wrapAroundToInt32(); michael@0: return true; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MMul::truncate() michael@0: { michael@0: // Remember analysis, needed to remove negative zero checks. michael@0: setTruncated(true); michael@0: michael@0: if (type() == MIRType_Double || type() == MIRType_Int32) { michael@0: specialization_ = MIRType_Int32; michael@0: setResultType(MIRType_Int32); michael@0: setCanBeNegativeZero(false); michael@0: if (range()) michael@0: range()->wrapAroundToInt32(); michael@0: return true; michael@0: } michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MDiv::truncate() michael@0: { michael@0: // Remember analysis, needed to remove negative zero checks. michael@0: setTruncatedIndirectly(true); michael@0: michael@0: // Check if this division only flows in bitwise instructions. michael@0: if (!isTruncated()) { michael@0: bool allUsesExplictlyTruncate = true; michael@0: for (MUseDefIterator use(this); allUsesExplictlyTruncate && use; use++) { michael@0: switch (use.def()->op()) { michael@0: case MDefinition::Op_BitAnd: michael@0: case MDefinition::Op_BitOr: michael@0: case MDefinition::Op_BitXor: michael@0: case MDefinition::Op_Lsh: michael@0: case MDefinition::Op_Rsh: michael@0: case MDefinition::Op_Ursh: michael@0: break; michael@0: default: michael@0: allUsesExplictlyTruncate = false; michael@0: } michael@0: } michael@0: michael@0: if (allUsesExplictlyTruncate) michael@0: setTruncated(true); michael@0: } michael@0: michael@0: // Divisions where the lhs and rhs are unsigned and the result is michael@0: // truncated can be lowered more efficiently. 
michael@0: if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) { michael@0: unsigned_ = true; michael@0: return true; michael@0: } michael@0: michael@0: // No modifications. michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MMod::truncate() michael@0: { michael@0: // Remember analysis, needed to remove negative zero checks. michael@0: setTruncated(true); michael@0: michael@0: // As for division, handle unsigned modulus with a truncated result. michael@0: if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) { michael@0: unsigned_ = true; michael@0: return true; michael@0: } michael@0: michael@0: // No modifications. michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MToDouble::truncate() michael@0: { michael@0: JS_ASSERT(type() == MIRType_Double); michael@0: michael@0: // We use the return type to flag that this MToDouble should be replaced by michael@0: // a MTruncateToInt32 when modifying the graph. michael@0: setResultType(MIRType_Int32); michael@0: if (range()) michael@0: range()->wrapAroundToInt32(); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MLoadTypedArrayElementStatic::truncate() michael@0: { michael@0: setInfallible(); michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MDefinition::isOperandTruncated(size_t index) const michael@0: { michael@0: return false; michael@0: } michael@0: michael@0: bool michael@0: MTruncateToInt32::isOperandTruncated(size_t index) const michael@0: { michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MBinaryBitwiseInstruction::isOperandTruncated(size_t index) const michael@0: { michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MAdd::isOperandTruncated(size_t index) const michael@0: { michael@0: return isTruncated(); michael@0: } michael@0: michael@0: bool michael@0: MSub::isOperandTruncated(size_t index) const michael@0: { michael@0: 
return isTruncated(); michael@0: } michael@0: michael@0: bool michael@0: MMul::isOperandTruncated(size_t index) const michael@0: { michael@0: return isTruncated(); michael@0: } michael@0: michael@0: bool michael@0: MToDouble::isOperandTruncated(size_t index) const michael@0: { michael@0: // The return type is used to flag that we are replacing this Double by a michael@0: // Truncate of its operand if needed. michael@0: return type() == MIRType_Int32; michael@0: } michael@0: michael@0: bool michael@0: MStoreTypedArrayElement::isOperandTruncated(size_t index) const michael@0: { michael@0: return index == 2 && !isFloatArray(); michael@0: } michael@0: michael@0: bool michael@0: MStoreTypedArrayElementHole::isOperandTruncated(size_t index) const michael@0: { michael@0: return index == 3 && !isFloatArray(); michael@0: } michael@0: michael@0: bool michael@0: MStoreTypedArrayElementStatic::isOperandTruncated(size_t index) const michael@0: { michael@0: return index == 1 && !isFloatArray(); michael@0: } michael@0: michael@0: bool michael@0: MCompare::truncate() michael@0: { michael@0: if (!isDoubleComparison()) michael@0: return false; michael@0: michael@0: // If both operands are naturally in the int32 range, we can convert from michael@0: // a double comparison to being an int32 comparison. michael@0: if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) michael@0: return false; michael@0: michael@0: compareType_ = Compare_Int32; michael@0: michael@0: // Truncating the operands won't change their value, but it will change michael@0: // their type, which we need because we now expect integer inputs. michael@0: truncateOperands_ = true; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MCompare::isOperandTruncated(size_t index) const michael@0: { michael@0: // If we're doing an int32 comparison on operands which were previously michael@0: // floating-point, convert them! 
michael@0: JS_ASSERT_IF(truncateOperands_, isInt32Comparison()); michael@0: return truncateOperands_; michael@0: } michael@0: michael@0: // Ensure that all observables uses can work with a truncated michael@0: // version of the |candidate|'s result. michael@0: static bool michael@0: AllUsesTruncate(MInstruction *candidate) michael@0: { michael@0: // If the value naturally produces an int32 value (before bailout checks) michael@0: // that needs no conversion, we don't have to worry about resume points michael@0: // seeing truncated values. michael@0: bool needsConversion = !candidate->range() || !candidate->range()->isInt32(); michael@0: michael@0: for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) { michael@0: if (!use->consumer()->isDefinition()) { michael@0: // We can only skip testing resume points, if all original uses are michael@0: // still present, or if the value does not need conversion. michael@0: // Otherwise a branch removed by UCE might rely on the non-truncated michael@0: // value, and any bailout with a truncated value might lead an michael@0: // incorrect value. michael@0: if (candidate->isUseRemoved() && needsConversion) michael@0: return false; michael@0: continue; michael@0: } michael@0: michael@0: if (!use->consumer()->toDefinition()->isOperandTruncated(use->index())) michael@0: return false; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: static bool michael@0: CanTruncate(MInstruction *candidate) michael@0: { michael@0: // Compare operations might coerce its inputs to int32 if the ranges are michael@0: // correct. So we do not need to check if all uses are coerced. michael@0: if (candidate->isCompare()) michael@0: return true; michael@0: michael@0: // Set truncated flag if range analysis ensure that it has no michael@0: // rounding errors and no fractional part. 
Note that we can't use michael@0: // the MDefinition Range constructor, because we need to know if michael@0: // the value will have rounding errors before any bailout checks. michael@0: const Range *r = candidate->range(); michael@0: bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors(); michael@0: michael@0: // Special case integer division: the result of a/b can be infinite michael@0: // but cannot actually have rounding errors induced by truncation. michael@0: if (candidate->isDiv() && candidate->toDiv()->specialization() == MIRType_Int32) michael@0: canHaveRoundingErrors = false; michael@0: michael@0: if (canHaveRoundingErrors) michael@0: return false; michael@0: michael@0: // Ensure all observable uses are truncated. michael@0: return AllUsesTruncate(candidate); michael@0: } michael@0: michael@0: static void michael@0: RemoveTruncatesOnOutput(MInstruction *truncated) michael@0: { michael@0: // Compare returns a boolean so it doen't have any output truncates. michael@0: if (truncated->isCompare()) michael@0: return; michael@0: michael@0: JS_ASSERT(truncated->type() == MIRType_Int32); michael@0: JS_ASSERT(Range(truncated).isInt32()); michael@0: michael@0: for (MUseDefIterator use(truncated); use; use++) { michael@0: MDefinition *def = use.def(); michael@0: if (!def->isTruncateToInt32() || !def->isToInt32()) michael@0: continue; michael@0: michael@0: def->replaceAllUsesWith(truncated); michael@0: } michael@0: } michael@0: michael@0: static void michael@0: AdjustTruncatedInputs(TempAllocator &alloc, MInstruction *truncated) michael@0: { michael@0: MBasicBlock *block = truncated->block(); michael@0: for (size_t i = 0, e = truncated->numOperands(); i < e; i++) { michael@0: if (!truncated->isOperandTruncated(i)) michael@0: continue; michael@0: michael@0: MDefinition *input = truncated->getOperand(i); michael@0: if (input->type() == MIRType_Int32) michael@0: continue; michael@0: michael@0: if (input->isToDouble() && input->getOperand(0)->type() == 
MIRType_Int32) { michael@0: JS_ASSERT(input->range()->isInt32()); michael@0: truncated->replaceOperand(i, input->getOperand(0)); michael@0: } else { michael@0: MTruncateToInt32 *op = MTruncateToInt32::New(alloc, truncated->getOperand(i)); michael@0: block->insertBefore(truncated, op); michael@0: truncated->replaceOperand(i, op); michael@0: } michael@0: } michael@0: michael@0: if (truncated->isToDouble()) { michael@0: truncated->replaceAllUsesWith(truncated->getOperand(0)); michael@0: block->discard(truncated); michael@0: } michael@0: } michael@0: michael@0: // Iterate backward on all instruction and attempt to truncate operations for michael@0: // each instruction which respect the following list of predicates: Has been michael@0: // analyzed by range analysis, the range has no rounding errors, all uses cases michael@0: // are truncating the result. michael@0: // michael@0: // If the truncation of the operation is successful, then the instruction is michael@0: // queue for later updating the graph to restore the type correctness by michael@0: // converting the operands that need to be truncated. michael@0: // michael@0: // We iterate backward because it is likely that a truncated operation truncates michael@0: // some of its operands. michael@0: bool michael@0: RangeAnalysis::truncate() michael@0: { michael@0: IonSpew(IonSpew_Range, "Do range-base truncation (backward loop)"); michael@0: michael@0: Vector worklist; michael@0: Vector bitops; michael@0: michael@0: for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) { michael@0: for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) { michael@0: if (iter->type() == MIRType_None) michael@0: continue; michael@0: michael@0: // Remember all bitop instructions for folding after range analysis. 
michael@0: switch (iter->op()) { michael@0: case MDefinition::Op_BitAnd: michael@0: case MDefinition::Op_BitOr: michael@0: case MDefinition::Op_BitXor: michael@0: case MDefinition::Op_Lsh: michael@0: case MDefinition::Op_Rsh: michael@0: case MDefinition::Op_Ursh: michael@0: if (!bitops.append(static_cast(*iter))) michael@0: return false; michael@0: default:; michael@0: } michael@0: michael@0: if (!CanTruncate(*iter)) michael@0: continue; michael@0: michael@0: // Truncate this instruction if possible. michael@0: if (!iter->truncate()) michael@0: continue; michael@0: michael@0: // Delay updates of inputs/outputs to avoid creating node which michael@0: // would be removed by the truncation of the next operations. michael@0: iter->setInWorklist(); michael@0: if (!worklist.append(*iter)) michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: // Update inputs/outputs of truncated instructions. michael@0: IonSpew(IonSpew_Range, "Do graph type fixup (dequeue)"); michael@0: while (!worklist.empty()) { michael@0: MInstruction *ins = worklist.popCopy(); michael@0: ins->setNotInWorklist(); michael@0: RemoveTruncatesOnOutput(ins); michael@0: AdjustTruncatedInputs(alloc(), ins); michael@0: } michael@0: michael@0: // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer michael@0: // input. This is done after range analysis rather than during GVN as the michael@0: // presence of the bitop can change which instructions are truncated. 
michael@0: for (size_t i = 0; i < bitops.length(); i++) { michael@0: MBinaryBitwiseInstruction *ins = bitops[i]; michael@0: MDefinition *folded = ins->foldUnnecessaryBitop(); michael@0: if (folded != ins) michael@0: ins->replaceAllUsesWith(folded); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: // Collect Range information of operands michael@0: /////////////////////////////////////////////////////////////////////////////// michael@0: michael@0: void michael@0: MInArray::collectRangeInfoPreTrunc() michael@0: { michael@0: Range indexRange(index()); michael@0: if (indexRange.isFiniteNonNegative()) michael@0: needsNegativeIntCheck_ = false; michael@0: } michael@0: michael@0: void michael@0: MLoadElementHole::collectRangeInfoPreTrunc() michael@0: { michael@0: Range indexRange(index()); michael@0: if (indexRange.isFiniteNonNegative()) michael@0: needsNegativeIntCheck_ = false; michael@0: } michael@0: michael@0: void michael@0: MDiv::collectRangeInfoPreTrunc() michael@0: { michael@0: Range lhsRange(lhs()); michael@0: if (lhsRange.isFiniteNonNegative()) michael@0: canBeNegativeDividend_ = false; michael@0: } michael@0: michael@0: void michael@0: MMod::collectRangeInfoPreTrunc() michael@0: { michael@0: Range lhsRange(lhs()); michael@0: if (lhsRange.isFiniteNonNegative()) michael@0: canBeNegativeDividend_ = false; michael@0: } michael@0: michael@0: void michael@0: MBoundsCheckLower::collectRangeInfoPreTrunc() michael@0: { michael@0: Range indexRange(index()); michael@0: if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) michael@0: fallible_ = false; michael@0: } michael@0: michael@0: void michael@0: MCompare::collectRangeInfoPreTrunc() michael@0: { michael@0: if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) michael@0: operandsAreNeverNaN_ = true; michael@0: } michael@0: michael@0: void michael@0: 
MNot::collectRangeInfoPreTrunc()
{
    // NOTE(review): the `void` return type of this definition sits at the end
    // of the preceding chunk. Record when the operand can never be NaN.
    if (!Range(operand()).canBeNaN())
        operandIsNeverNaN_ = true;
}

void
MPowHalf::collectRangeInfoPreTrunc()
{
    // Record range facts about the input; presumably these flags let later
    // codegen omit the matching guards — confirm against lowering.
    Range inputRange(input());
    // A finite range, or any range with an int32 lower bound, excludes
    // negative infinity.
    if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
        operandIsNeverNegativeInfinity_ = true;
    // A range excluding zero excludes negative zero in particular.
    if (!inputRange.canBeZero())
        operandIsNeverNegativeZero_ = true;
    if (!inputRange.canBeNaN())
        operandIsNeverNaN_ = true;
}

void
MUrsh::collectRangeInfoPreTrunc()
{
    Range lhsRange(lhs()), rhsRange(rhs());

    // As in MUrsh::computeRange(), convert the inputs.
    lhsRange.wrapAroundToInt32();
    rhsRange.wrapAroundToShiftCount();

    // If the most significant bit of our result is always going to be zero,
    // we can optimize by disabling bailout checks for enforcing an int32 range.
    // (Non-negative lhs, or a shift count of at least 1, clears the sign bit.)
    if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1)
        bailoutsDisabled_ = true;
}

// Walk the graph for blocks flagged unreachable by range analysis and rewrite
// the MTest condition that guards each one into a constant, so that a later
// unreachable-code-elimination pass can delete the dead branch. Sets
// *shouldRemoveDeadCode when at least one condition was rewritten; always
// returns true (the bool return leaves room for fallible allocation — the
// MConstant::New calls are assumed infallible here).
bool
RangeAnalysis::prepareForUCE(bool *shouldRemoveDeadCode)
{
    *shouldRemoveDeadCode = false;

    for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
        MBasicBlock *block = *iter;

        if (!block->unreachable())
            continue;

        // Only test-terminated predecessors can be rewritten this way.
        MControlInstruction *cond = block->getPredecessor(0)->lastIns();
        if (!cond->isTest())
            continue;

        // Replace the condition of the test control instruction by a constant,
        // chosen based on which of the successors carries the unreachable flag
        // that MBeta::computeRange added on its own block: force the branch
        // away from the unreachable successor.
        MTest *test = cond->toTest();
        MConstant *constant = nullptr;
        if (block == test->ifTrue()) {
            constant = MConstant::New(alloc(), BooleanValue(false));
        } else {
            JS_ASSERT(block == test->ifFalse());
            constant = MConstant::New(alloc(), BooleanValue(true));
        }
        test->block()->insertBefore(test, constant);
        test->replaceOperand(0, constant);
        IonSpew(IonSpew_Range, "Update condition of %d to reflect unreachable branches.",
                test->id());

        *shouldRemoveDeadCode = true;
    }

    return true;
}