Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"

#include "jsanalyze.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "vm/NumericConversions.h"

#include "jsopcodeinlines.h"

using namespace js;
using namespace js::jit;

using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::NumberEqualsInt32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsInfinite;
using mozilla::IsNaN;
using mozilla::IsNegative;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
using mozilla::Swap;
using JS::GenericNaN;

// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     y = x * 2;
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be
// to associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     y2 = x3 * 2;
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
//   y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
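//
// For instance, in the first branch above, x1 = Beta(x, [INT_MIN, -1]) lets
// interval arithmetic bound y1 = x1 + 2000000000 by
// [INT_MIN + 2000000000, -1 + 2000000000] = [-147483648, 1999999999],
// which fits in an int32, so no overflow check is needed for that add.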

static bool
IsDominatedUse(MBasicBlock *block, MUse *use)
{
    MNode *n = use->consumer();
    bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
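
    // A phi's input is conceptually used at the end of the corresponding
    // predecessor block rather than in the phi's own block, so dominance is
    // checked against that predecessor.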
    if (isPhi)
        return block->dominates(n->block()->getPredecessor(use->index()));

    return block->dominates(n->block());
}

static inline void
SpewRange(MDefinition *def)
{
#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range) && def->type() != MIRType_None && def->range()) {
        IonSpewHeader(IonSpew_Range);
        def->printName(IonSpewFile);
        fprintf(IonSpewFile, " has range ");
        def->range()->dump(IonSpewFile);
    }
#endif
}

TempAllocator &
RangeAnalysis::alloc() const
{
    return graph_.alloc();
}

void
RangeAnalysis::replaceDominatedUsesWith(MDefinition *orig, MDefinition *dom,
                                        MBasicBlock *block)
{
    for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) {
        if (i->consumer() != dom && IsDominatedUse(block, *i))
            i = i->consumer()->replaceOperand(i, dom);
        else
            i++;
    }
}

bool
RangeAnalysis::addBetaNodes()
{
    IonSpew(IonSpew_Range, "Adding beta nodes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        IonSpew(IonSpew_Range, "Looking at block %d", block->id());

        BranchDirection branch_dir;
        MTest *test = block->immediateDominatorBranch(&branch_dir);

        if (!test || !test->getOperand(0)->isCompare())
            continue;

        MCompare *compare = test->getOperand(0)->toCompare();

        // TODO: support unsigned comparisons
        if (compare->compareType() == MCompare::Compare_UInt32)
            continue;

        MDefinition *left = compare->getOperand(0);
        MDefinition *right = compare->getOperand(1);
        double bound;
        double conservativeLower = NegativeInfinity<double>();
        double conservativeUpper = PositiveInfinity<double>();
        MDefinition *val = nullptr;

        JSOp jsop = compare->jsop();

        if (branch_dir == FALSE_BRANCH) {
            jsop = NegateCompareOp(jsop);
            conservativeLower = GenericNaN();
            conservativeUpper = GenericNaN();
        }

        if (left->isConstant() && left->toConstant()->value().isNumber()) {
            bound = left->toConstant()->value().toNumber();
            val = right;
            jsop = ReverseCompareOp(jsop);
        } else if (right->isConstant() && right->toConstant()->value().isNumber()) {
            bound = right->toConstant()->value().toNumber();
            val = left;
        } else if (left->type() == MIRType_Int32 && right->type() == MIRType_Int32) {
            MDefinition *smaller = nullptr;
            MDefinition *greater = nullptr;
            if (jsop == JSOP_LT) {
                smaller = left;
                greater = right;
            } else if (jsop == JSOP_GT) {
                smaller = right;
                greater = left;
            }
            if (smaller && greater) {
                MBeta *beta;
                beta = MBeta::New(alloc(), smaller,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(smaller, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id());
                beta = MBeta::New(alloc(), greater,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(greater, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id());
            }
            continue;
        } else {
            continue;
        }

        // At this point, one of the operands of the compare is a constant, and
        // val is the other operand.
        JS_ASSERT(val);

        Range comp;
        switch (jsop) {
          case JSOP_LE:
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_LT:
            // For integers, if x < c, the upper bound of x is c - 1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_GE:
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_GT:
            // For integers, if x > c, the lower bound of x is c + 1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_EQ:
            comp.setDouble(bound, bound);
            break;
          default:
            // For JSOP_NE the range would be [-inf, bound-1] U [bound+1, inf],
            // but we only use contiguous ranges, so we can't represent it.
            continue;
        }

        if (IonSpewEnabled(IonSpew_Range)) {
            IonSpewHeader(IonSpew_Range);
            fprintf(IonSpewFile, "Adding beta node for %d with range ", val->id());
            comp.dump(IonSpewFile);
        }

        MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(val, beta, block);
    }

    return true;
}

bool
RangeAnalysis::removeBetaNodes()
{
    IonSpew(IonSpew_Range, "Removing beta nodes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        for (MDefinitionIterator iter(*i); iter; ) {
            MDefinition *def = *iter;
            if (def->isBeta()) {
                MDefinition *op = def->getOperand(0);
                IonSpew(IonSpew_Range, "Removing beta node %d for %d",
                        def->id(), op->id());
                def->replaceAllUsesWith(op);
                iter = block->discardDefAt(iter);
            } else {
                // We only place Beta nodes at the beginning of basic
                // blocks, so if we see something else, we can move on
                // to the next block.
                break;
            }
        }
    }
    return true;
}

void
SymbolicBound::print(Sprinter &sp) const
{
    if (loop)
        sp.printf("[loop] ");
    sum.print(sp);
}

void
SymbolicBound::dump() const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(stderr, "%s\n", sp.string());
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool
IsExponentInteresting(const Range *r)
{
    // If it lacks either a lower or upper bound, the exponent is interesting.
    if (!r->hasInt32Bounds())
        return true;

    // Otherwise if there's no fractional part, the lower and upper bounds,
    // which are integers, are perfectly precise.
    if (!r->canHaveFractionalPart())
        return false;

    // Otherwise, if the bounds are conservatively rounded across a power-of-two
    // boundary, the exponent may imply a tighter range.
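    // For example, a fractional range whose true values lie in [0, 1.5] is
    // stored with integer bounds [0, 2] and max exponent 0 (values < 2): here
    // FloorLog2(2) == 1 > 0, so the exponent carries extra information.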
    return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}

void
Range::print(Sprinter &sp) const
{
    assertInvariants();

    // Floating-point or Integer subset.
    if (canHaveFractionalPart_)
        sp.printf("F");
    else
        sp.printf("I");

    sp.printf("[");

    if (!hasInt32LowerBound_)
        sp.printf("?");
    else
        sp.printf("%d", lower_);
    if (symbolicLower_) {
        sp.printf(" {");
        symbolicLower_->print(sp);
        sp.printf("}");
    }

    sp.printf(", ");

    if (!hasInt32UpperBound_)
        sp.printf("?");
    else
        sp.printf("%d", upper_);
    if (symbolicUpper_) {
        sp.printf(" {");
        symbolicUpper_->print(sp);
        sp.printf("}");
    }

    sp.printf("]");
    if (IsExponentInteresting(this)) {
        if (max_exponent_ == IncludesInfinityAndNaN)
            sp.printf(" (U inf U NaN)");
        else if (max_exponent_ == IncludesInfinity)
            sp.printf(" (U inf)");
        else
            sp.printf(" (< pow(2, %d+1))", max_exponent_);
    }
}

void
Range::dump(FILE *fp) const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(fp, "%s\n", sp.string());
}

void
Range::dump() const
{
    dump(stderr);
}

Range *
Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange)
{
    *emptyRange = false;

    if (!lhs && !rhs)
        return nullptr;

    if (!lhs)
        return new(alloc) Range(*rhs);
    if (!rhs)
        return new(alloc) Range(*lhs);

    int32_t newLower = Max(lhs->lower_, rhs->lower_);
    int32_t newUpper = Min(lhs->upper_, rhs->upper_);

    // :TODO: This information could be used better. If upper < lower, then we
    // have conflicting constraints. Consider:
    //
    // if (x < 0) {
    //   if (x > 0) {
    //     [Some code.]
    //   }
    // }
    //
    // In this case, the block is dead. Right now, we just disregard this fact
    // and make the range unbounded, rather than empty.
    //
    // Instead, we should use it to eliminate the dead block.
    // (Bug 765127)
    if (newUpper < newLower) {
        // If both ranges can be NaN, the result can still be NaN.
        if (!lhs->canBeNaN() || !rhs->canBeNaN())
            *emptyRange = true;
        return nullptr;
    }

    bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
    bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
    bool newFractional = lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_;
    uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);

    // NaN is a special value which is neither greater than infinity nor less
    // than negative infinity. When we intersect two ranges like [?, 0] and
    // [0, ?], we can end up thinking we have both a lower and upper bound,
    // even though NaN is still possible. In this case, just be conservative,
    // since any case where we can have NaN is not especially interesting.
    if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN)
        return nullptr;

    // If one of the ranges has a fractional part and the other doesn't, it's
    // possible that we will have computed a newExponent that's more precise
    // than our newLower and newUpper. This is unusual, so we handle it here
    // instead of in optimize().
    //
    // For example, consider the range F[0,1.5]. Range analysis represents the
    // lower and upper bound as integers, so we'd actually have
    // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
    // more precise upper bound than the integer upper bound.
    //
    // When intersecting such a range with an integer range, the fractional
    // part of the range is dropped. The max exponent of 0 remains valid, so
    // the upper bound needs to be adjusted to 1.
    //
    // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
    // the naive intersection is I[2,2], but since the max exponent tells us
    // that the value is always less than 2, the intersection is actually empty.
    if (lhs->canHaveFractionalPart_ != rhs->canHaveFractionalPart_ ||
        (lhs->canHaveFractionalPart_ &&
         newHasInt32LowerBound && newHasInt32UpperBound &&
         newLower == newUpper))
    {
        refineInt32BoundsByExponent(newExponent, &newLower, &newUpper);

        // If we're intersecting two ranges that don't overlap, this could also
        // push the bounds past each other, since the actual intersection is
        // the empty set.
        if (newLower > newUpper) {
            *emptyRange = true;
            return nullptr;
        }
    }

    return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
                            newFractional, newExponent);
}

void
Range::unionWith(const Range *other)
{
    int32_t newLower = Min(lower_, other->lower_);
    int32_t newUpper = Max(upper_, other->upper_);

    bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_;
    bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_;
    bool newFractional = canHaveFractionalPart_ || other->canHaveFractionalPart_;
    uint16_t newExponent = Max(max_exponent_, other->max_exponent_);

    rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
                  newFractional, newExponent);
}

Range::Range(const MDefinition *def)
  : symbolicLower_(nullptr),
    symbolicUpper_(nullptr)
{
    if (const Range *other = def->range()) {
        // The instruction has range information; use it.
        *this = *other;

        // Simulate the effect of converting the value to its type.
        switch (def->type()) {
          case MIRType_Int32:
            wrapAroundToInt32();
            break;
          case MIRType_Boolean:
            wrapAroundToBoolean();
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            break;
        }
    } else {
        // Otherwise just use type information. We can trust the type here
        // because we don't care what value the instruction actually produces,
        // but what value we might get after we get past the bailouts.
        switch (def->type()) {
          case MIRType_Int32:
            setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
            break;
          case MIRType_Boolean:
            setInt32(0, 1);
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            setUnknown();
            break;
        }
    }

    // As a special case, MUrsh is permitted to claim a result type of
    // MIRType_Int32 while actually returning values in [0,UINT32_MAX] without
    // bailouts. If range analysis hasn't ruled out values in
    // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
    // use as either a uint32 or an int32.
    if (!hasInt32UpperBound() && def->isUrsh() && def->toUrsh()->bailoutsDisabled())
        lower_ = INT32_MIN;

    assertInvariants();
}

static uint16_t
ExponentImpliedByDouble(double d)
{
    // Handle the special values.
    if (IsNaN(d))
        return Range::IncludesInfinityAndNaN;
    if (IsInfinite(d))
        return Range::IncludesInfinity;

    // Otherwise take the exponent part and clamp it at zero, since the Range
    // class doesn't track fractional ranges.
    return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
}

void
Range::setDouble(double l, double h)
{
    // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
    if (l >= INT32_MIN && l <= INT32_MAX) {
        lower_ = int32_t(floor(l));
        hasInt32LowerBound_ = true;
    } else {
        lower_ = INT32_MIN;
        hasInt32LowerBound_ = false;
    }
    if (h >= INT32_MIN && h <= INT32_MAX) {
        upper_ = int32_t(ceil(h));
        hasInt32UpperBound_ = true;
    } else {
        upper_ = INT32_MAX;
        hasInt32UpperBound_ = false;
    }
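
    // For example, setDouble(-1.5, 2.5) rounds the bounds outward to the
    // integers lower_ = -2 and upper_ = 3; the fractional information is
    // carried by canHaveFractionalPart_ below rather than by the bounds.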

    // Infer max_exponent_.
    uint16_t lExp = ExponentImpliedByDouble(l);
    uint16_t hExp = ExponentImpliedByDouble(h);
    max_exponent_ = Max(lExp, hExp);

    // Infer the canHaveFractionalPart_ field. We can have a fractional part
    // if the range crosses through the neighborhood of zero. We won't have a
    // fractional value if the value is always beyond the point at which
    // double precision can't represent fractional values.
    uint16_t minExp = Min(lExp, hExp);
    bool includesNegative = IsNaN(l) || l < 0;
    bool includesPositive = IsNaN(h) || h > 0;
    bool crossesZero = includesNegative && includesPositive;
    canHaveFractionalPart_ = crossesZero || minExp < MaxTruncatableExponent;

    optimize();
}

static inline bool
MissingAnyInt32Bounds(const Range *lhs, const Range *rhs)
{
    return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range *
Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
        l = NoInt32LowerBound;

    int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
        h = NoInt32UpperBound;

    // The exponent is at most one greater than the greater of the operands'
    // exponents, except for NaN and infinity cases.
    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
    if (e <= Range::MaxFiniteExponent)
        ++e;
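
    // For example, adding two ranges each with max exponent 1 (values < 4)
    // gives sums < 8, so an exponent of 2 suffices; incrementing by one is
    // always a safe over-approximation of that bound.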

    // Infinity + -Infinity is NaN.
    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
        e = Range::IncludesInfinityAndNaN;

    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}

Range *
Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
        l = NoInt32LowerBound;

    int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
        h = NoInt32UpperBound;

    // The exponent is at most one greater than the greater of the operands'
    // exponents, except for NaN and infinity cases.
    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
    if (e <= Range::MaxFiniteExponent)
        ++e;

    // Infinity - Infinity is NaN.
    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
        e = Range::IncludesInfinityAndNaN;

    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}

Range *
Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());

    // If both numbers can be negative, the result can be negative in the
    // whole range.
    if (lhs->lower() < 0 && rhs->lower() < 0)
        return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));

    // At most one of the numbers can be negative, so:
    // - the result can't be negative, and
    // - the upper bound is the minimum of the two upper bounds...
    int32_t lower = 0;
    int32_t upper = Min(lhs->upper(), rhs->upper());

    // ... EXCEPT that a negative operand can pass the other operand's value
    // through unchanged (e.g. -1 & 5 = 5), so if one operand can be negative,
    // the upper bound is that of the non-negative operand.
    if (lhs->lower() < 0)
        upper = rhs->upper();
    if (rhs->lower() < 0)
        upper = lhs->upper();

    return Range::NewInt32Range(alloc, lower, upper);
}

Range *
Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    // When one operand is always 0 or always -1, it's a special case where we
    // can compute a fully precise result. Handling these up front also
    // protects the code below from calling CountLeadingZeroes32 with a zero
    // operand or from shifting an int32_t by 32.
    if (lhs->lower() == lhs->upper()) {
        if (lhs->lower() == 0)
            return new(alloc) Range(*rhs);
        if (lhs->lower() == -1)
            return new(alloc) Range(*lhs);
    }
    if (rhs->lower() == rhs->upper()) {
        if (rhs->lower() == 0)
            return new(alloc) Range(*lhs);
        if (rhs->lower() == -1)
            return new(alloc) Range(*rhs);
    }

    // The code below uses CountLeadingZeroes32, which has undefined behavior
    // if its operand is 0. We rely on the code above to protect it.
    JS_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
    JS_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
    JS_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
    JS_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

    int32_t lower = INT32_MIN;
    int32_t upper = INT32_MAX;

    if (lhs->lower() >= 0 && rhs->lower() >= 0) {
        // Both operands are non-negative, so the result won't be less than either.
        lower = Max(lhs->lower(), rhs->lower());
        // The result will have leading zeros where both operands have leading zeros.
        // CountLeadingZeroes32 of a non-negative int32 will at least be 1 to account
        // for the bit of sign.
        upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
                                          CountLeadingZeroes32(rhs->upper())));
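        // For example, lhs = [1, 5] and rhs = [1, 9]: lower = 1, and with
        // CountLeadingZeroes32(5) = 29 and CountLeadingZeroes32(9) = 28 the
        // upper bound is UINT32_MAX >> 28 = 15, giving the range [1, 15].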
    } else {
        // The result will have leading ones where either operand has leading ones.
        if (lhs->upper() < 0) {
            unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
            upper = -1;
        }
        if (rhs->upper() < 0) {
            unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
            upper = -1;
        }
    }

    return Range::NewInt32Range(alloc, lower, upper);
}

Range *
Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    int32_t lhsLower = lhs->lower();
    int32_t lhsUpper = lhs->upper();
    int32_t rhsLower = rhs->lower();
    int32_t rhsUpper = rhs->upper();
    bool invertAfter = false;

    // If either operand is negative, bitwise-negate it, and arrange to negate
    // the result; ~((~x)^y) == x^y. If both are negative the negations on the
    // result cancel each other out; effectively this is (~x)^(~y) == x^y.
    // These transformations reduce the number of cases we have to handle below.
    if (lhsUpper < 0) {
        lhsLower = ~lhsLower;
        lhsUpper = ~lhsUpper;
        Swap(lhsLower, lhsUpper);
        invertAfter = !invertAfter;
    }
    if (rhsUpper < 0) {
        rhsLower = ~rhsLower;
        rhsUpper = ~rhsUpper;
        Swap(rhsLower, rhsUpper);
        invertAfter = !invertAfter;
    }
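
    // For example, an always-negative lhs of [-4, -1] becomes [~(-1), ~(-4)]
    // = [0, 3] after negate-and-swap, with invertAfter toggled so the final
    // bounds are bitwise-negated (and swapped) back at the end.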

    // Handle cases where lhs or rhs is always zero specially, because they're
    // easy cases where we can be perfectly precise, and because it protects the
    // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
    // undefined behavior.
    int32_t lower = INT32_MIN;
    int32_t upper = INT32_MAX;
    if (lhsLower == 0 && lhsUpper == 0) {
        upper = rhsUpper;
        lower = rhsLower;
    } else if (rhsLower == 0 && rhsUpper == 0) {
        upper = lhsUpper;
        lower = lhsLower;
    } else if (lhsLower >= 0 && rhsLower >= 0) {
        // Both operands are non-negative. The result will be non-negative.
        lower = 0;
        // To compute the upper value, take each operand's upper value and
        // set all bits that don't correspond to leading zero bits in the
        // other to one. For each one, this gives an upper bound for the
        // result, so we can take the minimum between the two.
        unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
        unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
        upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                    lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
    }

    // If we bitwise-negated one (but not both) of the operands above, apply the
    // bitwise-negate to the result, completing ~((~x)^y) == x^y.
    if (invertAfter) {
        lower = ~lower;
        upper = ~upper;
        Swap(lower, upper);
    }

    return Range::NewInt32Range(alloc, lower, upper);
}

Range *
Range::not_(TempAllocator &alloc, const Range *op)
{
    JS_ASSERT(op->isInt32());
    return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

Range *
Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart();

    uint16_t exponent;
    if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
        // Two finite values.
        exponent = lhs->numBits() + rhs->numBits() - 1;
        if (exponent > Range::MaxFiniteExponent)
            exponent = Range::IncludesInfinity;
    } else if (!lhs->canBeNaN() &&
               !rhs->canBeNaN() &&
               !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
               !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN()))
    {
        // Two values that multiplied together won't produce a NaN.
        exponent = Range::IncludesInfinity;
    } else {
        // Could be anything.
        exponent = Range::IncludesInfinityAndNaN;
    }

    if (MissingAnyInt32Bounds(lhs, rhs))
        return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
    int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
    int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
    int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
    int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
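    // The extremes of the product must occur at a corner of the operand
    // intervals. For example, lhs = [-2, 3] and rhs = [-4, 5] give corner
    // products 8, -10, -12, and 15, so the result range is [-12, 15].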
    return new(alloc) Range(
        Min( Min(a, b), Min(c, d) ),
        Max( Max(a, b), Max(c, d) ),
        fractional, exponent);
}

Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    int32_t shift = c & 0x1f;

    // If the shift doesn't lose bits or shift bits into the sign bit, we
    // can simply compute the correct range by shifting.
    if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
        (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
    {
        return Range::NewInt32Range(alloc,
                                    uint32_t(lhs->lower()) << shift,
                                    uint32_t(lhs->upper()) << shift);
    }
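
    // For example, [0, 3] << 4 passes the round-trip check above and yields
    // [0, 48], while [0, 1 << 28] << 4 would shift a set bit through the sign
    // bit, so the check fails and we fall back to the full int32 range.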

    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    int32_t shift = c & 0x1f;
    return Range::NewInt32Range(alloc,
                                lhs->lower() >> shift,
                                lhs->upper() >> shift);
}

Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());

    int32_t shift = c & 0x1f;

    // If the value is always non-negative or always negative, we can simply
    // compute the correct range by shifting.
    if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
        return Range::NewUInt32Range(alloc,
                                     uint32_t(lhs->lower()) >> shift,
                                     uint32_t(lhs->upper()) >> shift);
    }

    // Otherwise return the most general range after the shift.
    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
}

Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range *
Range::abs(TempAllocator &alloc, const Range *op)
{
    int32_t l = op->lower_;
    int32_t u = op->upper_;
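
    // For example, op = [-5, 3] yields [0, 5]. The one subtlety is that
    // abs(INT32_MIN) is not representable as an int32, which is why the
    // result's int32-upper-bound flag is cleared below when l == INT32_MIN.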
    return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
                            true,
                            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
                            op->hasInt32Bounds() && l != INT32_MIN,
                            op->canHaveFractionalPart_,
                            op->max_exponent_);
}

Range *
Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                            Min(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

Range *
Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                            Max(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

bool
Range::negativeZeroMul(const Range *lhs, const Range *rhs)
{
    // The result can only be negative zero if both sides are finite and they
    // have differing signs.
    return (lhs->canBeFiniteNegative() && rhs->canBeFiniteNonNegative()) ||
           (rhs->canBeFiniteNegative() && lhs->canBeFiniteNonNegative());
}

bool
Range::update(const Range *other)
{
    bool changed =
        lower_ != other->lower_ ||
        hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
        upper_ != other->upper_ ||
        hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
        canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
        max_exponent_ != other->max_exponent_;
    if (changed) {
        lower_ = other->lower_;
        hasInt32LowerBound_ = other->hasInt32LowerBound_;
        upper_ = other->upper_;
        hasInt32UpperBound_ = other->hasInt32UpperBound_;
        canHaveFractionalPart_ = other->canHaveFractionalPart_;
        max_exponent_ = other->max_exponent_;
        assertInvariants();
    }

    return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void
MPhi::computeRange(TempAllocator &alloc)
{
    if (type() != MIRType_Int32 && type() != MIRType_Double)
        return;

    Range *range = nullptr;
    for (size_t i = 0, e = numOperands(); i < e; i++) {
        if (getOperand(i)->block()->unreachable()) {
            IonSpew(IonSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
            continue;
        }

        // Peek at the pre-bailout range so we can take a short-cut; if any of
        // the operands has an unknown range, this phi has an unknown range.
        if (!getOperand(i)->range())
            return;

        Range input(getOperand(i));

        if (range)
            range->unionWith(&input);
        else
            range = new(alloc) Range(input);
    }

    setRange(range);
}

void
MBeta::computeRange(TempAllocator &alloc)
{
    bool emptyRange = false;

    Range opRange(getOperand(0));
    Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
    if (emptyRange) {
        IonSpew(IonSpew_Range, "Marking block for inst %d unreachable", id());
        block()->setUnreachable();
    } else {
        setRange(range);
    }
}

void
MConstant::computeRange(TempAllocator &alloc)
{
    if (value().isNumber()) {
        double d = value().toNumber();
        setRange(Range::NewDoubleRange(alloc, d, d));
    } else if (value().isBoolean()) {
        bool b = value().toBoolean();
        setRange(Range::NewInt32Range(alloc, b, b));
    }
}

void
MCharCodeAt::computeRange(TempAllocator &alloc)
{
    // ECMA 262 says that the integer will be non-negative and at most 65535.
    setRange(Range::NewInt32Range(alloc, 0, 65535));
}

void
MClampToUint8::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void
MBitAnd::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::and_(alloc, &left, &right));
}

void
MBitOr::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::or_(alloc, &left, &right));
}

void
MBitXor::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::xor_(alloc, &left, &right));
}

void
MBitNot::computeRange(TempAllocator &alloc)
{
    Range op(getOperand(0));
    op.wrapAroundToInt32();

    setRange(Range::not_(alloc, &op));
}

void
MLsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        right.wrapAroundToShiftCount();
        setRange(Range::lsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::lsh(alloc, &left, c));
}

void
MRsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        right.wrapAroundToShiftCount();
        setRange(Range::rsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::rsh(alloc, &left, c));
}

void
MUrsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));

    // ursh can be thought of as converting its left operand to uint32, or it
    // can be thought of as converting its left operand to int32, and then
    // reinterpreting the int32 bits as a uint32 value. Both approaches yield
    // the same result. Since we lack support for full uint32 ranges, we use
    // the second interpretation, though it does cause us to be conservative.
    left.wrapAroundToInt32();
    right.wrapAroundToShiftCount();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        setRange(Range::ursh(alloc, &left, &right));
    } else {
        int32_t c = rhs->toConstant()->value().toInt32();
        setRange(Range::ursh(alloc, &left, c));
    }

    JS_ASSERT(range()->lower() >= 0);
}

void
MAbs::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range other(getOperand(0));
    Range *next = Range::abs(alloc, &other);
    if (implicitTruncate_)
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMinMax::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range left(getOperand(0));
    Range right(getOperand(1));
    setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
}

void
MAdd::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    Range *next = Range::add(alloc, &left, &right);
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MSub::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    Range *next = Range::sub(alloc, &left, &right);
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMul::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range left(getOperand(0));
    Range right(getOperand(1));
    if (canBeNegativeZero())
        canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
    Range *next = Range::mul(alloc, &left, &right);
    // Truncated multiplications could overflow in both directions.
    if (isTruncated())
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMod::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range lhs(getOperand(0));
    Range rhs(getOperand(1));

    // If either operand is a NaN, the result is NaN. This also conservatively
    // handles Infinity cases.
    if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
        return;

    // If RHS can be zero, the result can be NaN.
    if (rhs.lower() <= 0 && rhs.upper() >= 0)
        return;

    // If both operands are non-negative integers, we can optimize this to an
    // unsigned mod.
    if (specialization() == MIRType_Int32 && lhs.lower() >= 0 && rhs.lower() > 0 &&
        !lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
    {
        unsigned_ = true;
    }

    // For unsigned mod, we have to convert both operands to unsigned.
    // Note that we handled the case of a zero rhs above.
    if (unsigned_) {
        // The result of an unsigned mod will never be unsigned-greater than
        // either operand.
        uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper());
        uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper());

        // If either range crosses through -1 as a signed value, it could be
        // the maximum unsigned value when interpreted as unsigned. If the range
        // doesn't include -1, then the simple max value we computed above is
        // correct.
        if (lhs.lower() <= -1 && lhs.upper() >= -1)
            lhsBound = UINT32_MAX;
        if (rhs.lower() <= -1 && rhs.upper() >= -1)
            rhsBound = UINT32_MAX;

        // The result will never be equal to the rhs, and we shouldn't have
        // any rounding to worry about.
        JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
        --rhsBound;
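
        // For example, lhs = [0, 100] and rhs = [3, 5]: lhsBound = 100 and
        // rhsBound = 5 - 1 = 4, so the best bound below is Min(100, 4) = 4
        // and the result range is [0, 4].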

        // This gives us two upper bounds, so we can take the best one.
        setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
        return;
    }

    // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
    // First, the absolute value of the result will always be less than the
    // absolute value of rhs. (And if rhs is zero, the result is NaN).
    int64_t a = Abs<int64_t>(rhs.lower());
    int64_t b = Abs<int64_t>(rhs.upper());
    if (a == 0 && b == 0)
        return;
    int64_t rhsAbsBound = Max(a, b);

    // If the value is known to be integer, less-than abs(rhs) is equivalent
    // to less-than-or-equal abs(rhs)-1. This is important for being able to
    // say that the result of x%256 is an 8-bit unsigned number.
    if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
        --rhsAbsBound;

    // Next, the absolute value of the result will never be greater than the
    // absolute value of lhs.
    int64_t lhsAbsBound = Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

    // This gives us two upper bounds, so we can take the best one.
    int64_t absBound = Min(lhsAbsBound, rhsAbsBound);

    // Now consider the sign of the result.
    // If lhs is non-negative, the result will be non-negative.
    // If lhs is non-positive, the result will be non-positive.
    int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
    int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
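
    // For example, lhs = [-7, 3] and rhs = [4, 4] with integer operands:
    // rhsAbsBound = 4 - 1 = 3, lhsAbsBound = 7, absBound = 3, and since lhs
    // spans zero the result range is [-3, 3].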

    setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(),
                              Min(lhs.exponent(), rhs.exponent())));
}

void
MDiv::computeRange(TempAllocator &alloc)
{
    if (specialization() != MIRType_Int32 && specialization() != MIRType_Double)
        return;
    Range lhs(getOperand(0));
    Range rhs(getOperand(1));

    // If either operand is a NaN, the result is NaN. This also conservatively
    // handles Infinity cases.
    if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
        return;

    // Something simple for now: When dividing by a positive rhs, the result
    // won't be further from zero than lhs.
    if (lhs.lower() >= 0 && rhs.lower() >= 1) {
        setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent()));
    } else if (unsigned_ && rhs.lower() >= 1) {
        // We shouldn't set the unsigned flag if the inputs can have
        // fractional parts.
        JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
        // Unsigned division by a non-zero rhs will return a uint32 value.
        setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
    }
}

void
MSqrt::computeRange(TempAllocator &alloc)
{
    Range input(getOperand(0));

    // If the operand is a NaN, the result is NaN. This also conservatively
    // handles Infinity cases.
    if (!input.hasInt32Bounds())
        return;

    // Sqrt of a negative non-zero value is NaN.
    if (input.lower() < 0)
        return;

    // Something simple for now: When taking the sqrt of a positive value, the
    // result won't be further from zero than the input.
    setRange(new(alloc) Range(0, input.upper(), true, input.exponent()));
}

void
MToDouble::computeRange(TempAllocator &alloc)
{
    setRange(new(alloc) Range(getOperand(0)));
}

void
MToFloat32::computeRange(TempAllocator &alloc)
{
}

void
MTruncateToInt32::computeRange(TempAllocator &alloc)
{
    Range *output = new(alloc) Range(getOperand(0));
    output->wrapAroundToInt32();
    setRange(output);
}

void
MToInt32::computeRange(TempAllocator &alloc)
{
    Range *output = new(alloc) Range(getOperand(0));
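    // Editorial note: unlike MTruncateToInt32 above, which wraps, MToInt32
    // bails out on inputs that can't be converted exactly to an int32, so
    // clamping (rather than wrapping) models the post-bailout value.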
    output->clampToInt32();
    setRange(output);
}

static Range *
GetTypedArrayRange(TempAllocator &alloc, int type)
{
    switch (type) {
      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
      case ScalarTypeDescr::TYPE_UINT8:
        return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
      case ScalarTypeDescr::TYPE_UINT16:
        return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
      case ScalarTypeDescr::TYPE_UINT32:
        return Range::NewUInt32Range(alloc, 0, UINT32_MAX);

      case ScalarTypeDescr::TYPE_INT8:
        return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
      case ScalarTypeDescr::TYPE_INT16:
        return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
      case ScalarTypeDescr::TYPE_INT32:
        return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);

      case ScalarTypeDescr::TYPE_FLOAT32:
      case ScalarTypeDescr::TYPE_FLOAT64:
        break;
    }

    return nullptr;
}

void
MLoadTypedArrayElement::computeRange(TempAllocator &alloc)
{
    // We have an Int32 type and if this is a UInt32 load it may produce a value
    // outside of our range, but we have a bailout to handle those cases.
    setRange(GetTypedArrayRange(alloc, arrayType()));
}

void
MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc)
{
    // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
    // don't have to worry about it returning a value outside our type.
    JS_ASSERT(typedArray_->type() != ScalarTypeDescr::TYPE_UINT32);

    setRange(GetTypedArrayRange(alloc, typedArray_->type()));
}

void
MArrayLength::computeRange(TempAllocator &alloc)
{
    // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
    // nodes when the value is known to be int32 (see the
    // OBJECT_FLAG_LENGTH_OVERFLOW flag).
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void
MInitializedLength::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT));
}

void
MTypedArrayLength::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void
MStringLength::computeRange(TempAllocator &alloc)
{
    static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}

void
MArgumentsLength::computeRange(TempAllocator &alloc)
{
    // This is a conservative upper bound on what |TooManyArguments| checks.
    // If exceeded, Ion will not be entered in the first place.
    static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX,
                  "NewUInt32Range requires a uint32 value");
    setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS));
}

void
MBoundsCheck::computeRange(TempAllocator &alloc)
{
    // Just transfer the incoming index range to the output. The length() is
    // also interesting, but it is handled as a bailout check, and we're
    // computing a pre-bailout range here.
    setRange(new(alloc) Range(index()));
}

void
MArrayPush::computeRange(TempAllocator &alloc)
{
    // MArrayPush returns the new array length.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
}

void
MMathFunction::computeRange(TempAllocator &alloc)
{
    Range opRange(getOperand(0));
    switch (function()) {
      case Sin:
      case Cos:
        if (!opRange.canBeInfiniteOrNaN())
            setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
        break;
      case Sign:
        if (!opRange.canBeNaN()) {
            // Note that Math.sign(-0) is -0, and we treat -0 as equal to 0.
            int32_t lower = -1;
            int32_t upper = 1;
            if (opRange.hasInt32LowerBound() && opRange.lower() >= 0)
                lower = 0;
            if (opRange.hasInt32UpperBound() && opRange.upper() <= 0)
                upper = 0;
            setRange(Range::NewInt32Range(alloc, lower, upper));
        }
        break;
      default:
        break;
    }
}

void
MRandom::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewDoubleRange(alloc, 0.0, 1.0));
}

///////////////////////////////////////////////////////////////////////////////
// Range Analysis
///////////////////////////////////////////////////////////////////////////////

bool
RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge)
{
    Vector<MBasicBlock *, 16, IonAllocPolicy> worklist(alloc());

    // Mark the header as being in the loop. This terminates the walk.
    header->mark();

    backedge->mark();
    if (!worklist.append(backedge))
        return false;

    // If we haven't reached the loop header yet, walk up the predecessors
    // we haven't seen already.
    while (!worklist.empty()) {
        MBasicBlock *current = worklist.popCopy();
        for (size_t i = 0; i < current->numPredecessors(); i++) {
            MBasicBlock *pred = current->getPredecessor(i);

            if (pred->isMarked())
                continue;

            pred->mark();
            if (!worklist.append(pred))
                return false;
        }
    }

    return true;
}

bool
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
    JS_ASSERT(header->hasUniqueBackedge());

    // Try to compute an upper bound on the number of times the loop backedge
    // will be taken. Look for tests that dominate the backedge and which have
    // an edge leaving the loop body.
    MBasicBlock *backedge = header->backedge();

    // Ignore trivial infinite loops.
    if (backedge == header)
        return true;

    if (!markBlocksInLoopBody(header, backedge))
        return false;

    LoopIterationBound *iterationBound = nullptr;

    MBasicBlock *block = backedge;
    do {
        BranchDirection direction;
        MTest *branch = block->immediateDominatorBranch(&direction);

        if (block == block->immediateDominator())
            break;

        block = block->immediateDominator();

        if (branch) {
            direction = NegateBranchDirection(direction);
            MBasicBlock *otherBlock = branch->branchSuccessor(direction);
            if (!otherBlock->isMarked()) {
                iterationBound =
                    analyzeLoopIterationCount(header, branch, direction);
                if (iterationBound)
                    break;
            }
        }
    } while (block != header);

    if (!iterationBound) {
        graph_.unmarkBlocks();
        return true;
    }

#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range)) {
        Sprinter sp(GetIonContext()->cx);
        sp.init();
        iterationBound->sum.print(sp);
        IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
                sp.string());
    }
#endif

    // Try to compute symbolic bounds for the phi nodes at the head of this
    // loop, expressed in terms of the iteration bound just computed.

    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
        analyzeLoopPhi(header, iterationBound, *iter);

    if (!mir->compilingAsmJS()) {
        // Try to hoist any bounds checks from the loop using symbolic bounds.

        Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks(alloc());

        for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
            MBasicBlock *block = *iter;
            if (!block->isMarked())
                continue;

            for (MDefinitionIterator iter(block); iter; iter++) {
                MDefinition *def = *iter;
                if (def->isBoundsCheck() && def->isMovable()) {
                    if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
                        if (!hoistedChecks.append(def->toBoundsCheck()))
                            return false;
                    }
                }
            }
        }

        // Note: replace all uses of the original bounds check with the
        // actual index. This is usually done during bounds check elimination,
        // but in this case it's safe to do it here since the load/store is
        // definitely not loop-invariant, so we will never move it before
        // one of the bounds checks we just added.
        for (size_t i = 0; i < hoistedChecks.length(); i++) {
            MBoundsCheck *ins = hoistedChecks[i];
            ins->replaceAllUsesWith(ins->index());
            ins->block()->discard(ins);
        }
    }

    graph_.unmarkBlocks();
    return true;
}

LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(nullptr, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return nullptr;

    // Ensure the rhs is a loop invariant term.
    if (rhs && rhs->block()->isMarked()) {
        if (lhs.term && lhs.term->block()->isMarked())
            return nullptr;
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return nullptr;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return nullptr;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.

    if (lhs.term->toPhi()->numOperands() != 2)
        return nullptr;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return nullptr;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return nullptr;
    if (!lhsWrite->block()->isMarked())
        return nullptr;
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return nullptr;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return nullptr;

    LinearSum bound(alloc());

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhs - initial(lhs) - lhsN
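        //
        // For example, in 'for (i = 0; i < n; i++)' the phi for i has
        // initial(lhs) == 0 and lhsN == 0, so the backedge count is bounded
        // by n.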
1731 if (rhs) {
1732 if (!bound.add(rhs, 1))
1733 return nullptr;
1734 }
1735 if (!bound.add(lhsInitial, -1))
1736 return nullptr;
1738 int32_t lhsConstant;
1739 if (!SafeSub(0, lhs.constant, &lhsConstant))
1740 return nullptr;
1741 if (!bound.add(lhsConstant))
1742 return nullptr;
1743 } else if (lhsModified.constant == -1 && lessEqual) {
1744 // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
1745 // case, an upper bound on the number of backedges executed is:
1746 //
1747 // initial(lhs) - iterCount + lhsN == rhs
1748 // iterCount == initial(lhs) - rhs + lhsN
1750 if (!bound.add(lhsInitial, 1))
1751 return nullptr;
1752 if (rhs) {
1753 if (!bound.add(rhs, -1))
1754 return nullptr;
1755 }
1756 if (!bound.add(lhs.constant))
1757 return nullptr;
1758 } else {
1759 return nullptr;
1760 }
1762 return new(alloc()) LoopIterationBound(header, test, bound);
1763 }
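// As a worked illustration of the bound derived above (the names |i| and |n|
// are hypothetical, not from this source), consider:
//
//   for (var i = 0; i < n; i++) { ... }
//
// The test extracts as lhs == i + 0 (so lhsN == 0) and rhs == n, with
// lessEqual == false. The phi for i starts at 0 and is rewritten as 'i + 1'
// on every iteration, so lhsModified.constant == 1 and the first case above
// applies, giving the backedge bound:
//
//   iterCount == rhs - initial(lhs) - lhsN == n - 0 - 0 == n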
1765 void
1766 RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
1767 {
1768 // Given a bound on the number of backedges taken, compute an upper and
1769 // lower bound for a phi node that may change by a constant amount each
1770 // iteration. Unlike when computing the iteration bound itself, the phi
1771 // does not need to change by the same amount every iteration; it only
1772 // needs to change by at most N per iteration and to be either
1773 // nondecreasing or nonincreasing.
1775 JS_ASSERT(phi->numOperands() == 2);
1777 MBasicBlock *preLoop = header->loopPredecessor();
1778 JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header);
1780 MBasicBlock *backedge = header->backedge();
1781 JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header);
1783 MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor());
1784 if (initial->block()->isMarked())
1785 return;
1787 SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));
1789 if (modified.term != phi || modified.constant == 0)
1790 return;
1792 if (!phi->range())
1793 phi->setRange(new(alloc()) Range());
1795 LinearSum initialSum(alloc());
1796 if (!initialSum.add(initial, 1))
1797 return;
1799 // The phi may change by N each iteration, and is either nondecreasing or
1800 // nonincreasing. initial(phi) is either a lower or upper bound for the
1801 // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
1802 // at all points within the loop, provided that loopBound >= 0.
1803 //
1804 // We are more interested, however, in the bound for phi at points
1805 // dominated by the loop bound's test; if the test dominates e.g. a bounds
1806 // check we want to hoist from the loop, using the value of the phi at the
1807 // head of the loop for this will usually be too imprecise to hoist the
1808 // check. These points will execute only if the backedge executes at least
1809 // one more time (as the test passed and the test dominates the backedge),
1810 // so we know both that loopBound >= 1 and that the phi's value has changed
1811 // at most loopBound - 1 times. Thus, another upper or lower bound for the
1812 // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
1813 // ensure that loopBound >= 0.
1815 LinearSum limitSum(loopBound->sum);
1816 if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
1817 return;
1819 int32_t negativeConstant;
1820 if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
1821 return;
1823 Range *initRange = initial->range();
1824 if (modified.constant > 0) {
1825 if (initRange && initRange->hasInt32LowerBound())
1826 phi->range()->refineLower(initRange->lower());
1827 phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
1828 phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
1829 } else {
1830 if (initRange && initRange->hasInt32UpperBound())
1831 phi->range()->refineUpper(initRange->upper());
1832 phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
1833 phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
1834 }
1836 IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
1837 SpewRange(phi);
1838 }
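// Continuing the sketch from analyzeLoopIterationBound (hypothetical names):
// for 'for (var i = 0; i < n; i++)' the phi for i has initial == 0 and
// modified.constant == 1, so it receives the symbolic lower bound 0
// (initialSum) and, at points dominated by the loop test, the symbolic
// upper bound 0 + loopBound * 1 - 1 == n - 1 (limitSum).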
1840 // Whether |bound| is valid at the specified bounds check instruction in a
1841 // loop, and may be used to hoist |ins|.
1842 static inline bool
1843 SymbolicBoundIsValid(MBasicBlock *header, MBoundsCheck *ins, const SymbolicBound *bound)
1844 {
1845 if (!bound->loop)
1846 return true;
1847 if (ins->block() == header)
1848 return false;
1849 MBasicBlock *bb = ins->block()->immediateDominator();
1850 while (bb != header && bb != bound->loop->test->block())
1851 bb = bb->immediateDominator();
1852 return bb == bound->loop->test->block();
1853 }
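// Illustration (hypothetical CFG): walking up the immediate-dominator chain
// from ins->block(), if bound->loop->test->block() is reached before the
// loop header, the test dominates the check and the bound may be used;
// if the walk hits the header first, the check can execute without the
// test having passed, and the bound is rejected.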
1855 // Convert all components of a linear sum *except* its constant to a definition,
1856 // adding any necessary instructions to the end of block.
1857 static inline MDefinition *
1858 ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum)
1859 {
1860 MDefinition *def = nullptr;
1862 for (size_t i = 0; i < sum.numTerms(); i++) {
1863 LinearTerm term = sum.term(i);
1864 JS_ASSERT(!term.term->isConstant());
1865 if (term.scale == 1) {
1866 if (def) {
1867 def = MAdd::New(alloc, def, term.term);
1868 def->toAdd()->setInt32();
1869 block->insertBefore(block->lastIns(), def->toInstruction());
1870 def->computeRange(alloc);
1871 } else {
1872 def = term.term;
1873 }
1874 } else if (term.scale == -1) {
1875 if (!def) {
1876 def = MConstant::New(alloc, Int32Value(0));
1877 block->insertBefore(block->lastIns(), def->toInstruction());
1878 def->computeRange(alloc);
1879 }
1880 def = MSub::New(alloc, def, term.term);
1881 def->toSub()->setInt32();
1882 block->insertBefore(block->lastIns(), def->toInstruction());
1883 def->computeRange(alloc);
1884 } else {
1885 JS_ASSERT(term.scale != 0);
1886 MConstant *factor = MConstant::New(alloc, Int32Value(term.scale));
1887 block->insertBefore(block->lastIns(), factor);
1888 MMul *mul = MMul::New(alloc, term.term, factor);
1889 mul->setInt32();
1890 block->insertBefore(block->lastIns(), mul);
1891 mul->computeRange(alloc);
1892 if (def) {
1893 def = MAdd::New(alloc, def, mul);
1894 def->toAdd()->setInt32();
1895 block->insertBefore(block->lastIns(), def->toInstruction());
1896 def->computeRange(alloc);
1897 } else {
1898 def = mul;
1899 }
1900 }
1901 }
1903 if (!def) {
1904 def = MConstant::New(alloc, Int32Value(0));
1905 block->insertBefore(block->lastIns(), def->toInstruction());
1906 def->computeRange(alloc);
1907 }
1909 return def;
1910 }
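// A sketch of the emitted code (hypothetical terms x, y, z): for the sum
// '2*x + y - z + 5' this would append, before the block's last instruction:
//
//   t0 = MMul(x, 2)     // general scale case
//   t1 = MAdd(t0, y)    // scale == 1 with an existing def
//   t2 = MSub(t1, z)    // scale == -1
//
// and return t2. The constant 5 is deliberately not materialized; callers
// such as tryHoistBoundsCheck below fold it into the constants of the
// checks they construct.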
1912 bool
1913 RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins)
1914 {
1915 // The bounds check's length must be loop invariant.
1916 if (ins->length()->block()->isMarked())
1917 return false;
1919 // The bounds check's index should not be loop invariant (else we would
1920 // already have hoisted it during LICM).
1921 SimpleLinearSum index = ExtractLinearSum(ins->index());
1922 if (!index.term || !index.term->block()->isMarked())
1923 return false;
1925 // Check for a symbolic lower and upper bound on the index. If either
1926 // condition depends on an iteration bound for the loop, only hoist if
1927 // the bounds check is dominated by the iteration bound's test.
1928 if (!index.term->range())
1929 return false;
1930 const SymbolicBound *lower = index.term->range()->symbolicLower();
1931 if (!lower || !SymbolicBoundIsValid(header, ins, lower))
1932 return false;
1933 const SymbolicBound *upper = index.term->range()->symbolicUpper();
1934 if (!upper || !SymbolicBoundIsValid(header, ins, upper))
1935 return false;
1937 MBasicBlock *preLoop = header->loopPredecessor();
1938 JS_ASSERT(!preLoop->isMarked());
1940 MDefinition *lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
1941 if (!lowerTerm)
1942 return false;
1944 MDefinition *upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
1945 if (!upperTerm)
1946 return false;
1948 // We are checking that index + indexConstant >= 0, and know that
1949 // index >= lowerTerm + lowerConstant. Thus, check that:
1950 //
1951 // lowerTerm + lowerConstant + indexConstant >= 0
1952 // lowerTerm >= -lowerConstant - indexConstant
1954 int32_t lowerConstant = 0;
1955 if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
1956 return false;
1957 if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
1958 return false;
1960 // We are checking that index < boundsLength, and know that
1961 // index <= upperTerm + upperConstant. Thus, check that:
1962 //
1963 // upperTerm + upperConstant < boundsLength
1965 int32_t upperConstant = index.constant;
1966 if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
1967 return false;
1969 MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
1970 lowerCheck->setMinimum(lowerConstant);
1972 MBoundsCheck *upperCheck = MBoundsCheck::New(alloc(), upperTerm, ins->length());
1973 upperCheck->setMinimum(upperConstant);
1974 upperCheck->setMaximum(upperConstant);
1976 // Hoist the loop invariant upper and lower bounds checks.
1977 preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
1978 preLoop->insertBefore(preLoop->lastIns(), upperCheck);
1980 return true;
1981 }
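// Sketch of the overall effect (hypothetical JS):
//
//   for (var i = 0; i < n; i++)
//       a[i] = 0;
//
// Given the symbolic bounds 0 <= i <= n - 1 on the index phi (from
// analyzeLoopPhi), the hoisted checks in the preheader are a
// MBoundsCheckLower verifying 0 >= 0 and a MBoundsCheck verifying
// n - 1 < length(a), after which the per-iteration check in the loop body
// can be eliminated.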
1983 bool
1984 RangeAnalysis::analyze()
1985 {
1986 IonSpew(IonSpew_Range, "Doing range propagation");
1988 for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
1989 MBasicBlock *block = *iter;
1991 if (block->unreachable())
1992 continue;
1994 for (MDefinitionIterator iter(block); iter; iter++) {
1995 MDefinition *def = *iter;
1997 def->computeRange(alloc());
1998 IonSpew(IonSpew_Range, "computing range on %d", def->id());
1999 SpewRange(def);
2000 }
2002 if (block->isLoopHeader()) {
2003 if (!analyzeLoop(block))
2004 return false;
2005 }
2007 // First pass at collecting range info - while the beta nodes are still
2008 // around and before truncation.
2009 for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
2010 iter->collectRangeInfoPreTrunc();
2012 // It would have been nicer to implement this in the collectRangeInfoPreTrunc()
2013 // methods, but they would need access to minAsmJSHeapLength().
2014 if (mir->compilingAsmJS()) {
2015 uint32_t minHeapLength = mir->minAsmJSHeapLength();
2016 if (iter->isAsmJSLoadHeap()) {
2017 MAsmJSLoadHeap *ins = iter->toAsmJSLoadHeap();
2018 Range *range = ins->ptr()->range();
2019 if (range && range->hasInt32LowerBound() && range->lower() >= 0 &&
2020 range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) {
2021 ins->setSkipBoundsCheck(true);
2022 }
2023 } else if (iter->isAsmJSStoreHeap()) {
2024 MAsmJSStoreHeap *ins = iter->toAsmJSStoreHeap();
2025 Range *range = ins->ptr()->range();
2026 if (range && range->hasInt32LowerBound() && range->lower() >= 0 &&
2027 range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) {
2028 ins->setSkipBoundsCheck(true);
2029 }
2030 }
2031 }
2032 }
2033 }
2035 return true;
2036 }
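// To illustrate the asm.js case above (hypothetical numbers): if a heap
// access has a pointer range of [0, 4095] and the module's minimum heap
// length is at least 4096 bytes, the access can never be out of bounds,
// so its bounds check is skipped.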
2038 bool
2039 RangeAnalysis::addRangeAssertions()
2040 {
2041 if (!js_JitOptions.checkRangeAnalysis)
2042 return true;
2044 // Check the computed range for this instruction, if the option is set. Note
2045 // that this code is quite invasive; it adds numerous additional
2046 // instructions for each MInstruction with a computed range, and it uses
2047 // registers, so it also affects register allocation.
2048 for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
2049 MBasicBlock *block = *iter;
2051 for (MDefinitionIterator iter(block); iter; iter++) {
2052 MDefinition *ins = *iter;
2054 // Perform range checking for all numeric and numeric-like types.
2055 if (!IsNumberType(ins->type()) &&
2056 ins->type() != MIRType_Boolean &&
2057 ins->type() != MIRType_Value)
2058 {
2059 continue;
2060 }
2062 Range r(ins);
2064 // Don't insert assertions if there's nothing interesting to assert.
2065 if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32()))
2066 continue;
2068 MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));
2070 // Beta nodes and interrupt checks are required to be located at the
2071 // beginnings of basic blocks, so we must insert range assertions
2072 // after any such instructions.
2073 MInstructionIterator insertIter = ins->isPhi()
2074 ? block->begin()
2075 : block->begin(ins->toInstruction());
2076 while (insertIter->isBeta() ||
2077 insertIter->isInterruptCheck() ||
2078 insertIter->isInterruptCheckPar() ||
2079 insertIter->isConstant())
2080 {
2081 insertIter++;
2082 }
2084 if (*insertIter == *iter)
2085 block->insertAfter(*insertIter, guard);
2086 else
2087 block->insertBefore(*insertIter, guard);
2088 }
2089 }
2091 return true;
2092 }
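// These assertions are a debugging aid rather than an optimization:
// js_JitOptions.checkRangeAnalysis is normally false and is enabled from
// the JS shell (the --ion-check-range-analysis option, if memory serves).
// Each inserted MAssertRange makes the generated code verify at runtime
// that the definition's actual value stays within the range computed here.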
2094 ///////////////////////////////////////////////////////////////////////////////
2095 // Range based Truncation
2096 ///////////////////////////////////////////////////////////////////////////////
2098 void
2099 Range::clampToInt32()
2100 {
2101 if (isInt32())
2102 return;
2103 int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
2104 int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
2105 setInt32(l, h);
2106 }
2108 void
2109 Range::wrapAroundToInt32()
2110 {
2111 if (!hasInt32Bounds()) {
2112 setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
2113 } else if (canHaveFractionalPart()) {
2114 canHaveFractionalPart_ = false;
2116 // Clearing the fractional field may provide an opportunity to refine
2117 // lower_ or upper_.
2118 refineInt32BoundsByExponent(max_exponent_, &lower_, &upper_);
2120 assertInvariants();
2121 }
2122 }
2124 void
2125 Range::wrapAroundToShiftCount()
2126 {
2127 wrapAroundToInt32();
2128 if (lower() < 0 || upper() >= 32)
2129 setInt32(0, 31);
2130 }
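// For example (hypothetical range): JS shift counts use only the low five
// bits of the rhs, so 'x << 33' shifts by 1; a rhs range of [0, 40] is
// therefore not usable as-is and collapses to the conservative [0, 31].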
2132 void
2133 Range::wrapAroundToBoolean()
2134 {
2135 wrapAroundToInt32();
2136 if (!isBoolean())
2137 setInt32(0, 1);
2138 }
2140 bool
2141 MDefinition::truncate()
2142 {
2143 // No procedure defined for truncating this instruction.
2144 return false;
2145 }
2147 bool
2148 MConstant::truncate()
2149 {
2150 if (!value_.isDouble())
2151 return false;
2153 // Truncate the double to an int32, since all uses truncate it.
2154 int32_t res = ToInt32(value_.toDouble());
2155 value_.setInt32(res);
2156 setResultType(MIRType_Int32);
2157 if (range())
2158 range()->setInt32(res, res);
2159 return true;
2160 }
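// Illustrative values: a double constant 3.99 whose uses all truncate
// becomes the int32 constant 3, and 4294967299.0 (2^32 + 3) also becomes
// 3, since ToInt32 applies the JS ToInt32 wraparound semantics rather than
// clamping to the int32 range.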
2162 bool
2163 MAdd::truncate()
2164 {
2165 // Remember analysis, needed for fallible checks.
2166 setTruncated(true);
2168 if (type() == MIRType_Double || type() == MIRType_Int32) {
2169 specialization_ = MIRType_Int32;
2170 setResultType(MIRType_Int32);
2171 if (range())
2172 range()->wrapAroundToInt32();
2173 return true;
2174 }
2176 return false;
2177 }
2179 bool
2180 MSub::truncate()
2181 {
2182 // Remember analysis, needed for fallible checks.
2183 setTruncated(true);
2185 if (type() == MIRType_Double || type() == MIRType_Int32) {
2186 specialization_ = MIRType_Int32;
2187 setResultType(MIRType_Int32);
2188 if (range())
2189 range()->wrapAroundToInt32();
2190 return true;
2191 }
2193 return false;
2194 }
2196 bool
2197 MMul::truncate()
2198 {
2199 // Remember analysis, needed to remove negative zero checks.
2200 setTruncated(true);
2202 if (type() == MIRType_Double || type() == MIRType_Int32) {
2203 specialization_ = MIRType_Int32;
2204 setResultType(MIRType_Int32);
2205 setCanBeNegativeZero(false);
2206 if (range())
2207 range()->wrapAroundToInt32();
2208 return true;
2209 }
2211 return false;
2212 }
2214 bool
2215 MDiv::truncate()
2216 {
2217 // Remember analysis, needed to remove negative zero checks.
2218 setTruncatedIndirectly(true);
2220 // Check whether this division only flows into bitwise instructions.
2221 if (!isTruncated()) {
2222 bool allUsesExplicitlyTruncate = true;
2223 for (MUseDefIterator use(this); allUsesExplicitlyTruncate && use; use++) {
2224 switch (use.def()->op()) {
2225 case MDefinition::Op_BitAnd:
2226 case MDefinition::Op_BitOr:
2227 case MDefinition::Op_BitXor:
2228 case MDefinition::Op_Lsh:
2229 case MDefinition::Op_Rsh:
2230 case MDefinition::Op_Ursh:
2231 break;
2232 default:
2233 allUsesExplicitlyTruncate = false;
2234 }
2235 }
2237 if (allUsesExplicitlyTruncate)
2238 setTruncated(true);
2239 }
2241 // Divisions where the lhs and rhs are unsigned and the result is
2242 // truncated can be lowered more efficiently.
2243 if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) {
2244 unsigned_ = true;
2245 return true;
2246 }
2248 // No modifications.
2249 return false;
2250 }
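// Sketch (hypothetical JS): in '(a / b) | 0' every use of the division is
// a bitwise operation, so the loop above marks it as truncated; if
// tryUseUnsignedOperands() then succeeds (roughly, both operands are known
// to be non-negative), the division can be lowered as a cheaper unsigned
// division.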
2252 bool
2253 MMod::truncate()
2254 {
2255 // Remember analysis, needed to remove negative zero checks.
2256 setTruncated(true);
2258 // As for division, handle unsigned modulus with a truncated result.
2259 if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) {
2260 unsigned_ = true;
2261 return true;
2262 }
2264 // No modifications.
2265 return false;
2266 }
2268 bool
2269 MToDouble::truncate()
2270 {
2271 JS_ASSERT(type() == MIRType_Double);
2273 // We use the return type to flag that this MToDouble should be replaced by
2274 // a MTruncateToInt32 when modifying the graph.
2275 setResultType(MIRType_Int32);
2276 if (range())
2277 range()->wrapAroundToInt32();
2279 return true;
2280 }
2282 bool
2283 MLoadTypedArrayElementStatic::truncate()
2284 {
2285 setInfallible();
2286 return false;
2287 }
2289 bool
2290 MDefinition::isOperandTruncated(size_t index) const
2291 {
2292 return false;
2293 }
2295 bool
2296 MTruncateToInt32::isOperandTruncated(size_t index) const
2297 {
2298 return true;
2299 }
2301 bool
2302 MBinaryBitwiseInstruction::isOperandTruncated(size_t index) const
2303 {
2304 return true;
2305 }
2307 bool
2308 MAdd::isOperandTruncated(size_t index) const
2309 {
2310 return isTruncated();
2311 }
2313 bool
2314 MSub::isOperandTruncated(size_t index) const
2315 {
2316 return isTruncated();
2317 }
2319 bool
2320 MMul::isOperandTruncated(size_t index) const
2321 {
2322 return isTruncated();
2323 }
2325 bool
2326 MToDouble::isOperandTruncated(size_t index) const
2327 {
2328 // The return type is used to flag that this MToDouble is being replaced,
2329 // if needed, by a truncation of its operand.
2330 return type() == MIRType_Int32;
2331 }
2333 bool
2334 MStoreTypedArrayElement::isOperandTruncated(size_t index) const
2335 {
2336 return index == 2 && !isFloatArray();
2337 }
2339 bool
2340 MStoreTypedArrayElementHole::isOperandTruncated(size_t index) const
2341 {
2342 return index == 3 && !isFloatArray();
2343 }
2345 bool
2346 MStoreTypedArrayElementStatic::isOperandTruncated(size_t index) const
2347 {
2348 return index == 1 && !isFloatArray();
2349 }
2351 bool
2352 MCompare::truncate()
2353 {
2354 if (!isDoubleComparison())
2355 return false;
2357 // If both operands are naturally in the int32 range, we can convert the
2358 // double comparison into an int32 comparison.
2359 if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32())
2360 return false;
2362 compareType_ = Compare_Int32;
2364 // Truncating the operands won't change their value, but it will change
2365 // their type, which we need because we now expect integer inputs.
2366 truncateOperands_ = true;
2368 return true;
2369 }
2371 bool
2372 MCompare::isOperandTruncated(size_t index) const
2373 {
2374 // If we're doing an int32 comparison on operands which were previously
2375 // floating-point, convert them!
2376 JS_ASSERT_IF(truncateOperands_, isInt32Comparison());
2377 return truncateOperands_;
2378 }
2380 // Ensure that all observable uses can work with a truncated
2381 // version of |candidate|'s result.
2382 static bool
2383 AllUsesTruncate(MInstruction *candidate)
2384 {
2385 // If the value naturally produces an int32 value (before bailout checks)
2386 // that needs no conversion, we don't have to worry about resume points
2387 // seeing truncated values.
2388 bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
2390 for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) {
2391 if (!use->consumer()->isDefinition()) {
2392 // We can only skip testing resume points if all original uses are
2393 // still present or if the value does not need conversion.
2394 // Otherwise a branch removed by UCE might rely on the non-truncated
2395 // value, and any bailout with a truncated value might lead to an
2396 // incorrect value.
2397 if (candidate->isUseRemoved() && needsConversion)
2398 return false;
2399 continue;
2400 }
2402 if (!use->consumer()->toDefinition()->isOperandTruncated(use->index()))
2403 return false;
2404 }
2406 return true;
2407 }
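// Sketch (hypothetical): for 'var y = (a + b) | 0' the add's only
// definition use is a truncating bitor, so AllUsesTruncate returns true.
// If a resume point also captures the add, that alone does not block
// truncation; it only does so when some original use has been removed
// (isUseRemoved()) and the value actually needs conversion, since a
// bailout could then observe the truncated value.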
2409 static bool
2410 CanTruncate(MInstruction *candidate)
2411 {
2412 // Compare operations might coerce their inputs to int32 if their ranges
2413 // permit it, so we do not need to check whether all uses are coerced.
2414 if (candidate->isCompare())
2415 return true;
2417 // Set the truncated flag if range analysis ensures that the value has no
2418 // rounding errors and no fractional part. Note that we can't use
2419 // the MDefinition Range constructor, because we need to know if
2420 // the value will have rounding errors before any bailout checks.
2421 const Range *r = candidate->range();
2422 bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
2424 // Special case integer division: the result of a/b can be infinite
2425 // but cannot actually have rounding errors induced by truncation.
2426 if (candidate->isDiv() && candidate->toDiv()->specialization() == MIRType_Int32)
2427 canHaveRoundingErrors = false;
2429 if (canHaveRoundingErrors)
2430 return false;
2432 // Ensure all observable uses are truncated.
2433 return AllUsesTruncate(candidate);
2434 }
2436 static void
2437 RemoveTruncatesOnOutput(MInstruction *truncated)
2438 {
2439 // Compare returns a boolean, so it doesn't have any output truncates.
2440 if (truncated->isCompare())
2441 return;
2443 JS_ASSERT(truncated->type() == MIRType_Int32);
2444 JS_ASSERT(Range(truncated).isInt32());
2446 for (MUseDefIterator use(truncated); use; use++) {
2447 MDefinition *def = use.def();
2448 if (!def->isTruncateToInt32() && !def->isToInt32())
2449 continue;
2451 def->replaceAllUsesWith(truncated);
2452 }
2453 }
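// For example (hypothetical MIR): once |x| has been truncated to produce
// an int32 directly, any 'y = MToInt32(x)' or 'y = MTruncateToInt32(x)'
// in the graph is redundant; redirecting y's uses to x leaves y dead, to
// be cleaned up by later passes.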
2455 static void
2456 AdjustTruncatedInputs(TempAllocator &alloc, MInstruction *truncated)
2457 {
2458 MBasicBlock *block = truncated->block();
2459 for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
2460 if (!truncated->isOperandTruncated(i))
2461 continue;
2463 MDefinition *input = truncated->getOperand(i);
2464 if (input->type() == MIRType_Int32)
2465 continue;
2467 if (input->isToDouble() && input->getOperand(0)->type() == MIRType_Int32) {
2468 JS_ASSERT(input->range()->isInt32());
2469 truncated->replaceOperand(i, input->getOperand(0));
2470 } else {
2471 MTruncateToInt32 *op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
2472 block->insertBefore(truncated, op);
2473 truncated->replaceOperand(i, op);
2474 }
2475 }
2477 if (truncated->isToDouble()) {
2478 truncated->replaceAllUsesWith(truncated->getOperand(0));
2479 block->discard(truncated);
2480 }
2481 }
2483 // Iterate backward over all instructions and attempt to truncate each one
2484 // that respects the following predicates: it has been analyzed by range
2485 // analysis, its range has no rounding errors, and all of its uses truncate
2486 // the result.
2487 //
2488 // If the truncation of an operation succeeds, the instruction is queued so
2489 // that the graph can later be updated to restore type correctness by
2490 // converting the operands that need to be truncated.
2491 //
2492 // We iterate backward because it is likely that a truncated operation truncates
2493 // some of its operands.
2494 bool
2495 RangeAnalysis::truncate()
2496 {
2497 IonSpew(IonSpew_Range, "Do range-base truncation (backward loop)");
2499 Vector<MInstruction *, 16, SystemAllocPolicy> worklist;
2500 Vector<MBinaryBitwiseInstruction *, 16, SystemAllocPolicy> bitops;
2502 for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
2503 for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
2504 if (iter->type() == MIRType_None)
2505 continue;
2507 // Remember all bitop instructions for folding after range analysis.
2508 switch (iter->op()) {
2509 case MDefinition::Op_BitAnd:
2510 case MDefinition::Op_BitOr:
2511 case MDefinition::Op_BitXor:
2512 case MDefinition::Op_Lsh:
2513 case MDefinition::Op_Rsh:
2514 case MDefinition::Op_Ursh:
2515 if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter)))
2516 return false;
2517 default: break;
2518 }
2520 if (!CanTruncate(*iter))
2521 continue;
2523 // Truncate this instruction if possible.
2524 if (!iter->truncate())
2525 continue;
2527 // Delay updates of inputs/outputs to avoid creating nodes which
2528 // would be removed by the truncation of subsequent operations.
2529 iter->setInWorklist();
2530 if (!worklist.append(*iter))
2531 return false;
2532 }
2533 }
2535 // Update inputs/outputs of truncated instructions.
2536 IonSpew(IonSpew_Range, "Do graph type fixup (dequeue)");
2537 while (!worklist.empty()) {
2538 MInstruction *ins = worklist.popCopy();
2539 ins->setNotInWorklist();
2540 RemoveTruncatesOnOutput(ins);
2541 AdjustTruncatedInputs(alloc(), ins);
2542 }
2544 // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
2545 // input. This is done after range analysis rather than during GVN as the
2546 // presence of the bitop can change which instructions are truncated.
2547 for (size_t i = 0; i < bitops.length(); i++) {
2548 MBinaryBitwiseInstruction *ins = bitops[i];
2549 MDefinition *folded = ins->foldUnnecessaryBitop();
2550 if (folded != ins)
2551 ins->replaceAllUsesWith(folded);
2552 }
2554 return true;
2555 }
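// End-to-end sketch (hypothetical JS): in 'var y = (a + b) | 0', the add
// is truncated to int32 by the backward loop, its inputs are fixed up,
// and the '| 0' then becomes an unnecessary bitop: foldUnnecessaryBitop()
// returns the add itself and the bitor's uses are redirected to it.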
2557 ///////////////////////////////////////////////////////////////////////////////
2558 // Collect Range information of operands
2559 ///////////////////////////////////////////////////////////////////////////////
2561 void
2562 MInArray::collectRangeInfoPreTrunc()
2563 {
2564 Range indexRange(index());
2565 if (indexRange.isFiniteNonNegative())
2566 needsNegativeIntCheck_ = false;
2567 }
2569 void
2570 MLoadElementHole::collectRangeInfoPreTrunc()
2571 {
2572 Range indexRange(index());
2573 if (indexRange.isFiniteNonNegative())
2574 needsNegativeIntCheck_ = false;
2575 }
2577 void
2578 MDiv::collectRangeInfoPreTrunc()
2579 {
2580 Range lhsRange(lhs());
2581 if (lhsRange.isFiniteNonNegative())
2582 canBeNegativeDividend_ = false;
2583 }
2585 void
2586 MMod::collectRangeInfoPreTrunc()
2587 {
2588 Range lhsRange(lhs());
2589 if (lhsRange.isFiniteNonNegative())
2590 canBeNegativeDividend_ = false;
2591 }
2593 void
2594 MBoundsCheckLower::collectRangeInfoPreTrunc()
2595 {
2596 Range indexRange(index());
2597 if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_)
2598 fallible_ = false;
2599 }
2601 void
2602 MCompare::collectRangeInfoPreTrunc()
2603 {
2604 if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN())
2605 operandsAreNeverNaN_ = true;
2606 }
2608 void
2609 MNot::collectRangeInfoPreTrunc()
2610 {
2611 if (!Range(operand()).canBeNaN())
2612 operandIsNeverNaN_ = true;
2613 }
2615 void
2616 MPowHalf::collectRangeInfoPreTrunc()
2617 {
2618 Range inputRange(input());
2619 if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
2620 operandIsNeverNegativeInfinity_ = true;
2621 if (!inputRange.canBeZero())
2622 operandIsNeverNegativeZero_ = true;
2623 if (!inputRange.canBeNaN())
2624 operandIsNeverNaN_ = true;
2625 }
2627 void
2628 MUrsh::collectRangeInfoPreTrunc()
2629 {
2630 Range lhsRange(lhs()), rhsRange(rhs());
2632 // As in MUrsh::computeRange(), convert the inputs.
2633 lhsRange.wrapAroundToInt32();
2634 rhsRange.wrapAroundToShiftCount();
2636 // If the most significant bit of our result is always going to be zero,
2637 // we can optimize by disabling bailout checks for enforcing an int32 range.
2638 if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1)
2639 bailoutsDisabled_ = true;
2640 }
2642 bool
2643 RangeAnalysis::prepareForUCE(bool *shouldRemoveDeadCode)
2644 {
2645 *shouldRemoveDeadCode = false;
2647 for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
2648 MBasicBlock *block = *iter;
2650 if (!block->unreachable())
2651 continue;
2653 MControlInstruction *cond = block->getPredecessor(0)->lastIns();
2654 if (!cond->isTest())
2655 continue;
2657 // Replace the condition of the test control instruction with a constant
2658 // chosen based on which of the successors has the unreachable flag,
2659 // which is added by MBeta::computeRange on its own block.
2660 MTest *test = cond->toTest();
2661 MConstant *constant = nullptr;
2662 if (block == test->ifTrue()) {
2663 constant = MConstant::New(alloc(), BooleanValue(false));
2664 } else {
2665 JS_ASSERT(block == test->ifFalse());
2666 constant = MConstant::New(alloc(), BooleanValue(true));
2667 }
2668 test->block()->insertBefore(test, constant);
2669 test->replaceOperand(0, constant);
2670 IonSpew(IonSpew_Range, "Update condition of %d to reflect unreachable branches.",
2671 test->id());
2673 *shouldRemoveDeadCode = true;
2674 }
2676 return true;
2677 }
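// Sketch (hypothetical): if the beta node inserted for the true branch of
// 'if (x < 0)' gets an empty range (say x's range is [0, 100]),
// MBeta::computeRange marks that block unreachable; this pass then replaces
// the MTest's condition with a 'false' constant so that the follow-up
// dead-code elimination pass can remove the branch.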