/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"

#include "jsanalyze.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "vm/NumericConversions.h"

#include "jsopcodeinlines.h"

using namespace js;
using namespace js::jit;

using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::NumberEqualsInt32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsInfinite;
using mozilla::IsNaN;
using mozilla::IsNegative;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
using mozilla::Swap;
using JS::GenericNaN;

// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     y = x * 2;
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be to
// associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     y2 = x3 * 2;
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
//   y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
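//
// As a worked check of the no-overflow claim (using the ranges above and
// 32-bit signed arithmetic): in the final arm, x4 lies in
// [1000000000, INT_MAX], so y3 = x4 - 3000000000 lies in
// [-2000000000, INT_MAX - 3000000000] = [-2000000000, -852516353], which is
// comfortably within [INT_MIN, INT_MAX]. The other arms bound out similarly.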

static bool
IsDominatedUse(MBasicBlock *block, MUse *use)
{
    MNode *n = use->consumer();
    bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();

    if (isPhi)
        return block->dominates(n->block()->getPredecessor(use->index()));

    return block->dominates(n->block());
}
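
// As an illustration of the phi special case (hypothetical graph): if a join
// block J begins with phi = Phi(v, w), the use of w is treated as occurring
// at the end of the predecessor edge that supplies w, not inside J itself.
// A block can therefore dominate that use without dominating J.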

static inline void
SpewRange(MDefinition *def)
{
#ifdef DEBUG
    if (IonSpewEnabled(IonSpew_Range) && def->type() != MIRType_None && def->range()) {
        IonSpewHeader(IonSpew_Range);
        def->printName(IonSpewFile);
        fprintf(IonSpewFile, " has range ");
        def->range()->dump(IonSpewFile);
    }
#endif
}

TempAllocator &
RangeAnalysis::alloc() const
{
    return graph_.alloc();
}

void
RangeAnalysis::replaceDominatedUsesWith(MDefinition *orig, MDefinition *dom,
                                        MBasicBlock *block)
{
    for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) {
        if (i->consumer() != dom && IsDominatedUse(block, *i))
            i = i->consumer()->replaceOperand(i, dom);
        else
            i++;
    }
}

bool
RangeAnalysis::addBetaNodes()
{
    IonSpew(IonSpew_Range, "Adding beta nodes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        IonSpew(IonSpew_Range, "Looking at block %d", block->id());

        BranchDirection branch_dir;
        MTest *test = block->immediateDominatorBranch(&branch_dir);

        if (!test || !test->getOperand(0)->isCompare())
            continue;

        MCompare *compare = test->getOperand(0)->toCompare();

        // TODO: support unsigned comparisons
        if (compare->compareType() == MCompare::Compare_UInt32)
            continue;

        MDefinition *left = compare->getOperand(0);
        MDefinition *right = compare->getOperand(1);
        double bound;
        double conservativeLower = NegativeInfinity<double>();
        double conservativeUpper = PositiveInfinity<double>();
        MDefinition *val = nullptr;

        JSOp jsop = compare->jsop();

        if (branch_dir == FALSE_BRANCH) {
            jsop = NegateCompareOp(jsop);
            conservativeLower = GenericNaN();
            conservativeUpper = GenericNaN();
        }

        if (left->isConstant() && left->toConstant()->value().isNumber()) {
            bound = left->toConstant()->value().toNumber();
            val = right;
            jsop = ReverseCompareOp(jsop);
        } else if (right->isConstant() && right->toConstant()->value().isNumber()) {
            bound = right->toConstant()->value().toNumber();
            val = left;
        } else if (left->type() == MIRType_Int32 && right->type() == MIRType_Int32) {
            MDefinition *smaller = nullptr;
            MDefinition *greater = nullptr;
            if (jsop == JSOP_LT) {
                smaller = left;
                greater = right;
            } else if (jsop == JSOP_GT) {
                smaller = right;
                greater = left;
            }
            if (smaller && greater) {
                MBeta *beta;
                beta = MBeta::New(alloc(), smaller,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(smaller, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for smaller %d", smaller->id());
                beta = MBeta::New(alloc(), greater,
                                  Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
                block->insertBefore(*block->begin(), beta);
                replaceDominatedUsesWith(greater, beta, block);
                IonSpew(IonSpew_Range, "Adding beta node for greater %d", greater->id());
            }
            continue;
        } else {
            continue;
        }

        // At this point, one of the operands of the compare is a constant, and
        // val is the other operand.
        JS_ASSERT(val);

        Range comp;
        switch (jsop) {
          case JSOP_LE:
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_LT:
            // For integers, if x < c, the upper bound of x is c - 1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(conservativeLower, bound);
            break;
          case JSOP_GE:
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_GT:
            // For integers, if x > c, the lower bound of x is c + 1.
            if (val->type() == MIRType_Int32) {
                int32_t intbound;
                if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
                    bound = intbound;
            }
            comp.setDouble(bound, conservativeUpper);
            break;
          case JSOP_EQ:
            comp.setDouble(bound, bound);
            break;
          default:
            // For JSOP_NE we could in principle use
            // [-inf, bound-1] U [bound+1, inf], but we only represent
            // contiguous ranges, so we leave the value unconstrained.
            continue;
        }

        if (IonSpewEnabled(IonSpew_Range)) {
            IonSpewHeader(IonSpew_Range);
            fprintf(IonSpewFile, "Adding beta node for %d with range ", val->id());
            comp.dump(IonSpewFile);
        }

        MBeta *beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(val, beta, block);
    }

    return true;
}
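
// For example (illustrative values): a true edge guarded by "if (x < 10)"
// reaches the JSOP_LT case with bound == 10. Since x is an int32, SafeSub
// tightens the bound to 9, and the beta node pinned at the top of the true
// block carries the range [conservativeLower, 9].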

bool
RangeAnalysis::removeBetaNodes()
{
    IonSpew(IonSpew_Range, "Removing beta nodes");

    for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
        MBasicBlock *block = *i;
        for (MDefinitionIterator iter(*i); iter; ) {
            MDefinition *def = *iter;
            if (def->isBeta()) {
                MDefinition *op = def->getOperand(0);
                IonSpew(IonSpew_Range, "Removing beta node %d for %d",
                        def->id(), op->id());
                def->replaceAllUsesWith(op);
                iter = block->discardDefAt(iter);
            } else {
                // We only place Beta nodes at the beginning of basic
                // blocks, so if we see something else, we can move on
                // to the next block.
                break;
            }
        }
    }
    return true;
}

void
SymbolicBound::print(Sprinter &sp) const
{
    if (loop)
        sp.printf("[loop] ");
    sum.print(sp);
}

void
SymbolicBound::dump() const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(stderr, "%s\n", sp.string());
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool
IsExponentInteresting(const Range *r)
{
    // If it lacks either a lower or upper bound, the exponent is interesting.
    if (!r->hasInt32Bounds())
        return true;

    // Otherwise if there's no fractional part, the lower and upper bounds,
    // which are integers, are perfectly precise.
    if (!r->canHaveFractionalPart())
        return false;

    // Otherwise, if the bounds are conservatively rounded across a power-of-two
    // boundary, the exponent may imply a tighter range.
    return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}
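
// For example (illustrative values): the range F[0,2] (< pow(2, 0+1)) can
// arise from a value in [0, 1.5]; the integer bounds round out to [0, 2],
// but the exponent 0 still limits the magnitude to below 2. Here
// FloorLog2(Max(Abs(0), Abs(2))) == 1 > 0, so the exponent is interesting.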

void
Range::print(Sprinter &sp) const
{
    assertInvariants();

    // Floating-point or Integer subset.
    if (canHaveFractionalPart_)
        sp.printf("F");
    else
        sp.printf("I");

    sp.printf("[");

    if (!hasInt32LowerBound_)
        sp.printf("?");
    else
        sp.printf("%d", lower_);
    if (symbolicLower_) {
        sp.printf(" {");
        symbolicLower_->print(sp);
        sp.printf("}");
    }

    sp.printf(", ");

    if (!hasInt32UpperBound_)
        sp.printf("?");
    else
        sp.printf("%d", upper_);
    if (symbolicUpper_) {
        sp.printf(" {");
        symbolicUpper_->print(sp);
        sp.printf("}");
    }

    sp.printf("]");
    if (IsExponentInteresting(this)) {
        if (max_exponent_ == IncludesInfinityAndNaN)
            sp.printf(" (U inf U NaN)");
        else if (max_exponent_ == IncludesInfinity)
            sp.printf(" (U inf)");
        else
            sp.printf(" (< pow(2, %d+1))", max_exponent_);
    }
}

void
Range::dump(FILE *fp) const
{
    Sprinter sp(GetIonContext()->cx);
    sp.init();
    print(sp);
    fprintf(fp, "%s\n", sp.string());
}

void
Range::dump() const
{
    dump(stderr);
}

Range *
Range::intersect(TempAllocator &alloc, const Range *lhs, const Range *rhs, bool *emptyRange)
{
    *emptyRange = false;

    if (!lhs && !rhs)
        return nullptr;

    if (!lhs)
        return new(alloc) Range(*rhs);
    if (!rhs)
        return new(alloc) Range(*lhs);

    int32_t newLower = Max(lhs->lower_, rhs->lower_);
    int32_t newUpper = Min(lhs->upper_, rhs->upper_);

    // :TODO: This information could be used better. If upper < lower, then we
    // have conflicting constraints. Consider:
    //
    // if (x < 0) {
    //   if (x > 0) {
    //     [Some code.]
    //   }
    // }
    //
    // In this case, the block is dead. Right now, we just disregard this fact
    // and make the range unbounded, rather than empty.
    //
    // Instead, we should use it to eliminate the dead block.
    // (Bug 765127)
    if (newUpper < newLower) {
        // If both ranges can be NaN, the result can still be NaN.
        if (!lhs->canBeNaN() || !rhs->canBeNaN())
            *emptyRange = true;
        return nullptr;
    }

    bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
    bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
    bool newFractional = lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_;
    uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);

    // NaN is a special value which is neither greater than infinity nor less
    // than negative infinity. When we intersect two ranges like [?, 0] and
    // [0, ?], we can end up thinking we have both a lower and upper bound,
    // even though NaN is still possible. In this case, just be conservative,
    // since any case where we can have NaN is not especially interesting.
    if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN)
        return nullptr;

    // If one of the ranges has a fractional part and the other doesn't, it's
    // possible that we will have computed a newExponent that's more precise
    // than our newLower and newUpper. This is unusual, so we handle it here
    // instead of in optimize().
    //
    // For example, consider the range F[0,1.5]. Range analysis represents the
    // lower and upper bound as integers, so we'd actually have
    // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
    // more precise upper bound than the integer upper bound.
    //
    // When intersecting such a range with an integer range, the fractional part
    // of the range is dropped. The max exponent of 0 remains valid, so the
    // upper bound needs to be adjusted to 1.
    //
    // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
    // the naive intersection is I[2,2], but since the max exponent tells us
    // that the value is always less than 2, the intersection is actually empty.
    if (lhs->canHaveFractionalPart_ != rhs->canHaveFractionalPart_ ||
        (lhs->canHaveFractionalPart_ &&
         newHasInt32LowerBound && newHasInt32UpperBound &&
         newLower == newUpper))
    {
        refineInt32BoundsByExponent(newExponent, &newLower, &newUpper);

        // If we're intersecting two ranges that don't overlap, this could also
        // push the bounds past each other, since the actual intersection is
        // the empty set.
        if (newLower > newUpper) {
            *emptyRange = true;
            return nullptr;
        }
    }

    return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
                            newFractional, newExponent);
}
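
// As an illustration of the NaN guard above (illustrative values):
// intersecting [?, 0] with [0, ?] where both operands can be NaN gives
// newLower == newUpper == 0 with both int32 bounds apparently known, and
// newExponent == IncludesInfinityAndNaN. Since NaN is still possible, the
// function conservatively returns nullptr (unknown) rather than claiming
// the value is exactly 0.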

void
Range::unionWith(const Range *other)
{
    int32_t newLower = Min(lower_, other->lower_);
    int32_t newUpper = Max(upper_, other->upper_);

    bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_;
    bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_;
    bool newFractional = canHaveFractionalPart_ || other->canHaveFractionalPart_;
    uint16_t newExponent = Max(max_exponent_, other->max_exponent_);

    rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
                  newFractional, newExponent);
}
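
// For example (illustrative values): the union of I[0,5] and I[10,20] is
// I[0,20]. Union over-approximates with a single contiguous range, so the
// result also admits 6 through 9 even though neither input contains them.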

Range::Range(const MDefinition *def)
  : symbolicLower_(nullptr),
    symbolicUpper_(nullptr)
{
    if (const Range *other = def->range()) {
        // The instruction has range information; use it.
        *this = *other;

        // Simulate the effect of converting the value to its type.
        switch (def->type()) {
          case MIRType_Int32:
            wrapAroundToInt32();
            break;
          case MIRType_Boolean:
            wrapAroundToBoolean();
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            break;
        }
    } else {
        // Otherwise just use type information. We can trust the type here
        // because we don't care what value the instruction actually produces,
        // but what value we might get after we get past the bailouts.
        switch (def->type()) {
          case MIRType_Int32:
            setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
            break;
          case MIRType_Boolean:
            setInt32(0, 1);
            break;
          case MIRType_None:
            MOZ_ASSUME_UNREACHABLE("Asking for the range of an instruction with no value");
          default:
            setUnknown();
            break;
        }
    }

    // As a special case, MUrsh is permitted to claim a result type of
    // MIRType_Int32 while actually returning values in [0,UINT32_MAX] without
    // bailouts. If range analysis hasn't ruled out values in
    // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
    // use as either a uint32 or an int32.
    if (!hasInt32UpperBound() && def->isUrsh() && def->toUrsh()->bailoutsDisabled())
        lower_ = INT32_MIN;

    assertInvariants();
}

static uint16_t
ExponentImpliedByDouble(double d)
{
    // Handle the special values.
    if (IsNaN(d))
        return Range::IncludesInfinityAndNaN;
    if (IsInfinite(d))
        return Range::IncludesInfinity;

    // Otherwise take the exponent part and clamp it at zero, since the Range
    // class doesn't track fractional ranges.
    return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
}
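
// For example (illustrative values): 5.0 is 1.25 * 2^2, so the implied
// exponent is 2, while 0.25 is 1.0 * 2^-2 and its negative exponent is
// clamped to 0. NaN and the infinities map to the sentinel exponents above.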

void
Range::setDouble(double l, double h)
{
    // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
    if (l >= INT32_MIN && l <= INT32_MAX) {
        lower_ = int32_t(floor(l));
        hasInt32LowerBound_ = true;
    } else {
        lower_ = INT32_MIN;
        hasInt32LowerBound_ = false;
    }
    if (h >= INT32_MIN && h <= INT32_MAX) {
        upper_ = int32_t(ceil(h));
        hasInt32UpperBound_ = true;
    } else {
        upper_ = INT32_MAX;
        hasInt32UpperBound_ = false;
    }

    // Infer max_exponent_.
    uint16_t lExp = ExponentImpliedByDouble(l);
    uint16_t hExp = ExponentImpliedByDouble(h);
    max_exponent_ = Max(lExp, hExp);

    // Infer the canHaveFractionalPart_ field. We can have a fractional part
    // if the range crosses through the neighborhood of zero. We won't have a
    // fractional value if the value is always beyond the point at which
    // double precision can't represent fractional values.
    uint16_t minExp = Min(lExp, hExp);
    bool includesNegative = IsNaN(l) || l < 0;
    bool includesPositive = IsNaN(h) || h > 0;
    bool crossesZero = includesNegative && includesPositive;
    canHaveFractionalPart_ = crossesZero || minExp < MaxTruncatableExponent;

    optimize();
}
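
// For example (illustrative values): setDouble(0.0, 1.5) rounds the bounds
// out to [0, 2], infers max_exponent_ = 0 (1.5 is 1.5 * 2^0), and marks the
// range as possibly fractional, producing exactly the F[0,2] (< pow(2, 0+1))
// range discussed in intersect() above.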

static inline bool
MissingAnyInt32Bounds(const Range *lhs, const Range *rhs)
{
    return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range *
Range::add(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
        l = NoInt32LowerBound;

    int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
        h = NoInt32UpperBound;

    // The exponent is at most one greater than the greater of the operands'
    // exponents, except for NaN and infinity cases.
    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
    if (e <= Range::MaxFiniteExponent)
        ++e;

    // Infinity + -Infinity is NaN.
    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
        e = Range::IncludesInfinityAndNaN;

    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}
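
// A minimal sketch of the bound arithmetic (illustrative values, using the
// helpers above):
//
//   Range *a = Range::NewInt32Range(alloc, 1, 2);
//   Range *b = Range::NewInt32Range(alloc, 3, 4);
//   Range *s = Range::add(alloc, a, b);   // bounds become [4, 6]
//
// Both inputs are finite, so the exponent is at most one more than the
// larger operand exponent, and the infinity/NaN special case does not fire.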

Range *
Range::sub(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
    if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
        l = NoInt32LowerBound;

    int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
    if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
        h = NoInt32UpperBound;

    // The exponent is at most one greater than the greater of the operands'
    // exponents, except for NaN and infinity cases.
    uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
    if (e <= Range::MaxFiniteExponent)
        ++e;

    // Infinity - Infinity is NaN.
    if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
        e = Range::IncludesInfinityAndNaN;

    return new(alloc) Range(l, h, lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart(), e);
}

Range *
Range::and_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());

    // If both numbers can be negative, the result can be negative in the
    // whole range.
    if (lhs->lower() < 0 && rhs->lower() < 0)
        return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));

    // At most one of the numbers can be negative. The result can't be
    // negative, and its upper bound is the minimum of the two upper bounds...
    int32_t lower = 0;
    int32_t upper = Min(lhs->upper(), rhs->upper());

    // ...EXCEPT when one operand can be negative: ANDing with a negative
    // value can preserve the other operand entirely, e.g. -1 & 5 == 5, so in
    // that case the bound must come from the non-negative operand alone.
    if (lhs->lower() < 0)
        upper = rhs->upper();
    if (rhs->lower() < 0)
        upper = lhs->upper();

    return Range::NewInt32Range(alloc, lower, upper);
}
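
// For example (illustrative values): with lhs = [-2, -1] and rhs = [0, 5],
// only lhs can be negative. Since -1 & 5 == 5, the upper bound must be
// rhs's upper bound, 5; the naive Min(lhs->upper(), rhs->upper()) would
// give the nonsensical bound -1.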

Range *
Range::or_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    // When one operand is always 0 or always -1, it's a special case where we
    // can compute a fully precise result. Handling these up front also
    // protects the code below from calling CountLeadingZeroes32 with a zero
    // operand or from shifting an int32_t by 32.
    if (lhs->lower() == lhs->upper()) {
        if (lhs->lower() == 0)
            return new(alloc) Range(*rhs);
        if (lhs->lower() == -1)
            return new(alloc) Range(*lhs);
    }
    if (rhs->lower() == rhs->upper()) {
        if (rhs->lower() == 0)
            return new(alloc) Range(*lhs);
        if (rhs->lower() == -1)
            return new(alloc) Range(*rhs);
    }

    // The code below uses CountLeadingZeroes32, which has undefined behavior
    // if its operand is 0. We rely on the code above to protect it.
    JS_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
    JS_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
    JS_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
    JS_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

    int32_t lower = INT32_MIN;
    int32_t upper = INT32_MAX;

    if (lhs->lower() >= 0 && rhs->lower() >= 0) {
        // Both operands are non-negative, so the result won't be less than either.
        lower = Max(lhs->lower(), rhs->lower());
        // The result will have leading zeros where both operands have leading zeros.
        // CountLeadingZeroes32 of a non-negative int32 will be at least 1 to
        // account for the sign bit.
        upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
                                          CountLeadingZeroes32(rhs->upper())));
    } else {
        // The result will have leading ones where either operand has leading ones.
        if (lhs->upper() < 0) {
            unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
            upper = -1;
        }
        if (rhs->upper() < 0) {
            unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
            lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
            upper = -1;
        }
    }

    return Range::NewInt32Range(alloc, lower, upper);
}
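
// For example (illustrative values): with lhs = [1, 5] and rhs = [1, 9],
// both operands are non-negative, so lower = Max(1, 1) = 1. Since
// CountLeadingZeroes32(5) == 29 and CountLeadingZeroes32(9) == 28, the
// result keeps at least 28 leading zeros, giving
// upper = int32_t(UINT32_MAX >> 28) = 15.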

Range *
Range::xor_(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    int32_t lhsLower = lhs->lower();
    int32_t lhsUpper = lhs->upper();
    int32_t rhsLower = rhs->lower();
    int32_t rhsUpper = rhs->upper();
    bool invertAfter = false;

    // If either operand is negative, bitwise-negate it, and arrange to negate
    // the result; ~((~x)^y) == x^y. If both are negative the negations on the
    // result cancel each other out; effectively this is (~x)^(~y) == x^y.
    // These transformations reduce the number of cases we have to handle below.
    if (lhsUpper < 0) {
        lhsLower = ~lhsLower;
        lhsUpper = ~lhsUpper;
        Swap(lhsLower, lhsUpper);
        invertAfter = !invertAfter;
    }
    if (rhsUpper < 0) {
        rhsLower = ~rhsLower;
        rhsUpper = ~rhsUpper;
        Swap(rhsLower, rhsUpper);
        invertAfter = !invertAfter;
    }

    // Handle cases where lhs or rhs is always zero specially, because they're
    // easy cases where we can be perfectly precise, and because it protects the
    // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
    // undefined behavior.
    int32_t lower = INT32_MIN;
    int32_t upper = INT32_MAX;
    if (lhsLower == 0 && lhsUpper == 0) {
        upper = rhsUpper;
        lower = rhsLower;
    } else if (rhsLower == 0 && rhsUpper == 0) {
        upper = lhsUpper;
        lower = lhsLower;
    } else if (lhsLower >= 0 && rhsLower >= 0) {
        // Both operands are non-negative. The result will be non-negative.
        lower = 0;
        // To compute the upper value, take each operand's upper value and
        // set all bits that don't correspond to leading zero bits in the
        // other to one. For each one, this gives an upper bound for the
        // result, so we can take the minimum between the two.
        unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
        unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
        upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                    lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
    }

    // If we bitwise-negated one (but not both) of the operands above, apply the
    // bitwise-negate to the result, completing ~((~x)^y) == x^y.
    if (invertAfter) {
        lower = ~lower;
        upper = ~upper;
        Swap(lower, upper);
    }

    return Range::NewInt32Range(alloc, lower, upper);
}
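
// For example (illustrative values): with lhs = [-4, -1] and rhs = [0, 3],
// the lhs is negated to [0, 3] and invertAfter is set. Both halves are then
// non-negative, so lower = 0 and upper = 3 (each upper bound has 30 leading
// zeros). Undoing the negation maps [0, 3] back to [-4, -1].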

Range *
Range::not_(TempAllocator &alloc, const Range *op)
{
    JS_ASSERT(op->isInt32());
    return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

Range *
Range::mul(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    bool fractional = lhs->canHaveFractionalPart() || rhs->canHaveFractionalPart();

    uint16_t exponent;
    if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
        // Two finite values.
        exponent = lhs->numBits() + rhs->numBits() - 1;
        if (exponent > Range::MaxFiniteExponent)
            exponent = Range::IncludesInfinity;
    } else if (!lhs->canBeNaN() &&
               !rhs->canBeNaN() &&
               !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
               !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN()))
    {
        // Two values that multiplied together won't produce a NaN.
        exponent = Range::IncludesInfinity;
    } else {
        // Could be anything.
        exponent = Range::IncludesInfinityAndNaN;
    }

    if (MissingAnyInt32Bounds(lhs, rhs))
        return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound, fractional, exponent);
    int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
    int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
    int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
    int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
    return new(alloc) Range(
        Min( Min(a, b), Min(c, d) ),
        Max( Max(a, b), Max(c, d) ),
        fractional, exponent);
}
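
// For example (illustrative values): with lhs = [-3, 2] and rhs = [4, 5],
// the four corner products are -12, -15, 8, and 10, so the result bounds
// are [Min, Max] of the corners: [-15, 10]. The corners suffice because
// multiplication is monotonic in each operand once the other is fixed.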

Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    int32_t shift = c & 0x1f;

    // If the shift doesn't lose bits or shift bits into the sign bit, we
    // can simply compute the correct range by shifting.
    if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
        (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
    {
        return Range::NewInt32Range(alloc,
                                    uint32_t(lhs->lower()) << shift,
                                    uint32_t(lhs->upper()) << shift);
    }

    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
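
// For example (illustrative values): [1, 2] << 2 passes the round-trip
// check and yields [4, 8], while [3, 5] << 30 would push significant bits
// into or past the sign bit, so the check fails and the conservative full
// int32 range is returned instead.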

Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    JS_ASSERT(lhs->isInt32());
    int32_t shift = c & 0x1f;
    return Range::NewInt32Range(alloc,
                                lhs->lower() >> shift,
                                lhs->upper() >> shift);
}

Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, int32_t c)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());

    int32_t shift = c & 0x1f;

    // If the value is always non-negative or always negative, we can simply
    // compute the correct range by shifting.
    if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
        return Range::NewUInt32Range(alloc,
                                     uint32_t(lhs->lower()) >> shift,
                                     uint32_t(lhs->upper()) >> shift);
    }

    // Otherwise return the most general range after the shift.
    return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}
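
// For example (illustrative values): for the always-negative range [-4, -2],
// ursh by 1 computes [uint32_t(-4) >> 1, uint32_t(-2) >> 1]
// = [0x7FFFFFFE, 0x7FFFFFFF]. The bounds stay ordered because reinterpreting
// same-signed int32 values as uint32 preserves their order.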

Range *
Range::lsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range *
Range::rsh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewInt32Range(alloc, Min(lhs->lower(), 0), Max(lhs->upper(), 0));
}

Range *
Range::ursh(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // ursh's left operand is uint32, not int32, but for range analysis we
    // currently approximate it as int32. We assume here that the range has
    // already been adjusted accordingly by our callers.
    JS_ASSERT(lhs->isInt32());
    JS_ASSERT(rhs->isInt32());
    return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range *
Range::abs(TempAllocator &alloc, const Range *op)
{
    int32_t l = op->lower_;
    int32_t u = op->upper_;

    return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
                            true,
                            Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
                            op->hasInt32Bounds() && l != INT32_MIN,
                            op->canHaveFractionalPart_,
                            op->max_exponent_);
}
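
// For example (illustrative values): for op = [-5, 3] the lower bound is
// Max(Max(0, -5), -3) = 0 and the upper bound is Max(Max(0, 3), 5) = 5,
// giving [0, 5]. The INT32_MIN checks are needed because -INT32_MIN does
// not fit in an int32.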

Range *
Range::min(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                            Min(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

Range *
Range::max(TempAllocator &alloc, const Range *lhs, const Range *rhs)
{
    // If either operand is NaN, the result is NaN.
    if (lhs->canBeNaN() || rhs->canBeNaN())
        return nullptr;

    return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
                            lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                            Max(lhs->upper_, rhs->upper_),
                            lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                            lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_,
                            Max(lhs->max_exponent_, rhs->max_exponent_));
}

bool
Range::negativeZeroMul(const Range *lhs, const Range *rhs)
{
    // The result can only be negative zero if both sides are finite and they
    // have differing signs.
    return (lhs->canBeFiniteNegative() && rhs->canBeFiniteNonNegative()) ||
           (rhs->canBeFiniteNegative() && lhs->canBeFiniteNonNegative());
}
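
// For example (illustrative values): lhs = [-1, -1] and rhs = [0, 0]
// satisfies the predicate, and indeed -1 * 0 evaluates to -0 in IEEE-754
// arithmetic. Two ranges that are both non-negative (or both negative) can
// never produce -0, so the predicate returns false for them.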

bool
Range::update(const Range *other)
{
    bool changed =
        lower_ != other->lower_ ||
        hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
        upper_ != other->upper_ ||
        hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
        canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
        max_exponent_ != other->max_exponent_;
    if (changed) {
        lower_ = other->lower_;
        hasInt32LowerBound_ = other->hasInt32LowerBound_;
        upper_ = other->upper_;
        hasInt32UpperBound_ = other->hasInt32UpperBound_;
        canHaveFractionalPart_ = other->canHaveFractionalPart_;
        max_exponent_ = other->max_exponent_;
        assertInvariants();
    }

    return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void
MPhi::computeRange(TempAllocator &alloc)
{
    if (type() != MIRType_Int32 && type() != MIRType_Double)
        return;

    Range *range = nullptr;
    for (size_t i = 0, e = numOperands(); i < e; i++) {
        if (getOperand(i)->block()->unreachable()) {
            IonSpew(IonSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
            continue;
        }

        // Peek at the pre-bailout range so we can take a short-cut; if any of
        // the operands has an unknown range, this phi has an unknown range.
        if (!getOperand(i)->range())
            return;

        Range input(getOperand(i));

        if (range)
            range->unionWith(&input);
        else
            range = new(alloc) Range(input);
    }

    setRange(range);
}

void
MBeta::computeRange(TempAllocator &alloc)
{
    bool emptyRange = false;

    Range opRange(getOperand(0));
    Range *range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
    if (emptyRange) {
        IonSpew(IonSpew_Range, "Marking block for inst %d unreachable", id());
        block()->setUnreachable();
    } else {
        setRange(range);
    }
}

void
MConstant::computeRange(TempAllocator &alloc)
{
    if (value().isNumber()) {
        double d = value().toNumber();
        setRange(Range::NewDoubleRange(alloc, d, d));
    } else if (value().isBoolean()) {
        bool b = value().toBoolean();
        setRange(Range::NewInt32Range(alloc, b, b));
    }
}

void
MCharCodeAt::computeRange(TempAllocator &alloc)
{
    // ECMA 262 says that the integer will be non-negative and at most 65535.
    setRange(Range::NewInt32Range(alloc, 0, 65535));
}

void
MClampToUint8::computeRange(TempAllocator &alloc)
{
    setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void
MBitAnd::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::and_(alloc, &left, &right));
}

void
MBitOr::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::or_(alloc, &left, &right));
}

void
MBitXor::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();
    right.wrapAroundToInt32();

    setRange(Range::xor_(alloc, &left, &right));
}

void
MBitNot::computeRange(TempAllocator &alloc)
{
    Range op(getOperand(0));
    op.wrapAroundToInt32();

    setRange(Range::not_(alloc, &op));
}

void
MLsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        right.wrapAroundToShiftCount();
        setRange(Range::lsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::lsh(alloc, &left, c));
}

void
MRsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));
    left.wrapAroundToInt32();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        right.wrapAroundToShiftCount();
        setRange(Range::rsh(alloc, &left, &right));
        return;
    }

    int32_t c = rhs->toConstant()->value().toInt32();
    setRange(Range::rsh(alloc, &left, c));
}

void
MUrsh::computeRange(TempAllocator &alloc)
{
    Range left(getOperand(0));
    Range right(getOperand(1));

    // ursh can be thought of as converting its left operand to uint32, or it
    // can be thought of as converting its left operand to int32, and then
    // reinterpreting the int32 bits as a uint32 value. Both approaches yield
    // the same result. Since we lack support for full uint32 ranges, we use
    // the second interpretation, though it does cause us to be conservative.
    left.wrapAroundToInt32();
    right.wrapAroundToShiftCount();

    MDefinition *rhs = getOperand(1);
    if (!rhs->isConstant()) {
        setRange(Range::ursh(alloc, &left, &right));
    } else {
        int32_t c = rhs->toConstant()->value().toInt32();
        setRange(Range::ursh(alloc, &left, c));
    }

    JS_ASSERT(range()->lower() >= 0);
}

void
MAbs::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range other(getOperand(0));
    Range *next = Range::abs(alloc, &other);
    if (implicitTruncate_)
        next->wrapAroundToInt32();
    setRange(next);
}

void
MMinMax::computeRange(TempAllocator &alloc)
{
    if (specialization_ != MIRType_Int32 && specialization_ != MIRType_Double)
        return;

    Range left(getOperand(0));
    Range right(getOperand(1));
    setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
}
michael@0 | 1187 | |
michael@0 | 1188 | void |
michael@0 | 1189 | MAdd::computeRange(TempAllocator &alloc) |
michael@0 | 1190 | { |
michael@0 | 1191 | if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) |
michael@0 | 1192 | return; |
michael@0 | 1193 | Range left(getOperand(0)); |
michael@0 | 1194 | Range right(getOperand(1)); |
michael@0 | 1195 | Range *next = Range::add(alloc, &left, &right); |
michael@0 | 1196 | if (isTruncated()) |
michael@0 | 1197 | next->wrapAroundToInt32(); |
michael@0 | 1198 | setRange(next); |
michael@0 | 1199 | } |
michael@0 | 1200 | |
michael@0 | 1201 | void |
michael@0 | 1202 | MSub::computeRange(TempAllocator &alloc) |
michael@0 | 1203 | { |
michael@0 | 1204 | if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) |
michael@0 | 1205 | return; |
michael@0 | 1206 | Range left(getOperand(0)); |
michael@0 | 1207 | Range right(getOperand(1)); |
michael@0 | 1208 | Range *next = Range::sub(alloc, &left, &right); |
michael@0 | 1209 | if (isTruncated()) |
michael@0 | 1210 | next->wrapAroundToInt32(); |
michael@0 | 1211 | setRange(next); |
michael@0 | 1212 | } |
michael@0 | 1213 | |
michael@0 | 1214 | void |
michael@0 | 1215 | MMul::computeRange(TempAllocator &alloc) |
michael@0 | 1216 | { |
michael@0 | 1217 | if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) |
michael@0 | 1218 | return; |
michael@0 | 1219 | Range left(getOperand(0)); |
michael@0 | 1220 | Range right(getOperand(1)); |
michael@0 | 1221 | if (canBeNegativeZero()) |
michael@0 | 1222 | canBeNegativeZero_ = Range::negativeZeroMul(&left, &right); |
michael@0 | 1223 | Range *next = Range::mul(alloc, &left, &right); |
michael@0 | 1224 | // Truncated multiplications could overflow in both directions. |
michael@0 | 1225 | if (isTruncated()) |
michael@0 | 1226 | next->wrapAroundToInt32(); |
michael@0 | 1227 | setRange(next); |
michael@0 | 1228 | } |
michael@0 | 1229 | |
michael@0 | 1230 | void |
michael@0 | 1231 | MMod::computeRange(TempAllocator &alloc) |
michael@0 | 1232 | { |
michael@0 | 1233 | if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) |
michael@0 | 1234 | return; |
michael@0 | 1235 | Range lhs(getOperand(0)); |
michael@0 | 1236 | Range rhs(getOperand(1)); |
michael@0 | 1237 | |
michael@0 | 1238 | // If either operand is a NaN, the result is NaN. This also conservatively |
michael@0 | 1239 | // handles Infinity cases. |
michael@0 | 1240 | if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) |
michael@0 | 1241 | return; |
michael@0 | 1242 | |
michael@0 | 1243 | // If RHS can be zero, the result can be NaN. |
michael@0 | 1244 | if (rhs.lower() <= 0 && rhs.upper() >= 0) |
michael@0 | 1245 | return; |
michael@0 | 1246 | |
michael@0 | 1247 | // If both operands are non-negative integers, we can optimize this to an |
michael@0 | 1248 | // unsigned mod. |
michael@0 | 1249 | if (specialization() == MIRType_Int32 && lhs.lower() >= 0 && rhs.lower() > 0 && |
michael@0 | 1250 | !lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) |
michael@0 | 1251 | { |
michael@0 | 1252 | unsigned_ = true; |
michael@0 | 1253 | } |
michael@0 | 1254 | |
michael@0 | 1255 | // For unsigned mod, we have to convert both operands to unsigned. |
michael@0 | 1256 | // Note that we handled the case of a zero rhs above. |
michael@0 | 1257 | if (unsigned_) { |
michael@0 | 1258 | // The result of an unsigned mod will never be unsigned-greater than |
michael@0 | 1259 | // either operand. |
michael@0 | 1260 | uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper()); |
michael@0 | 1261 | uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper()); |
michael@0 | 1262 | |
michael@0 | 1263 | // If either range crosses through -1 as a signed value, it could be |
michael@0 | 1264 | // the maximum unsigned value when interpreted as unsigned. If the range |
michael@0 | 1265 | // doesn't include -1, then the simple max value we computed above is |
michael@0 | 1266 | // correct. |
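michael@0 |      | // Worked example with illustrative values: if lhs is the signed range |
michael@0 |      | // [-2, 5], the max above is uint32(-2) = 4294967294, yet the range also |
michael@0 |      | // contains -1, which reinterprets as 4294967295, so lhsBound must widen |
michael@0 |      | // to UINT32_MAX. |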
michael@0 | 1267 | if (lhs.lower() <= -1 && lhs.upper() >= -1) |
michael@0 | 1268 | lhsBound = UINT32_MAX; |
michael@0 | 1269 | if (rhs.lower() <= -1 && rhs.upper() >= -1) |
michael@0 | 1270 | rhsBound = UINT32_MAX; |
michael@0 | 1271 | |
michael@0 | 1272 | // The result will never be equal to the rhs, and we shouldn't have |
michael@0 | 1273 | // any rounding to worry about. |
michael@0 | 1274 | JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()); |
michael@0 | 1275 | --rhsBound; |
michael@0 | 1276 | |
michael@0 | 1277 | // This gives us two upper bounds, so we can take the best one. |
michael@0 | 1278 | setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound))); |
michael@0 | 1279 | return; |
michael@0 | 1280 | } |
michael@0 | 1281 | |
michael@0 | 1282 | // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs). |
michael@0 | 1283 | // First, the absolute value of the result will always be less than the |
michael@0 | 1284 | // absolute value of rhs. (And if rhs is zero, the result is NaN). |
michael@0 | 1285 | int64_t a = Abs<int64_t>(rhs.lower()); |
michael@0 | 1286 | int64_t b = Abs<int64_t>(rhs.upper()); |
michael@0 | 1287 | if (a == 0 && b == 0) |
michael@0 | 1288 | return; |
michael@0 | 1289 | int64_t rhsAbsBound = Max(a, b); |
michael@0 | 1290 | |
michael@0 | 1291 | // If the value is known to be integer, less-than abs(rhs) is equivalent |
michael@0 | 1292 | // to less-than-or-equal abs(rhs)-1. This is important for being able to |
michael@0 | 1293 | // say that the result of x%256 is an 8-bit unsigned number. |
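michael@0 |      | // Concretely, for integer x and rhs = 256, rhsAbsBound is refined from |
michael@0 |      | // 256 to 255 here, and the sign logic below then bounds the result |
michael@0 |      | // within [0, 255] when lhs is known non-negative. |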
michael@0 | 1294 | if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) |
michael@0 | 1295 | --rhsAbsBound; |
michael@0 | 1296 | |
michael@0 | 1297 | // Next, the absolute value of the result will never be greater than the |
michael@0 | 1298 | // absolute value of lhs. |
michael@0 | 1299 | int64_t lhsAbsBound = Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper())); |
michael@0 | 1300 | |
michael@0 | 1301 | // This gives us two upper bounds, so we can take the best one. |
michael@0 | 1302 | int64_t absBound = Min(lhsAbsBound, rhsAbsBound); |
michael@0 | 1303 | |
michael@0 | 1304 | // Now consider the sign of the result. |
michael@0 | 1305 | // If lhs is non-negative, the result will be non-negative. |
michael@0 | 1306 | // If lhs is non-positive, the result will be non-positive. |
michael@0 | 1307 | int64_t lower = lhs.lower() >= 0 ? 0 : -absBound; |
michael@0 | 1308 | int64_t upper = lhs.upper() <= 0 ? 0 : absBound; |
michael@0 | 1309 | |
michael@0 | 1310 | setRange(new(alloc) Range(lower, upper, lhs.canHaveFractionalPart() || rhs.canHaveFractionalPart(), |
michael@0 | 1311 | Min(lhs.exponent(), rhs.exponent()))); |
michael@0 | 1312 | } |
michael@0 | 1313 | |
michael@0 | 1314 | void |
michael@0 | 1315 | MDiv::computeRange(TempAllocator &alloc) |
michael@0 | 1316 | { |
michael@0 | 1317 | if (specialization() != MIRType_Int32 && specialization() != MIRType_Double) |
michael@0 | 1318 | return; |
michael@0 | 1319 | Range lhs(getOperand(0)); |
michael@0 | 1320 | Range rhs(getOperand(1)); |
michael@0 | 1321 | |
michael@0 | 1322 | // If either operand is a NaN, the result is NaN. This also conservatively |
michael@0 | 1323 | // handles Infinity cases. |
michael@0 | 1324 | if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) |
michael@0 | 1325 | return; |
michael@0 | 1326 | |
michael@0 | 1327 | // Something simple for now: When dividing by a positive rhs, the result |
michael@0 | 1328 | // won't be further from zero than lhs. |
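michael@0 |      | // Sketch with illustrative ranges: lhs in [0, 100] divided by rhs in |
michael@0 |      | // [2, 5] is given the range [0, 100], with the fractional flag set since |
michael@0 |      | // e.g. 1 / 2 has a fractional part. |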
michael@0 | 1329 | if (lhs.lower() >= 0 && rhs.lower() >= 1) { |
michael@0 | 1330 | setRange(new(alloc) Range(0, lhs.upper(), true, lhs.exponent())); |
michael@0 | 1331 | } else if (unsigned_ && rhs.lower() >= 1) { |
michael@0 | 1332 | // We shouldn't set the unsigned flag if the inputs can have |
michael@0 | 1333 | // fractional parts. |
michael@0 | 1334 | JS_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()); |
michael@0 | 1335 | // Unsigned division by a non-zero rhs will return a uint32 value. |
michael@0 | 1336 | setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX)); |
michael@0 | 1337 | } |
michael@0 | 1338 | } |
michael@0 | 1339 | |
michael@0 | 1340 | void |
michael@0 | 1341 | MSqrt::computeRange(TempAllocator &alloc) |
michael@0 | 1342 | { |
michael@0 | 1343 | Range input(getOperand(0)); |
michael@0 | 1344 | |
michael@0 | 1345 | // If either operand is a NaN, the result is NaN. This also conservatively |
michael@0 | 1346 | // handles Infinity cases. |
michael@0 | 1347 | if (!input.hasInt32Bounds()) |
michael@0 | 1348 | return; |
michael@0 | 1349 | |
michael@0 | 1350 | // Sqrt of a negative non-zero value is NaN. |
michael@0 | 1351 | if (input.lower() < 0) |
michael@0 | 1352 | return; |
michael@0 | 1353 | |
michael@0 | 1354 | // Something simple for now: When taking the sqrt of a positive value, the |
michael@0 | 1355 | // result won't be further from zero than the input. |
michael@0 | 1356 | setRange(new(alloc) Range(0, input.upper(), true, input.exponent())); |
michael@0 | 1357 | } |
michael@0 | 1358 | |
michael@0 | 1359 | void |
michael@0 | 1360 | MToDouble::computeRange(TempAllocator &alloc) |
michael@0 | 1361 | { |
michael@0 | 1362 | setRange(new(alloc) Range(getOperand(0))); |
michael@0 | 1363 | } |
michael@0 | 1364 | |
michael@0 | 1365 | void |
michael@0 | 1366 | MToFloat32::computeRange(TempAllocator &alloc) |
michael@0 | 1367 | { |
michael@0 | 1368 | } |
michael@0 | 1369 | |
michael@0 | 1370 | void |
michael@0 | 1371 | MTruncateToInt32::computeRange(TempAllocator &alloc) |
michael@0 | 1372 | { |
michael@0 | 1373 | Range *output = new(alloc) Range(getOperand(0)); |
michael@0 | 1374 | output->wrapAroundToInt32(); |
michael@0 | 1375 | setRange(output); |
michael@0 | 1376 | } |
michael@0 | 1377 | |
michael@0 | 1378 | void |
michael@0 | 1379 | MToInt32::computeRange(TempAllocator &alloc) |
michael@0 | 1380 | { |
michael@0 | 1381 | Range *output = new(alloc) Range(getOperand(0)); |
michael@0 | 1382 | output->clampToInt32(); |
michael@0 | 1383 | setRange(output); |
michael@0 | 1384 | } |
michael@0 | 1385 | |
michael@0 | 1386 | static Range * |
michael@0 |      | GetTypedArrayRange(TempAllocator &alloc, int type) |
michael@0 | 1387 | { |
michael@0 | 1388 | switch (type) { |
michael@0 | 1389 | case ScalarTypeDescr::TYPE_UINT8_CLAMPED: |
michael@0 | 1390 | case ScalarTypeDescr::TYPE_UINT8: |
michael@0 | 1391 | return Range::NewUInt32Range(alloc, 0, UINT8_MAX); |
michael@0 | 1392 | case ScalarTypeDescr::TYPE_UINT16: |
michael@0 | 1393 | return Range::NewUInt32Range(alloc, 0, UINT16_MAX); |
michael@0 | 1394 | case ScalarTypeDescr::TYPE_UINT32: |
michael@0 | 1395 | return Range::NewUInt32Range(alloc, 0, UINT32_MAX); |
michael@0 | 1396 | |
michael@0 | 1397 | case ScalarTypeDescr::TYPE_INT8: |
michael@0 | 1398 | return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX); |
michael@0 | 1399 | case ScalarTypeDescr::TYPE_INT16: |
michael@0 | 1400 | return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX); |
michael@0 | 1401 | case ScalarTypeDescr::TYPE_INT32: |
michael@0 | 1402 | return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX); |
michael@0 | 1403 | |
michael@0 | 1404 | case ScalarTypeDescr::TYPE_FLOAT32: |
michael@0 | 1405 | case ScalarTypeDescr::TYPE_FLOAT64: |
michael@0 | 1406 | break; |
michael@0 | 1407 | } |
michael@0 | 1408 | |
michael@0 | 1409 | return nullptr; |
michael@0 | 1410 | } |
michael@0 | 1411 | |
michael@0 | 1412 | void |
michael@0 | 1413 | MLoadTypedArrayElement::computeRange(TempAllocator &alloc) |
michael@0 | 1414 | { |
michael@0 | 1415 | // This instruction has an Int32 result type. If it is a UInt32 load, it may |
michael@0 | 1416 | // produce a value outside that range, but we have a bailout to handle those cases. |
michael@0 | 1417 | setRange(GetTypedArrayRange(alloc, arrayType())); |
michael@0 | 1418 | } |
michael@0 | 1419 | |
michael@0 | 1420 | void |
michael@0 | 1421 | MLoadTypedArrayElementStatic::computeRange(TempAllocator &alloc) |
michael@0 | 1422 | { |
michael@0 | 1423 | // We don't currently use MLoadTypedArrayElementStatic for uint32, so we |
michael@0 | 1424 | // don't have to worry about it returning a value outside our type. |
michael@0 | 1425 | JS_ASSERT(typedArray_->type() != ScalarTypeDescr::TYPE_UINT32); |
michael@0 | 1426 | |
michael@0 | 1427 | setRange(GetTypedArrayRange(alloc, typedArray_->type())); |
michael@0 | 1428 | } |
michael@0 | 1429 | |
michael@0 | 1430 | void |
michael@0 | 1431 | MArrayLength::computeRange(TempAllocator &alloc) |
michael@0 | 1432 | { |
michael@0 | 1433 | // Array lengths can go up to UINT32_MAX, but we only create MArrayLength |
michael@0 | 1434 | // nodes when the value is known to be int32 (see the |
michael@0 | 1435 | // OBJECT_FLAG_LENGTH_OVERFLOW flag). |
michael@0 | 1436 | setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX)); |
michael@0 | 1437 | } |
michael@0 | 1438 | |
michael@0 | 1439 | void |
michael@0 | 1440 | MInitializedLength::computeRange(TempAllocator &alloc) |
michael@0 | 1441 | { |
michael@0 | 1442 | setRange(Range::NewUInt32Range(alloc, 0, JSObject::NELEMENTS_LIMIT)); |
michael@0 | 1443 | } |
michael@0 | 1444 | |
michael@0 | 1445 | void |
michael@0 | 1446 | MTypedArrayLength::computeRange(TempAllocator &alloc) |
michael@0 | 1447 | { |
michael@0 | 1448 | setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX)); |
michael@0 | 1449 | } |
michael@0 | 1450 | |
michael@0 | 1451 | void |
michael@0 | 1452 | MStringLength::computeRange(TempAllocator &alloc) |
michael@0 | 1453 | { |
michael@0 | 1454 | static_assert(JSString::MAX_LENGTH <= UINT32_MAX, |
michael@0 | 1455 | "NewUInt32Range requires a uint32 value"); |
michael@0 | 1456 | setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH)); |
michael@0 | 1457 | } |
michael@0 | 1458 | |
michael@0 | 1459 | void |
michael@0 | 1460 | MArgumentsLength::computeRange(TempAllocator &alloc) |
michael@0 | 1461 | { |
michael@0 | 1462 | // This is a conservative upper bound on what |TooManyArguments| checks. |
michael@0 | 1463 | // If exceeded, Ion will not be entered in the first place. |
michael@0 | 1464 | static_assert(SNAPSHOT_MAX_NARGS <= UINT32_MAX, |
michael@0 | 1465 | "NewUInt32Range requires a uint32 value"); |
michael@0 | 1466 | setRange(Range::NewUInt32Range(alloc, 0, SNAPSHOT_MAX_NARGS)); |
michael@0 | 1467 | } |
michael@0 | 1468 | |
michael@0 | 1469 | void |
michael@0 | 1470 | MBoundsCheck::computeRange(TempAllocator &alloc) |
michael@0 | 1471 | { |
michael@0 | 1472 | // Just transfer the incoming index range to the output. The length() is |
michael@0 | 1473 | // also interesting, but it is handled as a bailout check, and we're |
michael@0 | 1474 | // computing a pre-bailout range here. |
michael@0 | 1475 | setRange(new(alloc) Range(index())); |
michael@0 | 1476 | } |
michael@0 | 1477 | |
michael@0 | 1478 | void |
michael@0 | 1479 | MArrayPush::computeRange(TempAllocator &alloc) |
michael@0 | 1480 | { |
michael@0 | 1481 | // MArrayPush returns the new array length. |
michael@0 | 1482 | setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX)); |
michael@0 | 1483 | } |
michael@0 | 1484 | |
michael@0 | 1485 | void |
michael@0 | 1486 | MMathFunction::computeRange(TempAllocator &alloc) |
michael@0 | 1487 | { |
michael@0 | 1488 | Range opRange(getOperand(0)); |
michael@0 | 1489 | switch (function()) { |
michael@0 | 1490 | case Sin: |
michael@0 | 1491 | case Cos: |
michael@0 | 1492 | if (!opRange.canBeInfiniteOrNaN()) |
michael@0 | 1493 | setRange(Range::NewDoubleRange(alloc, -1.0, 1.0)); |
michael@0 | 1494 | break; |
michael@0 | 1495 | case Sign: |
michael@0 | 1496 | if (!opRange.canBeNaN()) { |
michael@0 | 1497 | // Note that Math.sign(-0) is -0, and we treat -0 as equal to 0. |
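michael@0 |      | // For example, an input range of [0, 10] yields the sign range [0, 1], |
michael@0 |      | // and [-10, -1] conservatively yields [-1, 0]. |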
michael@0 | 1498 | int32_t lower = -1; |
michael@0 | 1499 | int32_t upper = 1; |
michael@0 | 1500 | if (opRange.hasInt32LowerBound() && opRange.lower() >= 0) |
michael@0 | 1501 | lower = 0; |
michael@0 | 1502 | if (opRange.hasInt32UpperBound() && opRange.upper() <= 0) |
michael@0 | 1503 | upper = 0; |
michael@0 | 1504 | setRange(Range::NewInt32Range(alloc, lower, upper)); |
michael@0 | 1505 | } |
michael@0 | 1506 | break; |
michael@0 | 1507 | default: |
michael@0 | 1508 | break; |
michael@0 | 1509 | } |
michael@0 | 1510 | } |
michael@0 | 1511 | |
michael@0 | 1512 | void |
michael@0 | 1513 | MRandom::computeRange(TempAllocator &alloc) |
michael@0 | 1514 | { |
michael@0 | 1515 | setRange(Range::NewDoubleRange(alloc, 0.0, 1.0)); |
michael@0 | 1516 | } |
michael@0 | 1517 | |
michael@0 | 1518 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 1519 | // Range Analysis |
michael@0 | 1520 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 1521 | |
michael@0 | 1522 | bool |
michael@0 | 1523 | RangeAnalysis::markBlocksInLoopBody(MBasicBlock *header, MBasicBlock *backedge) |
michael@0 | 1524 | { |
michael@0 | 1525 | Vector<MBasicBlock *, 16, IonAllocPolicy> worklist(alloc()); |
michael@0 | 1526 | |
michael@0 | 1527 | // Mark the header as being in the loop. This terminates the walk. |
michael@0 | 1528 | header->mark(); |
michael@0 | 1529 | |
michael@0 | 1530 | backedge->mark(); |
michael@0 | 1531 | if (!worklist.append(backedge)) |
michael@0 | 1532 | return false; |
michael@0 | 1533 | |
michael@0 | 1534 | // If we haven't reached the loop header yet, walk up the predecessors |
michael@0 | 1535 | // we haven't seen already. |
michael@0 | 1536 | while (!worklist.empty()) { |
michael@0 | 1537 | MBasicBlock *current = worklist.popCopy(); |
michael@0 | 1538 | for (size_t i = 0; i < current->numPredecessors(); i++) { |
michael@0 | 1539 | MBasicBlock *pred = current->getPredecessor(i); |
michael@0 | 1540 | |
michael@0 | 1541 | if (pred->isMarked()) |
michael@0 | 1542 | continue; |
michael@0 | 1543 | |
michael@0 | 1544 | pred->mark(); |
michael@0 | 1545 | if (!worklist.append(pred)) |
michael@0 | 1546 | return false; |
michael@0 | 1547 | } |
michael@0 | 1548 | } |
michael@0 | 1549 | |
michael@0 | 1550 | return true; |
michael@0 | 1551 | } |
michael@0 | 1552 | |
michael@0 | 1553 | bool |
michael@0 | 1554 | RangeAnalysis::analyzeLoop(MBasicBlock *header) |
michael@0 | 1555 | { |
michael@0 | 1556 | JS_ASSERT(header->hasUniqueBackedge()); |
michael@0 | 1557 | |
michael@0 | 1558 | // Try to compute an upper bound on the number of times the loop backedge |
michael@0 | 1559 | // will be taken. Look for tests that dominate the backedge and which have |
michael@0 | 1560 | // an edge leaving the loop body. |
michael@0 | 1561 | MBasicBlock *backedge = header->backedge(); |
michael@0 | 1562 | |
michael@0 | 1563 | // Ignore trivial infinite loops. |
michael@0 | 1564 | if (backedge == header) |
michael@0 | 1565 | return true; |
michael@0 | 1566 | |
michael@0 | 1567 | if (!markBlocksInLoopBody(header, backedge)) |
michael@0 | 1568 | return false; |
michael@0 | 1569 | |
michael@0 | 1570 | LoopIterationBound *iterationBound = nullptr; |
michael@0 | 1571 | |
michael@0 | 1572 | MBasicBlock *block = backedge; |
michael@0 | 1573 | do { |
michael@0 | 1574 | BranchDirection direction; |
michael@0 | 1575 | MTest *branch = block->immediateDominatorBranch(&direction); |
michael@0 | 1576 | |
michael@0 | 1577 | if (block == block->immediateDominator()) |
michael@0 | 1578 | break; |
michael@0 | 1579 | |
michael@0 | 1580 | block = block->immediateDominator(); |
michael@0 | 1581 | |
michael@0 | 1582 | if (branch) { |
michael@0 | 1583 | direction = NegateBranchDirection(direction); |
michael@0 | 1584 | MBasicBlock *otherBlock = branch->branchSuccessor(direction); |
michael@0 | 1585 | if (!otherBlock->isMarked()) { |
michael@0 | 1586 | iterationBound = |
michael@0 | 1587 | analyzeLoopIterationCount(header, branch, direction); |
michael@0 | 1588 | if (iterationBound) |
michael@0 | 1589 | break; |
michael@0 | 1590 | } |
michael@0 | 1591 | } |
michael@0 | 1592 | } while (block != header); |
michael@0 | 1593 | |
michael@0 | 1594 | if (!iterationBound) { |
michael@0 | 1595 | graph_.unmarkBlocks(); |
michael@0 | 1596 | return true; |
michael@0 | 1597 | } |
michael@0 | 1598 | |
michael@0 | 1599 | #ifdef DEBUG |
michael@0 | 1600 | if (IonSpewEnabled(IonSpew_Range)) { |
michael@0 | 1601 | Sprinter sp(GetIonContext()->cx); |
michael@0 | 1602 | sp.init(); |
michael@0 | 1603 | iterationBound->sum.print(sp); |
michael@0 | 1604 | IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s", |
michael@0 | 1605 | sp.string()); |
michael@0 | 1606 | } |
michael@0 | 1607 | #endif |
michael@0 | 1608 | |
michael@0 | 1609 | // Try to compute symbolic bounds for the phi nodes at the head of this |
michael@0 | 1610 | // loop, expressed in terms of the iteration bound just computed. |
michael@0 | 1611 | |
michael@0 | 1612 | for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) |
michael@0 | 1613 | analyzeLoopPhi(header, iterationBound, *iter); |
michael@0 | 1614 | |
michael@0 | 1615 | if (!mir->compilingAsmJS()) { |
michael@0 | 1616 | // Try to hoist any bounds checks from the loop using symbolic bounds. |
michael@0 | 1617 | |
michael@0 | 1618 | Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks(alloc()); |
michael@0 | 1619 | |
michael@0 | 1620 | for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) { |
michael@0 | 1621 | MBasicBlock *block = *iter; |
michael@0 | 1622 | if (!block->isMarked()) |
michael@0 | 1623 | continue; |
michael@0 | 1624 | |
michael@0 | 1625 | for (MDefinitionIterator iter(block); iter; iter++) { |
michael@0 | 1626 | MDefinition *def = *iter; |
michael@0 | 1627 | if (def->isBoundsCheck() && def->isMovable()) { |
michael@0 | 1628 | if (tryHoistBoundsCheck(header, def->toBoundsCheck())) { |
michael@0 | 1629 | if (!hoistedChecks.append(def->toBoundsCheck())) |
michael@0 | 1630 | return false; |
michael@0 | 1631 | } |
michael@0 | 1632 | } |
michael@0 | 1633 | } |
michael@0 | 1634 | } |
michael@0 | 1635 | |
michael@0 | 1636 | // Note: replace all uses of the original bounds check with the |
michael@0 | 1637 | // actual index. This is usually done during bounds check elimination, |
michael@0 | 1638 | // but in this case it's safe to do it here since the load/store is |
michael@0 | 1639 | // definitely not loop-invariant, so we will never move it before |
michael@0 | 1640 | // one of the bounds checks we just added. |
michael@0 | 1641 | for (size_t i = 0; i < hoistedChecks.length(); i++) { |
michael@0 | 1642 | MBoundsCheck *ins = hoistedChecks[i]; |
michael@0 | 1643 | ins->replaceAllUsesWith(ins->index()); |
michael@0 | 1644 | ins->block()->discard(ins); |
michael@0 | 1645 | } |
michael@0 | 1646 | } |
michael@0 | 1647 | |
michael@0 | 1648 | graph_.unmarkBlocks(); |
michael@0 | 1649 | return true; |
michael@0 | 1650 | } |
michael@0 | 1651 | |
michael@0 | 1652 | LoopIterationBound * |
michael@0 | 1653 | RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header, |
michael@0 | 1654 | MTest *test, BranchDirection direction) |
michael@0 | 1655 | { |
michael@0 | 1656 | SimpleLinearSum lhs(nullptr, 0); |
michael@0 | 1657 | MDefinition *rhs; |
michael@0 | 1658 | bool lessEqual; |
michael@0 | 1659 | if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) |
michael@0 | 1660 | return nullptr; |
michael@0 | 1661 | |
michael@0 | 1662 | // Ensure the rhs is a loop invariant term. |
michael@0 | 1663 | if (rhs && rhs->block()->isMarked()) { |
michael@0 | 1664 | if (lhs.term && lhs.term->block()->isMarked()) |
michael@0 | 1665 | return nullptr; |
michael@0 | 1666 | MDefinition *temp = lhs.term; |
michael@0 | 1667 | lhs.term = rhs; |
michael@0 | 1668 | rhs = temp; |
michael@0 | 1669 | if (!SafeSub(0, lhs.constant, &lhs.constant)) |
michael@0 | 1670 | return nullptr; |
michael@0 | 1671 | lessEqual = !lessEqual; |
michael@0 | 1672 | } |
michael@0 | 1673 | |
michael@0 | 1674 | JS_ASSERT_IF(rhs, !rhs->block()->isMarked()); |
michael@0 | 1675 | |
michael@0 | 1676 | // Ensure the lhs is a phi node from the start of the loop body. |
michael@0 | 1677 | if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) |
michael@0 | 1678 | return nullptr; |
michael@0 | 1679 | |
michael@0 | 1680 | // Check that the value of the lhs changes by a constant amount with each |
michael@0 | 1681 | // loop iteration. This requires that the lhs be written in every loop |
michael@0 | 1682 | // iteration with a value that is a constant difference from its value at |
michael@0 | 1683 | // the start of the iteration. |
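michael@0 |      | // For a typical 'i = i + 1' style increment this holds: ExtractLinearSum |
michael@0 |      | // on the write (computed below) yields the phi itself plus the constant 1. |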
michael@0 | 1684 | |
michael@0 | 1685 | if (lhs.term->toPhi()->numOperands() != 2) |
michael@0 | 1686 | return nullptr; |
michael@0 | 1687 | |
michael@0 | 1688 | // The first operand of the phi should be the lhs' value at the start of |
michael@0 | 1689 | // the first executed iteration, and not a value written which could |
michael@0 | 1690 | // replace the second operand below during the middle of execution. |
michael@0 | 1691 | MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0); |
michael@0 | 1692 | if (lhsInitial->block()->isMarked()) |
michael@0 | 1693 | return nullptr; |
michael@0 | 1694 | |
michael@0 | 1695 | // The second operand of the phi should be a value written by an add/sub |
michael@0 | 1696 | // in every loop iteration, i.e. in a block which dominates the backedge. |
michael@0 | 1697 | MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1); |
michael@0 | 1698 | if (lhsWrite->isBeta()) |
michael@0 | 1699 | lhsWrite = lhsWrite->getOperand(0); |
michael@0 | 1700 | if (!lhsWrite->isAdd() && !lhsWrite->isSub()) |
michael@0 | 1701 | return nullptr; |
michael@0 | 1702 | if (!lhsWrite->block()->isMarked()) |
michael@0 | 1703 | return nullptr; |
michael@0 | 1704 | MBasicBlock *bb = header->backedge(); |
michael@0 | 1705 | for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {} |
michael@0 | 1706 | if (bb != lhsWrite->block()) |
michael@0 | 1707 | return nullptr; |
michael@0 | 1708 | |
michael@0 | 1709 | SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite); |
michael@0 | 1710 | |
michael@0 | 1711 | // Check that the value of the lhs at the backedge is of the form |
michael@0 | 1712 | // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start |
michael@0 | 1713 | // of the iteration, and not that written to lhs in a previous iteration, |
michael@0 | 1714 | // as such a previous value could not appear directly in the addition: |
michael@0 | 1715 | // it could not be stored in lhs as the lhs add/sub executes in every |
michael@0 | 1716 | // iteration, and if it were stored in another variable its use here would |
michael@0 | 1717 | // be as an operand to a phi node for that variable. |
michael@0 | 1718 | if (lhsModified.term != lhs.term) |
michael@0 | 1719 | return nullptr; |
michael@0 | 1720 | |
michael@0 | 1721 | LinearSum bound(alloc()); |
michael@0 | 1722 | |
michael@0 | 1723 | if (lhsModified.constant == 1 && !lessEqual) { |
michael@0 | 1724 | // The value of lhs is 'initial(lhs) + iterCount' and this will end |
michael@0 | 1725 | // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound |
michael@0 | 1726 | // on the number of backedges executed is: |
michael@0 | 1727 | // |
michael@0 | 1728 | // initial(lhs) + iterCount + lhsN == rhs |
michael@0 | 1729 | // iterCount == rhs - initial(lhs) - lhsN |
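michael@0 |      | // |
michael@0 |      | // As a concrete sketch: for a loop guarded by 'i < n' with i starting |
michael@0 |      | // at 0 and incremented by 1, lhsN is 0 and the bound computed below is |
michael@0 |      | // n - 0 - 0 = n backedges (assuming n is loop invariant). |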
michael@0 | 1730 | |
michael@0 | 1731 | if (rhs) { |
michael@0 | 1732 | if (!bound.add(rhs, 1)) |
michael@0 | 1733 | return nullptr; |
michael@0 | 1734 | } |
michael@0 | 1735 | if (!bound.add(lhsInitial, -1)) |
michael@0 | 1736 | return nullptr; |
michael@0 | 1737 | |
michael@0 | 1738 | int32_t lhsConstant; |
michael@0 | 1739 | if (!SafeSub(0, lhs.constant, &lhsConstant)) |
michael@0 | 1740 | return nullptr; |
michael@0 | 1741 | if (!bound.add(lhsConstant)) |
michael@0 | 1742 | return nullptr; |
michael@0 | 1743 | } else if (lhsModified.constant == -1 && lessEqual) { |
michael@0 | 1744 | // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above |
michael@0 | 1745 | // case, an upper bound on the number of backedges executed is: |
michael@0 | 1746 | // |
michael@0 | 1747 | // initial(lhs) - iterCount + lhsN == rhs |
michael@0 | 1748 | // iterCount == initial(lhs) - rhs + lhsN |
michael@0 | 1749 | |
michael@0 | 1750 | if (!bound.add(lhsInitial, 1)) |
michael@0 | 1751 | return nullptr; |
michael@0 | 1752 | if (rhs) { |
michael@0 | 1753 | if (!bound.add(rhs, -1)) |
michael@0 | 1754 | return nullptr; |
michael@0 | 1755 | } |
michael@0 | 1756 | if (!bound.add(lhs.constant)) |
michael@0 | 1757 | return nullptr; |
michael@0 | 1758 | } else { |
michael@0 | 1759 | return nullptr; |
michael@0 | 1760 | } |
michael@0 | 1761 | |
michael@0 | 1762 | return new(alloc()) LoopIterationBound(header, test, bound); |
michael@0 | 1763 | } |
michael@0 | 1764 | |
michael@0 | 1765 | void |
michael@0 | 1766 | RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi) |
michael@0 | 1767 | { |
michael@0 | 1768 | // Given a bound on the number of backedges taken, compute an upper and |
michael@0 | 1769 | // lower bound for a phi node that may change by a constant amount each |
michael@0 | 1770 | // iteration. Unlike for the case when computing the iteration bound |
michael@0 | 1771 | // itself, the phi does not need to change the same amount every iteration, |
michael@0 | 1772 | // but is required to change by at most N and to be either nondecreasing |
michael@0 | 1773 | // or nonincreasing. |
michael@0 | 1774 | |
michael@0 | 1775 | JS_ASSERT(phi->numOperands() == 2); |
michael@0 | 1776 | |
michael@0 | 1777 | MBasicBlock *preLoop = header->loopPredecessor(); |
michael@0 | 1778 | JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header); |
michael@0 | 1779 | |
michael@0 | 1780 | MBasicBlock *backedge = header->backedge(); |
michael@0 | 1781 | JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header); |
michael@0 | 1782 | |
michael@0 | 1783 | MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor()); |
michael@0 | 1784 | if (initial->block()->isMarked()) |
michael@0 | 1785 | return; |
michael@0 | 1786 | |
michael@0 | 1787 | SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor())); |
michael@0 | 1788 | |
michael@0 | 1789 | if (modified.term != phi || modified.constant == 0) |
michael@0 | 1790 | return; |
michael@0 | 1791 | |
michael@0 | 1792 | if (!phi->range()) |
michael@0 | 1793 | phi->setRange(new(alloc()) Range()); |
michael@0 | 1794 | |
michael@0 | 1795 | LinearSum initialSum(alloc()); |
michael@0 | 1796 | if (!initialSum.add(initial, 1)) |
michael@0 | 1797 | return; |
michael@0 | 1798 | |
michael@0 | 1799 | // The phi may change by N each iteration, and is either nondecreasing or |
michael@0 | 1800 | // nonincreasing. initial(phi) is either a lower or upper bound for the |
michael@0 | 1801 | // phi, and initial(phi) + loopBound * N is either an upper or lower bound, |
michael@0 | 1802 | // at all points within the loop, provided that loopBound >= 0. |
michael@0 | 1803 | // |
michael@0 | 1804 | // We are more interested, however, in the bound for phi at points |
michael@0 | 1805 | // dominated by the loop bound's test; if the test dominates e.g. a bounds |
michael@0 | 1806 | // check we want to hoist from the loop, using the value of the phi at the |
michael@0 | 1807 | // head of the loop for this will usually be too imprecise to hoist the |
michael@0 | 1808 | // check. These points will execute only if the backedge executes at least |
michael@0 | 1809 | // one more time (as the test passed and the test dominates the backedge), |
michael@0 | 1810 | // so we know both that loopBound >= 1 and that the phi's value has changed |
michael@0 | 1811 | // at most loopBound - 1 times. Thus, another upper or lower bound for the |
michael@0 | 1812 | // phi is initial(phi) + (loopBound - 1) * N, without requiring us to |
michael@0 | 1813 | // ensure that loopBound >= 0. |
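michael@0 |      | // |
michael@0 |      | // Sketch with illustrative values: for a counter phi with |
michael@0 |      | // initial(phi) = 0 and N = 1 under a bound of n backedges, the symbolic |
michael@0 |      | // bounds become 0 <= phi <= n - 1 at points dominated by the loop test. |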
michael@0 | 1814 | |
michael@0 | 1815 | LinearSum limitSum(loopBound->sum); |
michael@0 | 1816 | if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) |
michael@0 | 1817 | return; |
michael@0 | 1818 | |
michael@0 | 1819 | int32_t negativeConstant; |
michael@0 | 1820 | if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant)) |
michael@0 | 1821 | return; |
michael@0 | 1822 | |
michael@0 | 1823 | Range *initRange = initial->range(); |
michael@0 | 1824 | if (modified.constant > 0) { |
michael@0 | 1825 | if (initRange && initRange->hasInt32LowerBound()) |
michael@0 | 1826 | phi->range()->refineLower(initRange->lower()); |
michael@0 | 1827 | phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum)); |
michael@0 | 1828 | phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum)); |
michael@0 | 1829 | } else { |
michael@0 | 1830 | if (initRange && initRange->hasInt32UpperBound()) |
michael@0 | 1831 | phi->range()->refineUpper(initRange->upper()); |
michael@0 | 1832 | phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum)); |
michael@0 | 1833 | phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum)); |
michael@0 | 1834 | } |
michael@0 | 1835 | |
michael@0 | 1836 | IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id()); |
michael@0 | 1837 | SpewRange(phi); |
michael@0 | 1838 | } |
michael@0 | 1839 | |
michael@0 | 1840 | // Whether bound is valid at the specified bounds check instruction in a loop, |
michael@0 | 1841 | // and may be used to hoist ins. |
michael@0 | 1842 | static inline bool |
michael@0 | 1843 | SymbolicBoundIsValid(MBasicBlock *header, MBoundsCheck *ins, const SymbolicBound *bound) |
michael@0 | 1844 | { |
michael@0 | 1845 | if (!bound->loop) |
michael@0 | 1846 | return true; |
michael@0 | 1847 | if (ins->block() == header) |
michael@0 | 1848 | return false; |
michael@0 | 1849 | MBasicBlock *bb = ins->block()->immediateDominator(); |
michael@0 | 1850 | while (bb != header && bb != bound->loop->test->block()) |
michael@0 | 1851 | bb = bb->immediateDominator(); |
michael@0 | 1852 | return bb == bound->loop->test->block(); |
michael@0 | 1853 | } |
michael@0 | 1854 | |
michael@0 | 1855 | // Convert all components of a linear sum *except* its constant to a definition, |
michael@0 | 1856 | // adding any necessary instructions to the end of the block. |
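michael@0 |      | // For example (a sketch, with the constant term ignored as noted): a sum |
michael@0 |      | // such as 2*x + y is materialized as an MConstant(2), an MMul of x by |
michael@0 |      | // that constant, and an MAdd combining the product with y, each inserted |
michael@0 |      | // before the block's last instruction. |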
michael@0 | 1857 | static inline MDefinition * |
michael@0 | 1858 | ConvertLinearSum(TempAllocator &alloc, MBasicBlock *block, const LinearSum &sum) |
michael@0 | 1859 | { |
michael@0 | 1860 | MDefinition *def = nullptr; |
michael@0 | 1861 | |
michael@0 | 1862 | for (size_t i = 0; i < sum.numTerms(); i++) { |
michael@0 | 1863 | LinearTerm term = sum.term(i); |
michael@0 | 1864 | JS_ASSERT(!term.term->isConstant()); |
michael@0 | 1865 | if (term.scale == 1) { |
michael@0 | 1866 | if (def) { |
michael@0 | 1867 | def = MAdd::New(alloc, def, term.term); |
michael@0 | 1868 | def->toAdd()->setInt32(); |
michael@0 | 1869 | block->insertBefore(block->lastIns(), def->toInstruction()); |
michael@0 | 1870 | def->computeRange(alloc); |
michael@0 | 1871 | } else { |
michael@0 | 1872 | def = term.term; |
michael@0 | 1873 | } |
michael@0 | 1874 | } else if (term.scale == -1) { |
michael@0 | 1875 | if (!def) { |
michael@0 | 1876 | def = MConstant::New(alloc, Int32Value(0)); |
michael@0 | 1877 | block->insertBefore(block->lastIns(), def->toInstruction()); |
michael@0 | 1878 | def->computeRange(alloc); |
michael@0 | 1879 | } |
michael@0 | 1880 | def = MSub::New(alloc, def, term.term); |
michael@0 | 1881 | def->toSub()->setInt32(); |
michael@0 | 1882 | block->insertBefore(block->lastIns(), def->toInstruction()); |
michael@0 | 1883 | def->computeRange(alloc); |
michael@0 | 1884 | } else { |
michael@0 | 1885 | JS_ASSERT(term.scale != 0); |
michael@0 | 1886 | MConstant *factor = MConstant::New(alloc, Int32Value(term.scale)); |
michael@0 | 1887 | block->insertBefore(block->lastIns(), factor); |
michael@0 | 1888 | MMul *mul = MMul::New(alloc, term.term, factor); |
michael@0 | 1889 | mul->setInt32(); |
michael@0 | 1890 | block->insertBefore(block->lastIns(), mul); |
michael@0 | 1891 | mul->computeRange(alloc); |
michael@0 | 1892 | if (def) { |
michael@0 | 1893 | def = MAdd::New(alloc, def, mul); |
michael@0 | 1894 | def->toAdd()->setInt32(); |
michael@0 | 1895 | block->insertBefore(block->lastIns(), def->toInstruction()); |
michael@0 | 1896 | def->computeRange(alloc); |
michael@0 | 1897 | } else { |
michael@0 | 1898 | def = mul; |
michael@0 | 1899 | } |
michael@0 | 1900 | } |
michael@0 | 1901 | } |
michael@0 | 1902 | |
michael@0 | 1903 | if (!def) { |
michael@0 | 1904 | def = MConstant::New(alloc, Int32Value(0)); |
michael@0 | 1905 | block->insertBefore(block->lastIns(), def->toInstruction()); |
michael@0 | 1906 | def->computeRange(alloc); |
michael@0 | 1907 | } |
michael@0 | 1908 | |
michael@0 | 1909 | return def; |
michael@0 | 1910 | } |
michael@0 | 1911 | |
michael@0 | 1912 | bool |
michael@0 | 1913 | RangeAnalysis::tryHoistBoundsCheck(MBasicBlock *header, MBoundsCheck *ins) |
michael@0 | 1914 | { |
michael@0 | 1915 | // The bounds check's length must be loop invariant. |
michael@0 | 1916 | if (ins->length()->block()->isMarked()) |
michael@0 | 1917 | return false; |
michael@0 | 1918 | |
michael@0 | 1919 | // The bounds check's index should not be loop invariant (else we would |
michael@0 | 1920 | // already have hoisted it during LICM). |
michael@0 | 1921 | SimpleLinearSum index = ExtractLinearSum(ins->index()); |
michael@0 | 1922 | if (!index.term || !index.term->block()->isMarked()) |
michael@0 | 1923 | return false; |
michael@0 | 1924 | |
michael@0 | 1925 | // Check for a symbolic lower and upper bound on the index. If either |
michael@0 | 1926 | // condition depends on an iteration bound for the loop, only hoist if |
michael@0 | 1927 | // the bounds check is dominated by the iteration bound's test. |
michael@0 | 1928 | if (!index.term->range()) |
michael@0 | 1929 | return false; |
michael@0 | 1930 | const SymbolicBound *lower = index.term->range()->symbolicLower(); |
michael@0 | 1931 | if (!lower || !SymbolicBoundIsValid(header, ins, lower)) |
michael@0 | 1932 | return false; |
michael@0 | 1933 | const SymbolicBound *upper = index.term->range()->symbolicUpper(); |
michael@0 | 1934 | if (!upper || !SymbolicBoundIsValid(header, ins, upper)) |
michael@0 | 1935 | return false; |
michael@0 | 1936 | |
michael@0 | 1937 | MBasicBlock *preLoop = header->loopPredecessor(); |
michael@0 | 1938 | JS_ASSERT(!preLoop->isMarked()); |
michael@0 | 1939 | |
michael@0 | 1940 | MDefinition *lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum); |
michael@0 | 1941 | if (!lowerTerm) |
michael@0 | 1942 | return false; |
michael@0 | 1943 | |
michael@0 | 1944 | MDefinition *upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum); |
michael@0 | 1945 | if (!upperTerm) |
michael@0 | 1946 | return false; |
michael@0 | 1947 | |
michael@0 | 1948 | // We are checking that index + indexConstant >= 0, and know that |
michael@0 | 1949 | // index >= lowerTerm + lowerConstant. Thus, check that: |
michael@0 | 1950 | // |
michael@0 | 1951 | // lowerTerm + lowerConstant + indexConstant >= 0 |
michael@0 | 1952 | // lowerTerm >= -lowerConstant - indexConstant |
michael@0 | 1953 | |
michael@0 | 1954 | int32_t lowerConstant = 0; |
michael@0 | 1955 | if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) |
michael@0 | 1956 | return false; |
michael@0 | 1957 | if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) |
michael@0 | 1958 | return false; |
michael@0 | 1959 | |
michael@0 | 1960 | // We are checking that index < boundsLength, and know that |
michael@0 | 1961 | // index <= upperTerm + upperConstant. Thus, check that: |
michael@0 | 1962 | // |
michael@0 | 1963 | // upperTerm + upperConstant < boundsLength |
michael@0 | 1964 | |
michael@0 | 1965 | int32_t upperConstant = index.constant; |
michael@0 | 1966 | if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) |
michael@0 | 1967 | return false; |
michael@0 | 1968 | |
michael@0 | 1969 | MBoundsCheckLower *lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm); |
michael@0 | 1970 | lowerCheck->setMinimum(lowerConstant); |
michael@0 | 1971 | |
michael@0 | 1972 | MBoundsCheck *upperCheck = MBoundsCheck::New(alloc(), upperTerm, ins->length()); |
michael@0 | 1973 | upperCheck->setMinimum(upperConstant); |
michael@0 | 1974 | upperCheck->setMaximum(upperConstant); |
michael@0 | 1975 | |
michael@0 | 1976 | // Hoist the loop invariant upper and lower bounds checks. |
michael@0 | 1977 | preLoop->insertBefore(preLoop->lastIns(), lowerCheck); |
michael@0 | 1978 | preLoop->insertBefore(preLoop->lastIns(), upperCheck); |
michael@0 | 1979 | |
michael@0 | 1980 | return true; |
michael@0 | 1981 | } |
michael@0 | 1982 | |
michael@0 | 1983 | bool |
michael@0 | 1984 | RangeAnalysis::analyze() |
michael@0 | 1985 | { |
michael@0 | 1986 | IonSpew(IonSpew_Range, "Doing range propagation"); |
michael@0 | 1987 | |
michael@0 | 1988 | for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { |
michael@0 | 1989 | MBasicBlock *block = *iter; |
michael@0 | 1990 | |
michael@0 | 1991 | if (block->unreachable()) |
michael@0 | 1992 | continue; |
michael@0 | 1993 | |
michael@0 | 1994 | for (MDefinitionIterator iter(block); iter; iter++) { |
michael@0 | 1995 | MDefinition *def = *iter; |
michael@0 | 1996 | |
michael@0 | 1997 | def->computeRange(alloc()); |
michael@0 | 1998 | IonSpew(IonSpew_Range, "computing range on %d", def->id()); |
michael@0 | 1999 | SpewRange(def); |
michael@0 | 2000 | } |
michael@0 | 2001 | |
michael@0 | 2002 | if (block->isLoopHeader()) { |
michael@0 | 2003 | if (!analyzeLoop(block)) |
michael@0 | 2004 | return false; |
michael@0 | 2005 | } |
michael@0 | 2006 | |
michael@0 | 2007 | // First pass at collecting range info - while the beta nodes are still |
michael@0 | 2008 | // around and before truncation. |
michael@0 | 2009 | for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) { |
michael@0 | 2010 | iter->collectRangeInfoPreTrunc(); |
michael@0 | 2011 | |
michael@0 | 2012 | // It would have been nice to implement this using the collectRangeInfoPreTrunc() |
michael@0 | 2013 | // method, but it needs minAsmJSHeapLength(). |
michael@0 | 2014 | if (mir->compilingAsmJS()) { |
michael@0 | 2015 | uint32_t minHeapLength = mir->minAsmJSHeapLength(); |
michael@0 | 2016 | if (iter->isAsmJSLoadHeap()) { |
michael@0 | 2017 | MAsmJSLoadHeap *ins = iter->toAsmJSLoadHeap(); |
michael@0 | 2018 | Range *range = ins->ptr()->range(); |
michael@0 | 2019 | if (range && range->hasInt32LowerBound() && range->lower() >= 0 && |
michael@0 | 2020 | range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) { |
michael@0 | 2021 | ins->setSkipBoundsCheck(true); |
michael@0 | 2022 | } |
michael@0 | 2023 | } else if (iter->isAsmJSStoreHeap()) { |
michael@0 | 2024 | MAsmJSStoreHeap *ins = iter->toAsmJSStoreHeap(); |
michael@0 | 2025 | Range *range = ins->ptr()->range(); |
michael@0 | 2026 | if (range && range->hasInt32LowerBound() && range->lower() >= 0 && |
michael@0 | 2027 | range->hasInt32UpperBound() && (uint32_t) range->upper() < minHeapLength) { |
michael@0 | 2028 | ins->setSkipBoundsCheck(true); |
michael@0 | 2029 | } |
michael@0 | 2030 | } |
michael@0 | 2031 | } |
michael@0 | 2032 | } |
michael@0 | 2033 | } |
michael@0 | 2034 | |
michael@0 | 2035 | return true; |
michael@0 | 2036 | } |
michael@0 | 2037 | |
michael@0 | 2038 | bool |
michael@0 | 2039 | RangeAnalysis::addRangeAssertions() |
michael@0 | 2040 | { |
michael@0 | 2041 | if (!js_JitOptions.checkRangeAnalysis) |
michael@0 | 2042 | return true; |
michael@0 | 2043 | |
michael@0 | 2044 | // Check the computed range for this instruction, if the option is set. Note |
michael@0 | 2045 | // that this code is quite invasive; it adds numerous additional |
michael@0 | 2046 | // instructions for each MInstruction with a computed range, and it uses |
michael@0 | 2047 | // registers, so it also affects register allocation. |
michael@0 | 2048 | for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { |
michael@0 | 2049 | MBasicBlock *block = *iter; |
michael@0 | 2050 | |
michael@0 | 2051 | for (MDefinitionIterator iter(block); iter; iter++) { |
michael@0 | 2052 | MDefinition *ins = *iter; |
michael@0 | 2053 | |
michael@0 | 2054 | // Perform range checking for all numeric and numeric-like types. |
michael@0 | 2055 | if (!IsNumberType(ins->type()) && |
michael@0 | 2056 | ins->type() != MIRType_Boolean && |
michael@0 | 2057 | ins->type() != MIRType_Value) |
michael@0 | 2058 | { |
michael@0 | 2059 | continue; |
michael@0 | 2060 | } |
michael@0 | 2061 | |
michael@0 | 2062 | Range r(ins); |
michael@0 | 2063 | |
michael@0 | 2064 | // Don't insert assertions if there's nothing interesting to assert. |
michael@0 | 2065 | if (r.isUnknown() || (ins->type() == MIRType_Int32 && r.isUnknownInt32())) |
michael@0 | 2066 | continue; |
michael@0 | 2067 | |
michael@0 | 2068 | MAssertRange *guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r)); |
michael@0 | 2069 | |
michael@0 | 2070 | // Beta nodes and interrupt checks are required to be located at the |
michael@0 | 2071 | // beginnings of basic blocks, so we must insert range assertions |
michael@0 | 2072 | // after any such instructions. |
michael@0 | 2073 | MInstructionIterator insertIter = ins->isPhi() |
michael@0 | 2074 | ? block->begin() |
michael@0 | 2075 | : block->begin(ins->toInstruction()); |
michael@0 | 2076 | while (insertIter->isBeta() || |
michael@0 | 2077 | insertIter->isInterruptCheck() || |
michael@0 | 2078 | insertIter->isInterruptCheckPar() || |
michael@0 | 2079 | insertIter->isConstant()) |
michael@0 | 2080 | { |
michael@0 | 2081 | insertIter++; |
michael@0 | 2082 | } |
michael@0 | 2083 | |
michael@0 | 2084 | if (*insertIter == *iter) |
michael@0 | 2085 | block->insertAfter(*insertIter, guard); |
michael@0 | 2086 | else |
michael@0 | 2087 | block->insertBefore(*insertIter, guard); |
michael@0 | 2088 | } |
michael@0 | 2089 | } |
michael@0 | 2090 | |
michael@0 | 2091 | return true; |
michael@0 | 2092 | } |
michael@0 | 2093 | |
michael@0 | 2094 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 2095 | // Range-based Truncation |
michael@0 | 2096 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 2097 | |
michael@0 | 2098 | void |
michael@0 | 2099 | Range::clampToInt32() |
michael@0 | 2100 | { |
michael@0 | 2101 | if (isInt32()) |
michael@0 | 2102 | return; |
michael@0 | 2103 | int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN; |
michael@0 | 2104 | int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX; |
michael@0 | 2105 | setInt32(l, h); |
michael@0 | 2106 | } |
michael@0 | 2107 | |
michael@0 | 2108 | void |
michael@0 | 2109 | Range::wrapAroundToInt32() |
michael@0 | 2110 | { |
michael@0 | 2111 | if (!hasInt32Bounds()) { |
michael@0 | 2112 | setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX); |
michael@0 | 2113 | } else if (canHaveFractionalPart()) { |
michael@0 | 2114 | canHaveFractionalPart_ = false; |
michael@0 | 2115 | |
michael@0 | 2116 | // Clearing the fractional field may provide an opportunity to refine |
michael@0 | 2117 | // lower_ or upper_. |
michael@0 | 2118 | refineInt32BoundsByExponent(max_exponent_, &lower_, &upper_); |
michael@0 | 2119 | |
michael@0 | 2120 | assertInvariants(); |
michael@0 | 2121 | } |
michael@0 | 2122 | } |
michael@0 | 2123 | |
michael@0 | 2124 | void |
michael@0 | 2125 | Range::wrapAroundToShiftCount() |
michael@0 | 2126 | { |
michael@0 | 2127 | wrapAroundToInt32(); |
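michael@0 |      | // Note the conservatism here: a range such as [0, 40] is not masked |
michael@0 |      | // element-wise (JS shift counts use count & 31) but simply widened to |
michael@0 |      | // [0, 31]; only ranges already within [0, 31] survive unchanged. |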
michael@0 | 2128 | if (lower() < 0 || upper() >= 32) |
michael@0 | 2129 | setInt32(0, 31); |
michael@0 | 2130 | } |
michael@0 | 2131 | |
michael@0 | 2132 | void |
michael@0 | 2133 | Range::wrapAroundToBoolean() |
michael@0 | 2134 | { |
michael@0 | 2135 | wrapAroundToInt32(); |
michael@0 | 2136 | if (!isBoolean()) |
michael@0 | 2137 | setInt32(0, 1); |
michael@0 | 2138 | } |
michael@0 | 2139 | |
michael@0 | 2140 | bool |
michael@0 | 2141 | MDefinition::truncate() |
michael@0 | 2142 | { |
michael@0 | 2143 | // No procedure defined for truncating this instruction. |
michael@0 | 2144 | return false; |
michael@0 | 2145 | } |
michael@0 | 2146 | |
michael@0 | 2147 | bool |
michael@0 | 2148 | MConstant::truncate() |
michael@0 | 2149 | { |
michael@0 | 2150 | if (!value_.isDouble()) |
michael@0 | 2151 | return false; |
michael@0 | 2152 | |
michael@0 | 2153 | // Truncate the double to an int, since all of its uses truncate it. |
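michael@0 |      | // For example, a double constant 4.5 truncates to 4 and -1.5 to -1, |
michael@0 |      | // while an out-of-range value such as 4294967296.5 wraps modulo 2^32 |
michael@0 |      | // to 0 under ToInt32. |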
michael@0 | 2154 | int32_t res = ToInt32(value_.toDouble()); |
michael@0 | 2155 | value_.setInt32(res); |
michael@0 | 2156 | setResultType(MIRType_Int32); |
michael@0 | 2157 | if (range()) |
michael@0 | 2158 | range()->setInt32(res, res); |
michael@0 | 2159 | return true; |
michael@0 | 2160 | } |
michael@0 | 2161 | |
michael@0 | 2162 | bool |
michael@0 | 2163 | MAdd::truncate() |
michael@0 | 2164 | { |
michael@0 | 2165 | // Remember analysis, needed for fallible checks. |
michael@0 | 2166 | setTruncated(true); |
michael@0 | 2167 | |
michael@0 | 2168 | if (type() == MIRType_Double || type() == MIRType_Int32) { |
michael@0 | 2169 | specialization_ = MIRType_Int32; |
michael@0 | 2170 | setResultType(MIRType_Int32); |
michael@0 | 2171 | if (range()) |
michael@0 | 2172 | range()->wrapAroundToInt32(); |
michael@0 | 2173 | return true; |
michael@0 | 2174 | } |
michael@0 | 2175 | |
michael@0 | 2176 | return false; |
michael@0 | 2177 | } |
michael@0 | 2178 | |
michael@0 | 2179 | bool |
michael@0 | 2180 | MSub::truncate() |
michael@0 | 2181 | { |
michael@0 | 2182 | // Remember analysis, needed for fallible checks. |
michael@0 | 2183 | setTruncated(true); |
michael@0 | 2184 | |
michael@0 | 2185 | if (type() == MIRType_Double || type() == MIRType_Int32) { |
michael@0 | 2186 | specialization_ = MIRType_Int32; |
michael@0 | 2187 | setResultType(MIRType_Int32); |
michael@0 | 2188 | if (range()) |
michael@0 | 2189 | range()->wrapAroundToInt32(); |
michael@0 | 2190 | return true; |
michael@0 | 2191 | } |
michael@0 | 2192 | |
michael@0 | 2193 | return false; |
michael@0 | 2194 | } |
michael@0 | 2195 | |
michael@0 | 2196 | bool |
michael@0 | 2197 | MMul::truncate() |
michael@0 | 2198 | { |
michael@0 | 2199 | // Remember analysis, needed to remove negative zero checks. |
michael@0 | 2200 | setTruncated(true); |
michael@0 | 2201 | |
michael@0 | 2202 | if (type() == MIRType_Double || type() == MIRType_Int32) { |
michael@0 | 2203 | specialization_ = MIRType_Int32; |
michael@0 | 2204 | setResultType(MIRType_Int32); |
michael@0 | 2205 | setCanBeNegativeZero(false); |
michael@0 | 2206 | if (range()) |
michael@0 | 2207 | range()->wrapAroundToInt32(); |
michael@0 | 2208 | return true; |
michael@0 | 2209 | } |
michael@0 | 2210 | |
michael@0 | 2211 | return false; |
michael@0 | 2212 | } |
michael@0 | 2213 | |
michael@0 | 2214 | bool |
michael@0 | 2215 | MDiv::truncate() |
michael@0 | 2216 | { |
michael@0 | 2217 | // Remember analysis, needed to remove negative zero checks. |
michael@0 | 2218 | setTruncatedIndirectly(true); |
michael@0 | 2219 | |
michael@0 | 2220 | // Check whether this division flows only into bitwise instructions. |
michael@0 | 2221 | if (!isTruncated()) { |
michael@0 | 2222 | bool allUsesExplicitlyTruncate = true; |
michael@0 | 2223 | for (MUseDefIterator use(this); allUsesExplicitlyTruncate && use; use++) { |
michael@0 | 2224 | switch (use.def()->op()) { |
michael@0 | 2225 | case MDefinition::Op_BitAnd: |
michael@0 | 2226 | case MDefinition::Op_BitOr: |
michael@0 | 2227 | case MDefinition::Op_BitXor: |
michael@0 | 2228 | case MDefinition::Op_Lsh: |
michael@0 | 2229 | case MDefinition::Op_Rsh: |
michael@0 | 2230 | case MDefinition::Op_Ursh: |
michael@0 | 2231 | break; |
michael@0 | 2232 | default: |
michael@0 | 2233 | allUsesExplicitlyTruncate = false; |
michael@0 | 2234 | } |
michael@0 | 2235 | } |
michael@0 | 2236 | |
michael@0 | 2237 | if (allUsesExplicitlyTruncate) |
michael@0 | 2238 | setTruncated(true); |
michael@0 | 2239 | } |
michael@0 | 2240 | |
michael@0 | 2241 | // Divisions where the lhs and rhs are unsigned and the result is |
michael@0 | 2242 | // truncated can be lowered more efficiently. |
michael@0 | 2243 | if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) { |
michael@0 | 2244 | unsigned_ = true; |
michael@0 | 2245 | return true; |
michael@0 | 2246 | } |
michael@0 | 2247 | |
michael@0 | 2248 | // No modifications. |
michael@0 | 2249 | return false; |
michael@0 | 2250 | } |
michael@0 | 2251 | |
michael@0 | 2252 | bool |
michael@0 | 2253 | MMod::truncate() |
michael@0 | 2254 | { |
michael@0 | 2255 | // Remember analysis, needed to remove negative zero checks. |
michael@0 | 2256 | setTruncated(true); |
michael@0 | 2257 | |
michael@0 | 2258 | // As for division, handle unsigned modulus with a truncated result. |
michael@0 | 2259 | if (specialization() == MIRType_Int32 && tryUseUnsignedOperands()) { |
michael@0 | 2260 | unsigned_ = true; |
michael@0 | 2261 | return true; |
michael@0 | 2262 | } |
michael@0 | 2263 | |
michael@0 | 2264 | // No modifications. |
michael@0 | 2265 | return false; |
michael@0 | 2266 | } |
michael@0 | 2267 | |
michael@0 | 2268 | bool |
michael@0 | 2269 | MToDouble::truncate() |
michael@0 | 2270 | { |
michael@0 | 2271 | JS_ASSERT(type() == MIRType_Double); |
michael@0 | 2272 | |
michael@0 | 2273 | // We use the return type to flag that this MToDouble should be replaced by |
michael@0 | 2274 | // an MTruncateToInt32 when modifying the graph. |
michael@0 | 2275 | setResultType(MIRType_Int32); |
michael@0 | 2276 | if (range()) |
michael@0 | 2277 | range()->wrapAroundToInt32(); |
michael@0 | 2278 | |
michael@0 | 2279 | return true; |
michael@0 | 2280 | } |
michael@0 | 2281 | |
michael@0 | 2282 | bool |
michael@0 | 2283 | MLoadTypedArrayElementStatic::truncate() |
michael@0 | 2284 | { |
michael@0 | 2285 | setInfallible(); |
michael@0 | 2286 | return false; |
michael@0 | 2287 | } |
michael@0 | 2288 | |
michael@0 | 2289 | bool |
michael@0 | 2290 | MDefinition::isOperandTruncated(size_t index) const |
michael@0 | 2291 | { |
michael@0 | 2292 | return false; |
michael@0 | 2293 | } |
michael@0 | 2294 | |
michael@0 | 2295 | bool |
michael@0 | 2296 | MTruncateToInt32::isOperandTruncated(size_t index) const |
michael@0 | 2297 | { |
michael@0 | 2298 | return true; |
michael@0 | 2299 | } |
michael@0 | 2300 | |
michael@0 | 2301 | bool |
michael@0 | 2302 | MBinaryBitwiseInstruction::isOperandTruncated(size_t index) const |
michael@0 | 2303 | { |
michael@0 | 2304 | return true; |
michael@0 | 2305 | } |
michael@0 | 2306 | |
michael@0 | 2307 | bool |
michael@0 | 2308 | MAdd::isOperandTruncated(size_t index) const |
michael@0 | 2309 | { |
michael@0 | 2310 | return isTruncated(); |
michael@0 | 2311 | } |
michael@0 | 2312 | |
michael@0 | 2313 | bool |
michael@0 | 2314 | MSub::isOperandTruncated(size_t index) const |
michael@0 | 2315 | { |
michael@0 | 2316 | return isTruncated(); |
michael@0 | 2317 | } |
michael@0 | 2318 | |
michael@0 | 2319 | bool |
michael@0 | 2320 | MMul::isOperandTruncated(size_t index) const |
michael@0 | 2321 | { |
michael@0 | 2322 | return isTruncated(); |
michael@0 | 2323 | } |
michael@0 | 2324 | |
michael@0 | 2325 | bool |
michael@0 | 2326 | MToDouble::isOperandTruncated(size_t index) const |
michael@0 | 2327 | { |
michael@0 | 2328 | // The return type is used to flag that we are replacing this MToDouble by a |
michael@0 | 2329 | // truncation of its operand if needed. |
michael@0 | 2330 | return type() == MIRType_Int32; |
michael@0 | 2331 | } |
michael@0 | 2332 | |
michael@0 | 2333 | bool |
michael@0 | 2334 | MStoreTypedArrayElement::isOperandTruncated(size_t index) const |
michael@0 | 2335 | { |
michael@0 | 2336 | return index == 2 && !isFloatArray(); |
michael@0 | 2337 | } |
michael@0 | 2338 | |
michael@0 | 2339 | bool |
michael@0 | 2340 | MStoreTypedArrayElementHole::isOperandTruncated(size_t index) const |
michael@0 | 2341 | { |
michael@0 | 2342 | return index == 3 && !isFloatArray(); |
michael@0 | 2343 | } |
michael@0 | 2344 | |
michael@0 | 2345 | bool |
michael@0 | 2346 | MStoreTypedArrayElementStatic::isOperandTruncated(size_t index) const |
michael@0 | 2347 | { |
michael@0 | 2348 | return index == 1 && !isFloatArray(); |
michael@0 | 2349 | } |
michael@0 | 2350 | |
michael@0 | 2351 | bool |
michael@0 | 2352 | MCompare::truncate() |
michael@0 | 2353 | { |
michael@0 | 2354 | if (!isDoubleComparison()) |
michael@0 | 2355 | return false; |
michael@0 | 2356 | |
michael@0 | 2357 | // If both operands are naturally in the int32 range, we can convert a |
michael@0 | 2358 | // double comparison into an int32 comparison. |
michael@0 | 2359 | if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) |
michael@0 | 2360 | return false; |
michael@0 | 2361 | |
michael@0 | 2362 | compareType_ = Compare_Int32; |
michael@0 | 2363 | |
michael@0 | 2364 | // Truncating the operands won't change their value, but it will change |
michael@0 | 2365 | // their type, which we need because we now expect integer inputs. |
michael@0 | 2366 | truncateOperands_ = true; |
michael@0 | 2367 | |
michael@0 | 2368 | return true; |
michael@0 | 2369 | } |
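// Editor's note: an illustrative, self-contained sketch (not SpiderMonkey
// code; values chosen arbitrarily) of why this re-specialization is sound.
// For operands already known to lie in the int32 range, truncation to int32
// preserves the value, so a double comparison and an int32 comparison agree:
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//       double a = 123456.0, b = -7.0;            // both within the int32 range
//       int32_t ia = int32_t(a), ib = int32_t(b); // value-preserving truncation
//       assert((a < b) == (ia < ib));             // same comparison result
//   }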
michael@0 | 2370 | |
michael@0 | 2371 | bool |
michael@0 | 2372 | MCompare::isOperandTruncated(size_t index) const |
michael@0 | 2373 | { |
michael@0 | 2374 | // If we're doing an int32 comparison on operands which were previously |
michael@0 | 2375 | // floating-point, convert them! |
michael@0 | 2376 | JS_ASSERT_IF(truncateOperands_, isInt32Comparison()); |
michael@0 | 2377 | return truncateOperands_; |
michael@0 | 2378 | } |
michael@0 | 2379 | |
michael@0 | 2380 | // Ensure that all observable uses can work with a truncated |
michael@0 | 2381 | // version of the |candidate|'s result. |
michael@0 | 2382 | static bool |
michael@0 | 2383 | AllUsesTruncate(MInstruction *candidate) |
michael@0 | 2384 | { |
michael@0 | 2385 | // If the candidate naturally produces an int32 value (before bailout |
michael@0 | 2386 | // checks) that needs no conversion, we don't have to worry about resume |
michael@0 | 2387 | // points seeing truncated values. |
michael@0 | 2388 | bool needsConversion = !candidate->range() || !candidate->range()->isInt32(); |
michael@0 | 2389 | |
michael@0 | 2390 | for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) { |
michael@0 | 2391 | if (!use->consumer()->isDefinition()) { |
michael@0 | 2392 | // We can only skip testing resume points if all original uses are |
michael@0 | 2393 | // still present, or if the value does not need conversion. |
michael@0 | 2394 | // Otherwise a branch removed by UCE might rely on the non-truncated |
michael@0 | 2395 | // value, and any bailout with a truncated value might lead to an |
michael@0 | 2396 | // incorrect value. |
michael@0 | 2397 | if (candidate->isUseRemoved() && needsConversion) |
michael@0 | 2398 | return false; |
michael@0 | 2399 | continue; |
michael@0 | 2400 | } |
michael@0 | 2401 | |
michael@0 | 2402 | if (!use->consumer()->toDefinition()->isOperandTruncated(use->index())) |
michael@0 | 2403 | return false; |
michael@0 | 2404 | } |
michael@0 | 2405 | |
michael@0 | 2406 | return true; |
michael@0 | 2407 | } |
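// Editor's note: a hypothetical MIR shape (illustrative ids) showing the two
// kinds of consumers the loop above distinguishes:
//
//   5 add(x, y)              ; the |candidate|
//   6 bitor(5, c)            ; definition use: isOperandTruncated(0) is true
//   resumepoint(..., 5, ...) ; non-definition use: only safe to ignore when
//                            ; no use was removed or no conversion is needed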
michael@0 | 2408 | |
michael@0 | 2409 | static bool |
michael@0 | 2410 | CanTruncate(MInstruction *candidate) |
michael@0 | 2411 | { |
michael@0 | 2412 | // A compare operation might coerce its inputs to int32 if their ranges |
michael@0 | 2413 | // permit it, so we do not need to check whether all its uses are coerced. |
michael@0 | 2414 | if (candidate->isCompare()) |
michael@0 | 2415 | return true; |
michael@0 | 2416 | |
michael@0 | 2417 | // Set the truncated flag if range analysis ensures that the value has no |
michael@0 | 2418 | // rounding errors and no fractional part. Note that we can't use |
michael@0 | 2419 | // the MDefinition Range constructor, because we need to know if |
michael@0 | 2420 | // the value will have rounding errors before any bailout checks. |
michael@0 | 2421 | const Range *r = candidate->range(); |
michael@0 | 2422 | bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors(); |
michael@0 | 2423 | |
michael@0 | 2424 | // Special case integer division: the result of a/b can be infinite |
michael@0 | 2425 | // but cannot actually have rounding errors induced by truncation. |
michael@0 | 2426 | if (candidate->isDiv() && candidate->toDiv()->specialization() == MIRType_Int32) |
michael@0 | 2427 | canHaveRoundingErrors = false; |
michael@0 | 2428 | |
michael@0 | 2429 | if (canHaveRoundingErrors) |
michael@0 | 2430 | return false; |
michael@0 | 2431 | |
michael@0 | 2432 | // Ensure all observable uses are truncated. |
michael@0 | 2433 | return AllUsesTruncate(candidate); |
michael@0 | 2434 | } |
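// Editor's note: here "rounding errors" means that truncation could change
// the observed value. A minimal standalone illustration (not jit code):
//
//   #include <cstdint>
//   int main() {
//       double a = 3.0;   // no fractional part: int32_t(a) == 3, exact
//       double b = 2.5;   // fractional part: int32_t(b) == 2, so truncating
//                         // |b| changes its value
//       return int32_t(a) + int32_t(b);
//   }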
michael@0 | 2435 | |
michael@0 | 2436 | static void |
michael@0 | 2437 | RemoveTruncatesOnOutput(MInstruction *truncated) |
michael@0 | 2438 | { |
michael@0 | 2439 | // Compare returns a boolean, so it doesn't have any output truncates. |
michael@0 | 2440 | if (truncated->isCompare()) |
michael@0 | 2441 | return; |
michael@0 | 2442 | |
michael@0 | 2443 | JS_ASSERT(truncated->type() == MIRType_Int32); |
michael@0 | 2444 | JS_ASSERT(Range(truncated).isInt32()); |
michael@0 | 2445 | |
michael@0 | 2446 | for (MUseDefIterator use(truncated); use; use++) { |
michael@0 | 2447 | MDefinition *def = use.def(); |
michael@0 | 2448 | if (!def->isTruncateToInt32() && !def->isToInt32()) |
michael@0 | 2449 | continue; |
michael@0 | 2450 | |
michael@0 | 2451 | def->replaceAllUsesWith(truncated); |
michael@0 | 2452 | } |
michael@0 | 2453 | } |
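// Editor's note: an illustrative example (hypothetical MIR ids) of the
// cleanup above, assuming the && fix in the condition (the original ||
// skipped every use, since a definition cannot be both opcodes at once).
// Once an instruction is truncated and proven to produce an int32, explicit
// conversions of its output are redundant:
//
//   10 add truncated(x, y)   ; now directly produces an Int32
//   11 truncatetoint32 10    ; redundant: uses of 11 are redirected to 10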
michael@0 | 2454 | |
michael@0 | 2455 | static void |
michael@0 | 2456 | AdjustTruncatedInputs(TempAllocator &alloc, MInstruction *truncated) |
michael@0 | 2457 | { |
michael@0 | 2458 | MBasicBlock *block = truncated->block(); |
michael@0 | 2459 | for (size_t i = 0, e = truncated->numOperands(); i < e; i++) { |
michael@0 | 2460 | if (!truncated->isOperandTruncated(i)) |
michael@0 | 2461 | continue; |
michael@0 | 2462 | |
michael@0 | 2463 | MDefinition *input = truncated->getOperand(i); |
michael@0 | 2464 | if (input->type() == MIRType_Int32) |
michael@0 | 2465 | continue; |
michael@0 | 2466 | |
michael@0 | 2467 | if (input->isToDouble() && input->getOperand(0)->type() == MIRType_Int32) { |
michael@0 | 2468 | JS_ASSERT(input->range()->isInt32()); |
michael@0 | 2469 | truncated->replaceOperand(i, input->getOperand(0)); |
michael@0 | 2470 | } else { |
michael@0 | 2471 | MTruncateToInt32 *op = MTruncateToInt32::New(alloc, truncated->getOperand(i)); |
michael@0 | 2472 | block->insertBefore(truncated, op); |
michael@0 | 2473 | truncated->replaceOperand(i, op); |
michael@0 | 2474 | } |
michael@0 | 2475 | } |
michael@0 | 2476 | |
michael@0 | 2477 | if (truncated->isToDouble()) { |
michael@0 | 2478 | truncated->replaceAllUsesWith(truncated->getOperand(0)); |
michael@0 | 2479 | block->discard(truncated); |
michael@0 | 2480 | } |
michael@0 | 2481 | } |
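// Editor's note: a sketch (hypothetical MIR ids) of the two operand rewrites
// performed above:
//
//   3 todouble(i)            ; i is an Int32
//   7 add truncated(3, y)    ; operand 0 becomes i itself (conversion peeled)
//   8 mul truncated(d, e)    ; other non-int32 operands get a fresh
//                            ; truncatetoint32 inserted just before 8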
michael@0 | 2482 | |
michael@0 | 2483 | // Iterate backward over all instructions and attempt to truncate each |
michael@0 | 2484 | // instruction which satisfies the following predicates: it has been |
michael@0 | 2485 | // analyzed by range analysis, its range has no rounding errors, and all |
michael@0 | 2486 | // of its uses truncate the result. |
michael@0 | 2487 | // |
michael@0 | 2488 | // If the truncation of the operation is successful, then the instruction is |
michael@0 | 2489 | // queued so that the graph can later be updated to restore type correctness |
michael@0 | 2490 | // by converting the operands that need to be truncated. |
michael@0 | 2491 | // |
michael@0 | 2492 | // We iterate backward because it is likely that a truncated operation truncates |
michael@0 | 2493 | // some of its operands. |
michael@0 | 2494 | bool |
michael@0 | 2495 | RangeAnalysis::truncate() |
michael@0 | 2496 | { |
michael@0 | 2497 | IonSpew(IonSpew_Range, "Do range-based truncation (backward loop)"); |
michael@0 | 2498 | |
michael@0 | 2499 | Vector<MInstruction *, 16, SystemAllocPolicy> worklist; |
michael@0 | 2500 | Vector<MBinaryBitwiseInstruction *, 16, SystemAllocPolicy> bitops; |
michael@0 | 2501 | |
michael@0 | 2502 | for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) { |
michael@0 | 2503 | for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) { |
michael@0 | 2504 | if (iter->type() == MIRType_None) |
michael@0 | 2505 | continue; |
michael@0 | 2506 | |
michael@0 | 2507 | // Remember all bitop instructions for folding after range analysis. |
michael@0 | 2508 | switch (iter->op()) { |
michael@0 | 2509 | case MDefinition::Op_BitAnd: |
michael@0 | 2510 | case MDefinition::Op_BitOr: |
michael@0 | 2511 | case MDefinition::Op_BitXor: |
michael@0 | 2512 | case MDefinition::Op_Lsh: |
michael@0 | 2513 | case MDefinition::Op_Rsh: |
michael@0 | 2514 | case MDefinition::Op_Ursh: |
michael@0 | 2515 | if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) |
michael@0 | 2516 | return false; |
michael@0 | 2517 | default:; |
michael@0 | 2518 | } |
michael@0 | 2519 | |
michael@0 | 2520 | if (!CanTruncate(*iter)) |
michael@0 | 2521 | continue; |
michael@0 | 2522 | |
michael@0 | 2523 | // Truncate this instruction if possible. |
michael@0 | 2524 | if (!iter->truncate()) |
michael@0 | 2525 | continue; |
michael@0 | 2526 | |
michael@0 | 2527 | // Delay updates of inputs/outputs to avoid creating nodes which |
michael@0 | 2528 | // would be removed by the truncation of subsequent operations. |
michael@0 | 2529 | iter->setInWorklist(); |
michael@0 | 2530 | if (!worklist.append(*iter)) |
michael@0 | 2531 | return false; |
michael@0 | 2532 | } |
michael@0 | 2533 | } |
michael@0 | 2534 | |
michael@0 | 2535 | // Update inputs/outputs of truncated instructions. |
michael@0 | 2536 | IonSpew(IonSpew_Range, "Do graph type fixup (dequeue)"); |
michael@0 | 2537 | while (!worklist.empty()) { |
michael@0 | 2538 | MInstruction *ins = worklist.popCopy(); |
michael@0 | 2539 | ins->setNotInWorklist(); |
michael@0 | 2540 | RemoveTruncatesOnOutput(ins); |
michael@0 | 2541 | AdjustTruncatedInputs(alloc(), ins); |
michael@0 | 2542 | } |
michael@0 | 2543 | |
michael@0 | 2544 | // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer |
michael@0 | 2545 | // input. This is done after range analysis rather than during GVN as the |
michael@0 | 2546 | // presence of the bitop can change which instructions are truncated. |
michael@0 | 2547 | for (size_t i = 0; i < bitops.length(); i++) { |
michael@0 | 2548 | MBinaryBitwiseInstruction *ins = bitops[i]; |
michael@0 | 2549 | MDefinition *folded = ins->foldUnnecessaryBitop(); |
michael@0 | 2550 | if (folded != ins) |
michael@0 | 2551 | ins->replaceAllUsesWith(folded); |
michael@0 | 2552 | } |
michael@0 | 2553 | |
michael@0 | 2554 | return true; |
michael@0 | 2555 | } |
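// Editor's note: an illustrative shape (hypothetical MIR ids) for the bitop
// folding pass above:
//
//   before:  4 bitor(x, 0)   ; x already proven to be an Int32
//            6 add(4, y)
//   after:   6 add(x, y)     ; foldUnnecessaryBitop() returned x and all
//                            ; uses of 4 were redirected to it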
michael@0 | 2556 | |
michael@0 | 2557 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 2558 | // Collect Range information of operands |
michael@0 | 2559 | /////////////////////////////////////////////////////////////////////////////// |
michael@0 | 2560 | |
michael@0 | 2561 | void |
michael@0 | 2562 | MInArray::collectRangeInfoPreTrunc() |
michael@0 | 2563 | { |
michael@0 | 2564 | Range indexRange(index()); |
michael@0 | 2565 | if (indexRange.isFiniteNonNegative()) |
michael@0 | 2566 | needsNegativeIntCheck_ = false; |
michael@0 | 2567 | } |
michael@0 | 2568 | |
michael@0 | 2569 | void |
michael@0 | 2570 | MLoadElementHole::collectRangeInfoPreTrunc() |
michael@0 | 2571 | { |
michael@0 | 2572 | Range indexRange(index()); |
michael@0 | 2573 | if (indexRange.isFiniteNonNegative()) |
michael@0 | 2574 | needsNegativeIntCheck_ = false; |
michael@0 | 2575 | } |
michael@0 | 2576 | |
michael@0 | 2577 | void |
michael@0 | 2578 | MDiv::collectRangeInfoPreTrunc() |
michael@0 | 2579 | { |
michael@0 | 2580 | Range lhsRange(lhs()); |
michael@0 | 2581 | if (lhsRange.isFiniteNonNegative()) |
michael@0 | 2582 | canBeNegativeDividend_ = false; |
michael@0 | 2583 | } |
michael@0 | 2584 | |
michael@0 | 2585 | void |
michael@0 | 2586 | MMod::collectRangeInfoPreTrunc() |
michael@0 | 2587 | { |
michael@0 | 2588 | Range lhsRange(lhs()); |
michael@0 | 2589 | if (lhsRange.isFiniteNonNegative()) |
michael@0 | 2590 | canBeNegativeDividend_ = false; |
michael@0 | 2591 | } |
michael@0 | 2592 | |
michael@0 | 2593 | void |
michael@0 | 2594 | MBoundsCheckLower::collectRangeInfoPreTrunc() |
michael@0 | 2595 | { |
michael@0 | 2596 | Range indexRange(index()); |
michael@0 | 2597 | if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) |
michael@0 | 2598 | fallible_ = false; |
michael@0 | 2599 | } |
michael@0 | 2600 | |
michael@0 | 2601 | void |
michael@0 | 2602 | MCompare::collectRangeInfoPreTrunc() |
michael@0 | 2603 | { |
michael@0 | 2604 | if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) |
michael@0 | 2605 | operandsAreNeverNaN_ = true; |
michael@0 | 2606 | } |
michael@0 | 2607 | |
michael@0 | 2608 | void |
michael@0 | 2609 | MNot::collectRangeInfoPreTrunc() |
michael@0 | 2610 | { |
michael@0 | 2611 | if (!Range(operand()).canBeNaN()) |
michael@0 | 2612 | operandIsNeverNaN_ = true; |
michael@0 | 2613 | } |
michael@0 | 2614 | |
michael@0 | 2615 | void |
michael@0 | 2616 | MPowHalf::collectRangeInfoPreTrunc() |
michael@0 | 2617 | { |
michael@0 | 2618 | Range inputRange(input()); |
michael@0 | 2619 | if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) |
michael@0 | 2620 | operandIsNeverNegativeInfinity_ = true; |
michael@0 | 2621 | if (!inputRange.canBeZero()) |
michael@0 | 2622 | operandIsNeverNegativeZero_ = true; |
michael@0 | 2623 | if (!inputRange.canBeNaN()) |
michael@0 | 2624 | operandIsNeverNaN_ = true; |
michael@0 | 2625 | } |
michael@0 | 2626 | |
michael@0 | 2627 | void |
michael@0 | 2628 | MUrsh::collectRangeInfoPreTrunc() |
michael@0 | 2629 | { |
michael@0 | 2630 | Range lhsRange(lhs()), rhsRange(rhs()); |
michael@0 | 2631 | |
michael@0 | 2632 | // As in MUrsh::computeRange(), convert the inputs. |
michael@0 | 2633 | lhsRange.wrapAroundToInt32(); |
michael@0 | 2634 | rhsRange.wrapAroundToShiftCount(); |
michael@0 | 2635 | |
michael@0 | 2636 | // If the most significant bit of our result is always going to be zero, |
michael@0 | 2637 | // we can optimize by disabling bailout checks for enforcing an int32 range. |
michael@0 | 2638 | if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) |
michael@0 | 2639 | bailoutsDisabled_ = true; |
michael@0 | 2640 | } |
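// Editor's note: a standalone sketch (not jit code) of why the second
// disjunct above is sufficient: shifting an unsigned 32-bit value right by
// at least one always clears the sign bit, so the result fits in an int32
// and the range bailout check can be skipped. (A non-negative lhs, the
// first disjunct, already fits by the same reasoning.)
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//       uint32_t x = 0xFFFFFFFFu;                // worst-case input
//       assert((x >> 1) <= uint32_t(INT32_MAX)); // MSB is always zero
//   }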
michael@0 | 2641 | |
michael@0 | 2642 | bool |
michael@0 | 2643 | RangeAnalysis::prepareForUCE(bool *shouldRemoveDeadCode) |
michael@0 | 2644 | { |
michael@0 | 2645 | *shouldRemoveDeadCode = false; |
michael@0 | 2646 | |
michael@0 | 2647 | for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) { |
michael@0 | 2648 | MBasicBlock *block = *iter; |
michael@0 | 2649 | |
michael@0 | 2650 | if (!block->unreachable()) |
michael@0 | 2651 | continue; |
michael@0 | 2652 | |
michael@0 | 2653 | MControlInstruction *cond = block->getPredecessor(0)->lastIns(); |
michael@0 | 2654 | if (!cond->isTest()) |
michael@0 | 2655 | continue; |
michael@0 | 2656 | |
michael@0 | 2657 | // Replace the condition of the test control instruction with a constant |
michael@0 | 2658 | // chosen based on which of the successors has the unreachable flag, which |
michael@0 | 2659 | // is added by MBeta::computeRange on its own block. |
michael@0 | 2660 | MTest *test = cond->toTest(); |
michael@0 | 2661 | MConstant *constant = nullptr; |
michael@0 | 2662 | if (block == test->ifTrue()) { |
michael@0 | 2663 | constant = MConstant::New(alloc(), BooleanValue(false)); |
michael@0 | 2664 | } else { |
michael@0 | 2665 | JS_ASSERT(block == test->ifFalse()); |
michael@0 | 2666 | constant = MConstant::New(alloc(), BooleanValue(true)); |
michael@0 | 2667 | } |
michael@0 | 2668 | test->block()->insertBefore(test, constant); |
michael@0 | 2669 | test->replaceOperand(0, constant); |
michael@0 | 2670 | IonSpew(IonSpew_Range, "Update condition of %d to reflect unreachable branches.", |
michael@0 | 2671 | test->id()); |
michael@0 | 2672 | |
michael@0 | 2673 | *shouldRemoveDeadCode = true; |
michael@0 | 2674 | } |
michael@0 | 2675 | |
michael@0 | 2676 | return true; |
michael@0 | 2677 | } |
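// Editor's note: an illustrative rewrite (hypothetical MIR ids and block
// names) of the loop above. If range analysis marked the ifTrue() successor
// unreachable:
//
//   before:  3 test(v)  -> ifTrue: B2 (unreachable), ifFalse: B3
//   after:   9 constant false
//            3 test(9)  -> ifTrue: B2, ifFalse: B3
//
// The subsequent dead-code-elimination pass can then remove B2 entirely.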