js/src/jit/LiveRangeAllocator.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset   0:6474c204b198
permissions -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/LiveRangeAllocator.h"

#include "mozilla/DebugOnly.h"

#include "jsprf.h"

#include "jit/BacktrackingAllocator.h"
#include "jit/BitSet.h"
#include "jit/LinearScan.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

int
Requirement::priority() const
{
    switch (kind_) {
      case Requirement::FIXED:
        return 0;

      case Requirement::REGISTER:
        return 1;

      case Requirement::NONE:
        return 2;

      default:
        MOZ_ASSUME_UNREACHABLE("Unknown requirement kind.");
    }
}

bool
LiveInterval::Range::contains(const Range *other) const
{
    return from <= other->from && to >= other->to;
}

void
LiveInterval::Range::intersect(const Range *other, Range *pre, Range *inside, Range *post) const
{
    JS_ASSERT(pre->empty() && inside->empty() && post->empty());

    CodePosition innerFrom = from;
    if (from < other->from) {
        if (to < other->from) {
            *pre = Range(from, to);
            return;
        }
        *pre = Range(from, other->from);
        innerFrom = other->from;
    }

    CodePosition innerTo = to;
    if (to > other->to) {
        if (from >= other->to) {
            *post = Range(from, to);
            return;
        }
        *post = Range(other->to, to);
        innerTo = other->to;
    }

    if (innerFrom != innerTo)
        *inside = Range(innerFrom, innerTo);
}
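
// Worked example (illustrative, not part of the original source): with
// this = [1,10) and other = [4,6), the three output ranges are
// pre = [1,4), inside = [4,6) and post = [6,10). Disjoint inputs take the
// early returns: this = [1,3) against other = [4,6) yields only pre = [1,3).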

bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
    JS_ASSERT(from < to);

    Range newRange(from, to);

    if (ranges_.empty())
        return ranges_.append(newRange);

    Range &first = ranges_.back();
    if (to < first.from)
        return ranges_.append(newRange);

    if (to == first.from) {
        first.from = from;
        return true;
    }

    JS_ASSERT(from < first.to);
    JS_ASSERT(to > first.from);
    if (from < first.from)
        first.from = from;
    if (to > first.to)
        first.to = to;

    return true;
}
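
// Note (inferred from the code): ranges_ is kept sorted in descending order,
// so ranges_.back() is the earliest range. Because liveness is built by
// walking instructions in reverse, new ranges usually arrive at the head.
// For example, starting from {[4,6)}, addRangeAtHead(2, 4) merges into
// {[2,6)}, and a further addRangeAtHead(0, 1) prepends to give {[0,1), [2,6)}.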

bool
LiveInterval::addRange(CodePosition from, CodePosition to)
{
    JS_ASSERT(from < to);

    Range newRange(from, to);

    Range *i;
    // Find the location to insert the new range
    for (i = ranges_.end() - 1; i >= ranges_.begin(); i--) {
        if (newRange.from <= i->to) {
            if (i->from < newRange.from)
                newRange.from = i->from;
            break;
        }
    }
    // Perform coalescing on overlapping ranges
    for (; i >= ranges_.begin(); i--) {
        if (newRange.to < i->from)
            break;
        if (newRange.to < i->to)
            newRange.to = i->to;
        ranges_.erase(i);
    }

    return ranges_.insert(i + 1, newRange);
}
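
// Coalescing example (illustrative): given the ranges {[1,3), [4,6), [8,10)},
// addRange(5, 9) absorbs both overlapping ranges and leaves {[1,3), [4,10)}.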

void
LiveInterval::setFrom(CodePosition from)
{
    while (!ranges_.empty()) {
        if (ranges_.back().to < from) {
            ranges_.erase(&ranges_.back());
        } else {
            if (from == ranges_.back().to)
                ranges_.erase(&ranges_.back());
            else
                ranges_.back().from = from;
            break;
        }
    }
}
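
// Example (illustrative): with ranges {[1,3), [5,9)}, setFrom(6) discards
// [1,3) entirely and truncates the remaining range to leave {[6,9)}.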

bool
LiveInterval::covers(CodePosition pos)
{
    if (pos < start() || pos >= end())
        return false;

    // Loop over the ranges in ascending order.
    size_t i = lastProcessedRangeIfValid(pos);
    for (; i < ranges_.length(); i--) {
        if (pos < ranges_[i].from)
            return false;
        setLastProcessedRange(i, pos);
        if (pos < ranges_[i].to)
            return true;
    }
    return false;
}
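
// The loop above relies on unsigned wraparound: since ranges_ is stored in
// descending order, decrementing i walks the ranges in ascending position
// order, and once i wraps past zero the "i < ranges_.length()" test fails.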

CodePosition
LiveInterval::nextCoveredAfter(CodePosition pos)
{
    for (size_t i = 0; i < ranges_.length(); i++) {
        if (ranges_[i].to <= pos) {
            if (i)
                return ranges_[i-1].from;
            break;
        }
        if (ranges_[i].from <= pos)
            return pos;
    }
    return CodePosition::MIN;
}

CodePosition
LiveInterval::intersect(LiveInterval *other)
{
    if (start() > other->start())
        return other->intersect(this);

    // Loop over the ranges in ascending order. As an optimization,
    // try to start at the last processed range.
    size_t i = lastProcessedRangeIfValid(other->start());
    size_t j = other->ranges_.length() - 1;
    if (i >= ranges_.length() || j >= other->ranges_.length())
        return CodePosition::MIN;

    while (true) {
        const Range &r1 = ranges_[i];
        const Range &r2 = other->ranges_[j];

        if (r1.from <= r2.from) {
            if (r1.from <= other->start())
                setLastProcessedRange(i, other->start());
            if (r2.from < r1.to)
                return r2.from;
            if (i == 0 || ranges_[i-1].from > other->end())
                break;
            i--;
        } else {
            if (r1.from < r2.to)
                return r1.from;
            if (j == 0 || other->ranges_[j-1].from > end())
                break;
            j--;
        }
    }

    return CodePosition::MIN;
}
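
// Worked example (illustrative): for this = {[1,4), [6,10)} and
// other = {[3,8)}, the walk starts at the earliest range of each interval,
// finds that [1,4) and [3,8) overlap, and returns position 3, the first
// code position covered by both intervals.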

/*
 * This function takes the callee interval and moves all ranges following or
 * including the provided position to the target interval. Additionally, if a
 * range in the callee interval spans the given position, it is split and the
 * latter half is placed in the target interval.
 *
 * This function should only be called if it is known that the interval should
 * actually be split (and, presumably, a move inserted). As such, it is an
 * error for the caller to request a split that moves all intervals into the
 * target. Doing so will trip the assertion at the bottom of the function.
 */
bool
LiveInterval::splitFrom(CodePosition pos, LiveInterval *after)
{
    JS_ASSERT(pos >= start() && pos < end());
    JS_ASSERT(after->ranges_.empty());

    // Move all intervals over to the target
    size_t bufferLength = ranges_.length();
    Range *buffer = ranges_.extractRawBuffer();
    if (!buffer)
        return false;
    after->ranges_.replaceRawBuffer(buffer, bufferLength);

    // Move intervals back as required
    for (Range *i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
        if (pos >= i->to)
            continue;

        if (pos > i->from) {
            // Split the range
            Range split(i->from, pos);
            i->from = pos;
            if (!ranges_.append(split))
                return false;
        }
        if (!ranges_.append(i + 1, after->ranges_.end()))
            return false;
        after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
        break;
    }

    // Split the linked list of use positions
    UsePosition *prev = nullptr;
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (usePos->pos > pos)
            break;
        prev = *usePos;
    }

    uses_.splitAfter(prev, &after->uses_);
    return true;
}
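
// Example (illustrative): splitting {[1,3), [5,9)} at position 6 leaves
// {[1,3), [5,6)} in this interval and moves {[6,9)} into the target interval;
// use positions at or before 6 stay behind, later ones move along.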

void
LiveInterval::addUse(UsePosition *use)
{
    // Insert use positions in ascending order. Note that instructions
    // are visited in reverse order, so in most cases the loop terminates
    // at the first iteration and the use position will be added to the
    // front of the list.
    UsePosition *prev = nullptr;
    for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) {
        if (current->pos >= use->pos)
            break;
        prev = *current;
    }

    if (prev)
        uses_.insertAfter(prev, use);
    else
        uses_.pushFront(use);
}

void
LiveInterval::addUseAtEnd(UsePosition *use)
{
    JS_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos);
    uses_.pushBack(use);
}

UsePosition *
LiveInterval::nextUseAfter(CodePosition after)
{
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (usePos->pos >= after) {
            LUse::Policy policy = usePos->use->policy();
            JS_ASSERT(policy != LUse::RECOVERED_INPUT);
            if (policy != LUse::KEEPALIVE)
                return *usePos;
        }
    }
    return nullptr;
}

/*
 * This function locates the first "real" use of this interval that follows
 * the given code position. Non-"real" uses are currently just snapshots,
 * which keep virtual registers alive but do not result in the
 * generation of code that uses them.
 */
CodePosition
LiveInterval::nextUsePosAfter(CodePosition after)
{
    UsePosition *min = nextUseAfter(after);
    return min ? min->pos : CodePosition::MAX;
}

/*
 * This function finds the position of the first use of this interval
 * that is incompatible with the provided allocation. For example,
 * a use with a REGISTER policy would be incompatible with a stack slot
 * allocation.
 */
CodePosition
LiveInterval::firstIncompatibleUse(LAllocation alloc)
{
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (!UseCompatibleWith(usePos->use, alloc))
            return usePos->pos;
    }
    return CodePosition::MAX;
}

LiveInterval *
VirtualRegister::intervalFor(CodePosition pos)
{
    for (LiveInterval **i = intervals_.begin(); i != intervals_.end(); i++) {
        if ((*i)->covers(pos))
            return *i;
        if (pos < (*i)->end())
            break;
    }
    return nullptr;
}
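
// Note (inferred): a virtual register's intervals are ordered by code
// position, so once pos falls before an interval's end without being
// covered, no later interval can contain it and the search stops early.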

LiveInterval *
VirtualRegister::getFirstInterval()
{
    JS_ASSERT(!intervals_.empty());
    return intervals_[0];
}

// Instantiate LiveRangeAllocator for each template instance.
template bool LiveRangeAllocator<LinearScanVirtualRegister, true>::buildLivenessInfo();
template bool LiveRangeAllocator<BacktrackingVirtualRegister, false>::buildLivenessInfo();

#ifdef DEBUG
static inline bool
NextInstructionHasFixedUses(LBlock *block, LInstruction *ins)
{
    LInstructionIterator iter(block->begin(ins));
    iter++;
    for (LInstruction::InputIterator alloc(**iter); alloc.more(); alloc.next()) {
        if (alloc->isUse() && alloc->toUse()->isFixedRegister())
            return true;
    }
    return false;
}

// Returns true iff ins has a def/temp reusing the input allocation.
static bool
IsInputReused(LInstruction *ins, LUse *use)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    for (size_t i = 0; i < ins->numTemps(); i++) {
        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    return false;
}
#endif

/*
 * This function pre-allocates and initializes as much global state as possible
 * to avoid littering the algorithms with memory management cruft.
 */
template <typename VREG, bool forLSRA>
bool
LiveRangeAllocator<VREG, forLSRA>::init()
{
    if (!RegisterAllocator::init())
        return false;

    liveIn = mir->allocate<BitSet*>(graph.numBlockIds());
    if (!liveIn)
        return false;

    // Initialize fixed intervals.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        AnyRegister reg = AnyRegister::FromCode(i);
        LiveInterval *interval = LiveInterval::New(alloc(), 0);
        interval->setAllocation(LAllocation(reg));
        fixedIntervals[i] = interval;
    }

    fixedIntervalsUnion = LiveInterval::New(alloc(), 0);

    if (!vregs.init(mir, graph.numVirtualRegisters()))
        return false;

    // Build virtual register objects
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Create data structures (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH) {
                    if (!vregs[def].init(alloc(), block, *ins, def, /* isTemp */ false))
                        return false;
                }
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                if (!vregs[def].init(alloc(), block, *ins, def, /* isTemp */ true))
                    return false;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            if (!vregs[def].init(alloc(), block, phi, def, /* isTemp */ false))
                return false;
        }
    }

    return true;
}
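
// In short, init() above allocates one liveIn slot per block, one fixed
// LiveInterval pinned to each physical register, and a VirtualRegister entry
// for every non-passthrough definition, non-bogus temp, and phi in the graph.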

static void
AddRegisterToSafepoint(LSafepoint *safepoint, AnyRegister reg, const LDefinition &def)
{
    safepoint->addLiveRegister(reg);

    JS_ASSERT(def.type() == LDefinition::GENERAL ||
              def.type() == LDefinition::INT32 ||
              def.type() == LDefinition::DOUBLE ||
              def.type() == LDefinition::FLOAT32 ||
              def.type() == LDefinition::OBJECT);

    if (def.type() == LDefinition::OBJECT)
        safepoint->addGcRegister(reg.gpr());
}

/*
 * This function builds up liveness intervals for all virtual registers
 * defined in the function. Additionally, it populates the liveIn array with
 * information about which registers are live at the beginning of a block, to
 * aid resolution and reification in a later phase.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
 * SSA Form." Proceedings of the International Symposium on Code Generation
 * and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
 *
 * The algorithm operates on blocks ordered such that dominators of a block
 * are before the block itself, and such that all blocks of a loop are
 * contiguous. It proceeds backwards over the instructions in this order,
 * marking registers live at their uses, ending their live intervals at
 * definitions, and recording which registers are live at the top of every
 * block. To deal with loop backedges, variables live at the beginning of
 * a loop gain an interval covering the entire loop.
 */
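
// A sketch of the per-block flow implemented below (summary, not upstream
// documentation):
//
//   for each block, in reverse order:
//       live = union of the liveIn sets of non-backedge successors
//       add the vregs feeding successor phis to live
//       give every live vreg a range spanning the whole block
//       for each instruction, in reverse:
//           defs and temps end or pin ranges (calls add fixed constraints)
//           uses extend ranges back toward the block start
//       remove this block's phi defs from live
//       if the block is a loop header, extend live vregs across the loop
//       liveIn[block] = live
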
template <typename VREG, bool forLSRA>
bool
LiveRangeAllocator<VREG, forLSRA>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block; a definition
        // shortens the interval to its point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }
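
            // Illustrative summary: a vreg live out of the block was first
            // given a range spanning the whole block; when the reverse walk
            // above reaches its defining instruction, setFrom() trims the
            // range so it starts at the definition instead.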

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT_IF(forLSRA, inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                             ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }
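
        // Note (inferred): a use conservatively extends its vreg's interval
        // to the top of the block, since the definition may be in an earlier
        // block; when the definition is in this block, the defs loop above
        // trims the surplus via setFrom().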

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip the predecessors of
                // this block.
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If only OSR blocks remained, the work list is exhausted.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}

#ifdef DEBUG

void
LiveInterval::validateRanges()
{
    Range *prev = nullptr;

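    // Iterate from the earliest range to the latest (ranges_ is stored in
    // descending order); the unsigned index wraps past zero, ending the loop.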
    for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
        Range *range = &ranges_[i];

        JS_ASSERT(range->from < range->to);
        JS_ASSERT_IF(prev, prev->to <= range->from);
        prev = range;
    }
}

#endif // DEBUG

const char *
LiveInterval::rangesToString() const
{
#ifdef DEBUG
    if (!numRanges())
        return " empty";

    // Not reentrant!
    static char buf[1000];

    char *cursor = buf;
    char *end = cursor + sizeof(buf);

    for (size_t i = 0; i < numRanges(); i++) {
        const LiveInterval::Range *range = getRange(i);
        int n = JS_snprintf(cursor, end - cursor, " [%u,%u>", range->from.pos(), range->to.pos());
        if (n < 0)
            return " ???";
        cursor += n;
    }

    return buf;
#else
    return " ???";
#endif
}

void
LiveInterval::dump()
{
    fprintf(stderr, "v%u: index=%u allocation=%s %s\n",
            vreg(), index(), getAllocation()->toString(), rangesToString());
}
