js/src/jit/LinearScan.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/LinearScan.h"

#include "mozilla/DebugOnly.h"

#include "jit/BitSet.h"
#include "jit/IonSpewer.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

/*
 * Merge virtual register intervals into the UnhandledQueue, taking advantage
 * of their nearly-sorted ordering.
 */
void
LinearScanAllocator::enqueueVirtualRegisterIntervals()
{
    // Cursor into the unhandled queue, iterating through start positions.
    IntervalReverseIterator curr = unhandled.rbegin();

    // Start positions are roughly increasing by virtual register number,
    // but not strictly monotonic.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        LiveInterval *live = vregs[i].getInterval(0);
        if (live->numRanges() > 0) {
            setIntervalRequirement(live);

            // Iterate backward until the next highest class of start position.
            for (; curr != unhandled.rend(); curr++) {
                if (curr->start() > live->start())
                    break;
            }

            // Insert forward from the current position, thereby
            // sorting by priority within the start class.
            unhandled.enqueueForward(*curr, live);
        }
    }
}

/*
 * This function performs a preliminary allocation on the already-computed
 * live intervals, storing the result in the allocation field of the intervals
 * themselves.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Hanspeter Mössenböck. "Optimized Interval Splitting
 * in a Linear Scan Register Allocator." Proceedings of the First
 * ACM/USENIX Conference on Virtual Execution Environments. Chicago, IL,
 * USA, ACM. 2005. PDF.
 *
 * The algorithm proceeds over each interval in order of their start position.
 * If a free register is available, the register that is free for the largest
 * portion of the current interval is allocated. Otherwise, the interval
 * with the farthest-away next use is spilled to make room for this one. In all
 * cases, intervals which conflict either with other intervals or with
 * use or definition constraints are split at the point of conflict to be
 * assigned a compatible allocation later.
 */
bool
LinearScanAllocator::allocateRegisters()
{
    // The unhandled queue currently contains only spill intervals, in sorted
    // order. Intervals for virtual registers exist in sorted order based on
    // start position by vreg ID, but may have priorities that require them to
    // be reordered when adding to the unhandled queue.
    enqueueVirtualRegisterIntervals();
    unhandled.assertSorted();

    // Add fixed intervals with ranges to fixed.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        if (fixedIntervals[i]->numRanges() > 0)
            fixed.pushBack(fixedIntervals[i]);
    }

    // Iterate through all intervals in ascending start order.
    CodePosition prevPosition = CodePosition::MIN;
    while ((current = unhandled.dequeue()) != nullptr) {
        JS_ASSERT(current->getAllocation()->isUse());
        JS_ASSERT(current->numRanges() > 0);

        if (mir->shouldCancel("LSRA Allocate Registers (main loop)"))
            return false;

        CodePosition position = current->start();
        const Requirement *req = current->requirement();
        const Requirement *hint = current->hint();

        IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
                current->hasVreg() ? current->vreg() : 0, current->start().pos(),
                current->end().pos(), current->requirement()->priority());

        // Shift active intervals to the inactive or handled sets as appropriate.
        if (position != prevPosition) {
            JS_ASSERT(position > prevPosition);
            prevPosition = position;

            for (IntervalIterator i(active.begin()); i != active.end(); ) {
                LiveInterval *it = *i;
                JS_ASSERT(it->numRanges() > 0);

                if (it->end() <= position) {
                    i = active.removeAt(i);
                    finishInterval(it);
                } else if (!it->covers(position)) {
                    i = active.removeAt(i);
                    inactive.pushBack(it);
                } else {
                    i++;
                }
            }

            // Shift inactive intervals to the active or handled sets as appropriate.
            for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
                LiveInterval *it = *i;
                JS_ASSERT(it->numRanges() > 0);

                if (it->end() <= position) {
                    i = inactive.removeAt(i);
                    finishInterval(it);
                } else if (it->covers(position)) {
                    i = inactive.removeAt(i);
                    active.pushBack(it);
                } else {
                    i++;
                }
            }
        }

        // Sanity check all intervals in all sets.
        validateIntervals();

        // If the interval has a hard requirement, grant it.
        if (req->kind() == Requirement::FIXED) {
            JS_ASSERT(!req->allocation().isRegister());
            if (!assign(req->allocation()))
                return false;
            continue;
        }

        // If we don't really need this in a register, don't allocate one.
        if (req->kind() != Requirement::REGISTER && hint->kind() == Requirement::NONE) {
            IonSpew(IonSpew_RegAlloc, "  Eagerly spilling virtual register %d",
                    current->hasVreg() ? current->vreg() : 0);
            if (!spill())
                return false;
            continue;
        }

        // Try to allocate a free register.
        IonSpew(IonSpew_RegAlloc, "  Attempting free register allocation");
        CodePosition bestFreeUntil;
        AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
        if (bestCode != AnyRegister::Invalid) {
            AnyRegister best = AnyRegister::FromCode(bestCode);
            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());

            // Split when the register is next needed, if necessary.
            if (bestFreeUntil < current->end()) {
                if (!splitInterval(current, bestFreeUntil))
                    return false;
            }
            if (!assign(LAllocation(best)))
                return false;

            continue;
        }

        IonSpew(IonSpew_RegAlloc, "  Attempting blocked register allocation");

        // If we absolutely need a register or our next use is closer than the
        // selected blocking register then we spill the blocker. Otherwise, we
        // spill the current interval.
        CodePosition bestNextUsed;
        bestCode = findBestBlockedRegister(&bestNextUsed);
        if (bestCode != AnyRegister::Invalid &&
            (req->kind() == Requirement::REGISTER || hint->pos() < bestNextUsed))
        {
            AnyRegister best = AnyRegister::FromCode(bestCode);
            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());

            if (!splitBlockingIntervals(LAllocation(best)))
                return false;
            if (!assign(LAllocation(best)))
                return false;

            continue;
        }

        IonSpew(IonSpew_RegAlloc, "  No registers available to spill");
        JS_ASSERT(req->kind() == Requirement::NONE);

        if (!spill())
            return false;
    }

    validateAllocations();
    validateVirtualRegisters();

    return true;
}

/*
 * This function iterates over control flow edges in the function and resolves
 * conflicts wherein a predecessor of a block has a different allocation for a
 * virtual register than the block itself does. It also turns phis into moves.
 *
 * The algorithm is based on the one published in "Linear Scan Register
 * Allocation on SSA Form" by C. Wimmer and M. Franz (CGO 2010); the
 * interval-splitting algorithm it builds on is cited in full above.
 */
bool
LinearScanAllocator::resolveControlFlow()
{
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("LSRA Resolve Control Flow (main loop)"))
            return false;

        LBlock *successor = graph.getBlock(i);
        MBasicBlock *mSuccessor = successor->mir();
        if (mSuccessor->numPredecessors() < 1)
            continue;

        // Resolve phis to moves.
        for (size_t j = 0; j < successor->numPhis(); j++) {
            LPhi *phi = successor->getPhi(j);
            JS_ASSERT(phi->numDefs() == 1);
            LDefinition *def = phi->getDef(0);
            LinearScanVirtualRegister *vreg = &vregs[def];
            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
            JS_ASSERT(to);

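            // Emit a move along each incoming edge, from the allocation of
            // the phi operand at the predecessor's exit to the phi's own
            // allocation at the successor's entry.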
            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);

                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
                JS_ASSERT(from);

                if (!moveAtExit(predecessor, from, to, def->type()))
                    return false;
            }

            if (vreg->mustSpillAtDefinition() && !to->isSpill()) {
                // Make sure this phi is spilled at the loop header.
                LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
                if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill(),
                                def->type()))
                    return false;
            }
        }

        // Resolve split intervals with moves.
        BitSet *live = liveIn[mSuccessor->id()];

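        // For each virtual register live into this block, connect the
        // interval covering each predecessor's exit to the interval covering
        // this block's entry.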
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            LinearScanVirtualRegister *vreg = &vregs[*liveRegId];
            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
            JS_ASSERT(to);

            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
                LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
                JS_ASSERT(from);

                if (*from->getAllocation() == *to->getAllocation())
                    continue;

                // If this value is spilled at its definition, other stores
                // are redundant.
                if (vreg->mustSpillAtDefinition() && to->getAllocation()->isStackSlot()) {
                    JS_ASSERT(vreg->canonicalSpill());
                    JS_ASSERT(*vreg->canonicalSpill() == *to->getAllocation());
                    continue;
                }

                if (mSuccessor->numPredecessors() > 1) {
                    JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
                    if (!moveAtExit(predecessor, from, to, vreg->type()))
                        return false;
                } else {
                    if (!moveAtEntry(successor, from, to, vreg->type()))
                        return false;
                }
            }
        }
    }

    return true;
}

bool
LinearScanAllocator::moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to,
                                    LDefinition::Type type)
{
    if (*from == *to)
        return true;
    LMoveGroup *moves = getInputMoveGroup(pos);
    return moves->add(from, to, type);
}

static inline void
SetOsiPointUses(LiveInterval *interval, CodePosition defEnd, const LAllocation &allocation)
{
    // Moves are inserted after OsiPoint instructions. This function sets
    // any OsiPoint uses of this interval to the allocation of the value
    // before the move.

    JS_ASSERT(interval->index() == 0);

    for (UsePositionIterator usePos(interval->usesBegin());
         usePos != interval->usesEnd();
         usePos++)
    {
        if (usePos->pos > defEnd)
            break;
        *static_cast<LAllocation *>(usePos->use) = allocation;
    }
}

/*
 * This function takes the allocations assigned to the live intervals and
 * replaces all virtual registers in the function with the allocations
 * corresponding to the appropriate interval.
 */
bool
LinearScanAllocator::reifyAllocations()
{
    // Iterate over each interval, ensuring that definitions are visited before uses.
    for (size_t j = 1; j < graph.numVirtualRegisters(); j++) {
        LinearScanVirtualRegister *reg = &vregs[j];
        if (mir->shouldCancel("LSRA Reification (main loop)"))
            return false;

        for (size_t k = 0; k < reg->numIntervals(); k++) {
            LiveInterval *interval = reg->getInterval(k);
            JS_ASSERT(reg == &vregs[interval->vreg()]);
            if (!interval->numRanges())
                continue;

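            // Rewrite each use of this interval with the interval's
            // allocation; fixed-register uses additionally get a move from
            // that allocation into the required register.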
            UsePositionIterator usePos(interval->usesBegin());
            for (; usePos != interval->usesEnd(); usePos++) {
                if (usePos->use->isFixedRegister()) {
                    LiveInterval *to = fixedIntervals[GetFixedRegister(reg->def(), usePos->use).code()];

                    *static_cast<LAllocation *>(usePos->use) = *to->getAllocation();
                    if (!moveInput(usePos->pos, interval, to, reg->type()))
                        return false;
                } else {
                    JS_ASSERT(UseCompatibleWith(usePos->use, *interval->getAllocation()));
                    *static_cast<LAllocation *>(usePos->use) = *interval->getAllocation();
                }
            }

            // Reify the def of this interval if it's the first one.
            if (interval->index() == 0) {
                LDefinition *def = reg->def();
                LAllocation *spillFrom;

                // Insert the moves after any OsiPoint or Nop instructions
                // following this one. See minimalDefEnd for more info.
                CodePosition defEnd = minimalDefEnd(reg->ins());

                if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
                    AnyRegister fixedReg = def->output()->toRegister();
                    LiveInterval *from = fixedIntervals[fixedReg.code()];

                    // If we insert the move after an OsiPoint that uses this vreg,
                    // it should use the fixed register instead.
                    SetOsiPointUses(interval, defEnd, LAllocation(fixedReg));

                    if (!moveAfter(defEnd, from, interval, def->type()))
                        return false;
                    spillFrom = from->getAllocation();
                } else {
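                    // A MUST_REUSE_INPUT def takes over its input's operand
                    // slot: rewrite the operand to the def's allocation, and
                    // emit a move from the operand's original allocation so
                    // the input value is still available at the instruction.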
                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
                        LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);

                        JS_ASSERT(!inputAlloc->isUse());

                        *inputAlloc = *interval->getAllocation();
                        if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc, def->type()))
                            return false;
                    }

                    JS_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
                    def->setOutput(*interval->getAllocation());

                    spillFrom = interval->getAllocation();
                }

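                // Snapshot entries with the RECOVERED_INPUT policy are
                // rewritten to refer to the def's reified output allocation.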
                if (reg->ins()->recoversInput()) {
                    LSnapshot *snapshot = reg->ins()->snapshot();
                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
                        LAllocation *entry = snapshot->getEntry(i);
                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
                            *entry = *def->output();
                    }
                }

                if (reg->mustSpillAtDefinition() && !reg->ins()->isPhi() &&
                    (*reg->canonicalSpill() != *spillFrom))
                {
                    // If we move the spill after an OsiPoint, the OsiPoint should
                    // use the original location instead.
                    SetOsiPointUses(interval, defEnd, *spillFrom);

                    // Insert a spill after this instruction (or after any OsiPoint
                    // or Nop instructions). Note that we explicitly ignore phis,
                    // which should have been handled in resolveControlFlow().
                    LMoveGroup *moves = getMoveGroupAfter(defEnd);
                    if (!moves->add(spillFrom, reg->canonicalSpill(), def->type()))
                        return false;
                }
            }
            else if (interval->start() > inputOf(insData[interval->start()].block()->firstId()) &&
                     (!reg->canonicalSpill() ||
                      (reg->canonicalSpill() == interval->getAllocation() &&
                       !reg->mustSpillAtDefinition()) ||
                      *reg->canonicalSpill() != *interval->getAllocation()))
            {
                // If this virtual register has no canonical spill location, this
                // is the first spill to that location, or this is a move to somewhere
                // completely different, we have to emit a move for this interval.
                // Don't do this if the interval starts at the first instruction of the
                // block; this case should have been handled by resolveControlFlow().
                //
                // If the interval starts at the output half of an instruction, we have to
                // emit the move *after* this instruction, to prevent clobbering an input
                // register.
                LiveInterval *prevInterval = reg->getInterval(interval->index() - 1);
                CodePosition start = interval->start();
                InstructionData *data = &insData[start];

                JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));

                if (start.subpos() == CodePosition::INPUT) {
                    if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                } else {
                    if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                }

                // Mark this interval's spill position, if needed.
                if (reg->canonicalSpill() == interval->getAllocation() &&
                    !reg->mustSpillAtDefinition())
                {
                    reg->setSpillPosition(interval->start());
                }
            }

            addLiveRegistersForInterval(reg, interval);
        }
    }

    // Set the graph overall stack height.
    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());

    return true;
}

inline bool
LinearScanAllocator::isSpilledAt(LiveInterval *interval, CodePosition pos)
{
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
    if (!reg->canonicalSpill() || !reg->canonicalSpill()->isStackSlot())
        return false;

    if (reg->mustSpillAtDefinition()) {
        JS_ASSERT(reg->spillPosition() <= pos);
        return true;
    }

    return interval->getAllocation() == reg->canonicalSpill();
}

bool
LinearScanAllocator::populateSafepoints()
{
    size_t firstSafepoint = 0;

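    // Virtual register ids increase with definition position, so the first
    // safepoint that can fall within a register's range only moves forward;
    // carrying firstSafepoint across iterations avoids rescanning earlier
    // safepoints.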
    for (uint32_t i = 0; i < vregs.numVirtualRegisters(); i++) {
        LinearScanVirtualRegister *reg = &vregs[i];

        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
            continue;

        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
        if (firstSafepoint >= graph.numSafepoints())
            break;

        // Find the furthest endpoint.
        size_t lastInterval = reg->numIntervals() - 1;
        CodePosition end = reg->getInterval(lastInterval)->end();

        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
            LInstruction *ins = graph.getSafepoint(j);

            // Stop processing safepoints if we know we're out of this virtual
            // register's range.
            if (end < inputOf(ins))
                break;

            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
            // is not used with gcthings or nunboxes, or we would have to add the input reg
            // to this safepoint.
            if (ins == reg->ins() && !reg->isTemp()) {
                DebugOnly<LDefinition*> def = reg->def();
                JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
                             def->type() == LDefinition::GENERAL ||
                             def->type() == LDefinition::INT32 ||
                             def->type() == LDefinition::FLOAT32 ||
                             def->type() == LDefinition::DOUBLE);
                continue;
            }

            LSafepoint *safepoint = ins->safepoint();

            if (IsSlotsOrElements(reg)) {
                LiveInterval *interval = reg->intervalFor(inputOf(ins));
                if (!interval)
                    continue;

                LAllocation *a = interval->getAllocation();
                if (a->isGeneralReg() && !ins->isCall())
                    safepoint->addSlotsOrElementsRegister(a->toGeneralReg()->reg());

                if (isSpilledAt(interval, inputOf(ins))) {
                    if (!safepoint->addSlotsOrElementsSlot(reg->canonicalSpillSlot()))
                        return false;
                }
            } else if (!IsNunbox(reg)) {
                JS_ASSERT(IsTraceable(reg));

                LiveInterval *interval = reg->intervalFor(inputOf(ins));
                if (!interval)
                    continue;

                LAllocation *a = interval->getAllocation();
                if (a->isGeneralReg() && !ins->isCall()) {
#ifdef JS_PUNBOX64
                    if (reg->type() == LDefinition::BOX) {
                        safepoint->addValueRegister(a->toGeneralReg()->reg());
                    } else
#endif
                    {
                        safepoint->addGcRegister(a->toGeneralReg()->reg());
                    }
                }

                if (isSpilledAt(interval, inputOf(ins))) {
#ifdef JS_PUNBOX64
                    if (reg->type() == LDefinition::BOX) {
                        if (!safepoint->addValueSlot(reg->canonicalSpillSlot()))
                            return false;
                    } else
#endif
                    {
                        if (!safepoint->addGcSlot(reg->canonicalSpillSlot()))
                            return false;
                    }
                }
#ifdef JS_NUNBOX32
            } else {
                LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
                LinearScanVirtualRegister *type = (reg->type() == LDefinition::TYPE) ? reg : other;
                LinearScanVirtualRegister *payload = (reg->type() == LDefinition::PAYLOAD) ? reg : other;
                LiveInterval *typeInterval = type->intervalFor(inputOf(ins));
                LiveInterval *payloadInterval = payload->intervalFor(inputOf(ins));

                if (!typeInterval && !payloadInterval)
                    continue;

                LAllocation *typeAlloc = typeInterval->getAllocation();
                LAllocation *payloadAlloc = payloadInterval->getAllocation();

                // If the payload is an argument, we'll scan that explicitly as
                // part of the frame. It is therefore safe to not add any
                // safepoint entry, as long as the vreg does not have a stack
                // slot as canonical spill slot.
                if (payloadAlloc->isArgument() &&
                    (!payload->canonicalSpill() || payload->canonicalSpill() == payloadAlloc))
                {
                    continue;
                }

                if (isSpilledAt(typeInterval, inputOf(ins)) &&
                    isSpilledAt(payloadInterval, inputOf(ins)))
                {
                    // These two components of the Value are spilled
                    // contiguously, so simply keep track of the base slot.
                    uint32_t payloadSlot = payload->canonicalSpillSlot();
                    uint32_t slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot);
                    if (!safepoint->addValueSlot(slot))
                        return false;
                }

                if (!ins->isCall() &&
                    (!isSpilledAt(typeInterval, inputOf(ins)) || payloadAlloc->isGeneralReg()))
                {
                    // Either the payload is on the stack but the type is
                    // in a register, or the payload is in a register. In
                    // both cases, we don't have a contiguous spill so we
                    // add a torn entry.
                    if (!safepoint->addNunboxParts(*typeAlloc, *payloadAlloc))
                        return false;

                    // If the nunbox is stored in multiple places, we need to
                    // trace all of them to allow the GC to relocate objects.
                    if (payloadAlloc->isGeneralReg() && isSpilledAt(payloadInterval, inputOf(ins))) {
                        if (!safepoint->addNunboxParts(*typeAlloc, *payload->canonicalSpill()))
                            return false;
                    }
                }
#endif
            }
        }

#ifdef JS_NUNBOX32
        if (IsNunbox(reg)) {
            // Skip past the next half of this nunbox so we don't track the
            // same slot twice.
            JS_ASSERT(&vregs[reg->def()->virtualRegister() + 1] == otherHalfOfNunbox(reg));
            i++;
        }
#endif
    }

    return true;
}

/*
 * Split the given interval at the given position, and add the created
 * interval to the unhandled queue.
 */
bool
LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
{
    // Make sure we're actually splitting this interval, not some other
    // interval in the same virtual register.
    JS_ASSERT(interval->start() < pos && pos < interval->end());

    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    // "Bogus" intervals cannot be split.
    JS_ASSERT(reg);

    // Do the split.
    LiveInterval *newInterval = LiveInterval::New(alloc(), interval->vreg(), interval->index() + 1);
    if (!interval->splitFrom(pos, newInterval))
        return false;

    JS_ASSERT(interval->numRanges() > 0);
    JS_ASSERT(newInterval->numRanges() > 0);

    if (!reg->addInterval(newInterval))
        return false;

    IonSpew(IonSpew_RegAlloc, "  Split interval to %u = [%u, %u]/[%u, %u]",
            interval->vreg(), interval->start().pos(),
            interval->end().pos(), newInterval->start().pos(),
            newInterval->end().pos());

    // We always want to enqueue the resulting split. We always split
    // forward, and we never want to handle something forward of our
    // current position.
    setIntervalRequirement(newInterval);

    // splitInterval() is usually called to split the node that has just been
    // popped from the unhandled queue. Therefore the split will likely be
    // closer to the lower start positions in the queue.
    unhandled.enqueueBackward(newInterval);

    return true;
}

bool
LinearScanAllocator::splitBlockingIntervals(LAllocation allocation)
{
    JS_ASSERT(allocation.isRegister());

    // Split current before the next fixed use.
    LiveInterval *fixed = fixedIntervals[allocation.toRegister().code()];
    if (fixed->numRanges() > 0) {
        CodePosition fixedPos = current->intersect(fixed);
        if (fixedPos != CodePosition::MIN) {
            JS_ASSERT(fixedPos > current->start());
            JS_ASSERT(fixedPos < current->end());
            if (!splitInterval(current, fixedPos))
                return false;
        }
    }

    // Split the blocking interval if it exists.
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
            IonSpew(IonSpew_RegAlloc, "  Splitting active interval %u = [%u, %u]",
                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());

            JS_ASSERT(i->covers(current->start()));
            JS_ASSERT(i->start() != current->start());

            if (!splitInterval(*i, current->start()))
                return false;

            LiveInterval *it = *i;
            active.removeAt(i);
            finishInterval(it);
            break;
        }
    }

    // Split any inactive intervals at the next live point.
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
            IonSpew(IonSpew_RegAlloc, "  Splitting inactive interval %u = [%u, %u]",
                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());

            LiveInterval *it = *i;
            CodePosition nextActive = it->nextCoveredAfter(current->start());
            JS_ASSERT(nextActive != CodePosition::MIN);

            if (!splitInterval(it, nextActive))
                return false;

            i = inactive.removeAt(i);
            finishInterval(it);
        } else {
            i++;
        }
    }

    return true;
}

/*
 * Assign the current interval the given allocation, splitting conflicting
 * intervals as necessary to make the allocation stick.
 */
bool
LinearScanAllocator::assign(LAllocation allocation)
{
    if (allocation.isRegister())
        IonSpew(IonSpew_RegAlloc, "Assigning register %s", allocation.toRegister().name());
    current->setAllocation(allocation);

    // Split this interval at the next incompatible one.
    LinearScanVirtualRegister *reg = &vregs[current->vreg()];
    if (reg) {
        CodePosition splitPos = current->firstIncompatibleUse(allocation);
        if (splitPos != CodePosition::MAX) {
            // Split before the incompatible use. This ensures the use position is
            // part of the second half of the interval and guarantees we never split
            // at the end (zero-length intervals are invalid).
            splitPos = splitPos.previous();
            JS_ASSERT(splitPos < current->end());
            if (!splitInterval(current, splitPos))
                return false;
        }
    }

    bool useAsCanonicalSpillSlot = allocation.isMemory();
    // Only canonically spill argument values when frame arguments are not
    // modified in the body.
    if (mir->modifiesFrameArguments())
        useAsCanonicalSpillSlot = allocation.isStackSlot();

    if (reg && useAsCanonicalSpillSlot) {
        if (reg->canonicalSpill()) {
            JS_ASSERT(allocation == *reg->canonicalSpill());

            // This interval is spilled more than once, so just always spill
            // it at its definition.
            reg->setSpillAtDefinition(outputOf(reg->ins()));
        } else {
            reg->setCanonicalSpill(current->getAllocation());

            // If this spill is inside a loop, and the definition is outside
            // the loop, instead move the spill to outside the loop.
            InstructionData *other = &insData[current->start()];
            uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth();
            uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth();
            if (loopDepthAtSpill > loopDepthAtDef)
                reg->setSpillAtDefinition(outputOf(reg->ins()));
        }
    }

    active.pushBack(current);

    return true;
}

uint32_t
LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
{
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    SlotList *freed;
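    // Pick the free-slot list matching this register's slot width: on 64-bit
    // targets, pointer-sized values and boxed Values occupy double-wide
    // slots, so they share the double list.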
    if (reg->type() == LDefinition::DOUBLE)
        freed = &finishedDoubleSlots_;
#if JS_BITS_PER_WORD == 64
    else if (reg->type() == LDefinition::GENERAL ||
             reg->type() == LDefinition::OBJECT ||
             reg->type() == LDefinition::SLOTS)
        freed = &finishedDoubleSlots_;
#endif
#ifdef JS_PUNBOX64
    else if (reg->type() == LDefinition::BOX)
        freed = &finishedDoubleSlots_;
#endif
#ifdef JS_NUNBOX32
    else if (IsNunbox(reg))
        freed = &finishedNunboxSlots_;
#endif
    else
        freed = &finishedSlots_;

    if (!freed->empty()) {
        LiveInterval *maybeDead = freed->back();
        if (maybeDead->end() < reg->getInterval(0)->start()) {
            // This spill slot is dead before the start of the interval trying
            // to reuse the slot, so reuse is safe. Otherwise, we could
            // encounter a situation where a stack slot is allocated and freed
            // inside a loop, but the same allocation is then used to hold a
            // loop-carried value.
            //
            // Note that we don't reuse the dead slot if its interval ends right
            // before the current interval, to avoid conflicting slot -> reg and
            // reg -> slot moves in the same movegroup.
            freed->popBack();
            LinearScanVirtualRegister *dead = &vregs[maybeDead->vreg()];
#ifdef JS_NUNBOX32
            if (IsNunbox(dead))
                return BaseOfNunboxSlot(dead->type(), dead->canonicalSpillSlot());
#endif
            return dead->canonicalSpillSlot();
        }
    }

    return stackSlotAllocator.allocateSlot(reg->type());
}

bool
LinearScanAllocator::spill()
{
    IonSpew(IonSpew_RegAlloc, "  Decided to spill current interval");

    // We can't spill bogus intervals.
    JS_ASSERT(current->hasVreg());

    LinearScanVirtualRegister *reg = &vregs[current->vreg()];

    if (reg->canonicalSpill()) {
        IonSpew(IonSpew_RegAlloc, "  Allocating canonical spill location");

        return assign(*reg->canonicalSpill());
    }

    uint32_t stackSlot;
#if defined JS_NUNBOX32
    if (IsNunbox(reg)) {
        LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);

        if (other->canonicalSpill()) {
            // The other half of this nunbox already has a spill slot. To
            // ensure the Value is spilled contiguously, use the other half (it
            // was allocated double-wide).
            JS_ASSERT(other->canonicalSpill()->isStackSlot());
            stackSlot = BaseOfNunboxSlot(other->type(), other->canonicalSpillSlot());
        } else {
            // No canonical spill location exists for this nunbox yet. Allocate
            // one.
            stackSlot = allocateSlotFor(current);
        }
        stackSlot -= OffsetOfNunboxSlot(reg->type());
    } else
#endif
    {
        stackSlot = allocateSlotFor(current);
    }
    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());

    return assign(LStackSlot(stackSlot));
}

void
LinearScanAllocator::freeAllocation(LiveInterval *interval, LAllocation *alloc)
{
    LinearScanVirtualRegister *mine = &vregs[interval->vreg()];
    if (!IsNunbox(mine)) {
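        // Return this interval's stack slot, if any, to the free list whose
        // width matches the register type, so allocateSlotFor() can reuse it.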
        if (alloc->isStackSlot()) {
            if (mine->type() == LDefinition::DOUBLE)
                finishedDoubleSlots_.append(interval);
#if JS_BITS_PER_WORD == 64
            else if (mine->type() == LDefinition::GENERAL ||
                     mine->type() == LDefinition::OBJECT ||
                     mine->type() == LDefinition::SLOTS)
                finishedDoubleSlots_.append(interval);
#endif
#ifdef JS_PUNBOX64
            else if (mine->type() == LDefinition::BOX)
                finishedDoubleSlots_.append(interval);
#endif
            else
                finishedSlots_.append(interval);
        }
        return;
    }

#ifdef JS_NUNBOX32
    // Special handling for nunboxes. We can only free the stack slot once we
    // know both intervals have been finished.
    LinearScanVirtualRegister *other = otherHalfOfNunbox(mine);
    if (other->finished()) {
        if (!mine->canonicalSpill() && !other->canonicalSpill())
            return;

        JS_ASSERT_IF(mine->canonicalSpill() && other->canonicalSpill(),
                     mine->canonicalSpill()->isStackSlot() == other->canonicalSpill()->isStackSlot());

        LinearScanVirtualRegister *candidate = mine->canonicalSpill() ? mine : other;
        if (!candidate->canonicalSpill()->isStackSlot())
            return;

        finishedNunboxSlots_.append(candidate->lastInterval());
    }
#endif
}

void
LinearScanAllocator::finishInterval(LiveInterval *interval)
{
    LAllocation *alloc = interval->getAllocation();
    JS_ASSERT(!alloc->isUse());

    // Toss out the bogus interval now that it's run its course.
    if (!interval->hasVreg())
        return;

    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    // All spills should be equal to the canonical spill location.
    JS_ASSERT_IF(alloc->isStackSlot(), *alloc == *reg->canonicalSpill());

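    // Once the last interval of this virtual register is handled, its
    // allocation (and spill slot, if any) can be released for reuse.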
    bool lastInterval = interval->index() == (reg->numIntervals() - 1);
    if (lastInterval) {
        freeAllocation(interval, alloc);
        reg->setFinished();
    }

    handled.pushBack(interval);
}

/*
 * This function locates a register which may be assigned by the register
 * allocator and is not assigned to any active interval. The register which
 * is free for the longest period of time is then returned.
 */
AnyRegister::Code
LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
{
    IonSpew(IonSpew_RegAlloc, "  Computing freeUntilPos");

    // Compute free-until positions for all registers.
    CodePosition freeUntilPos[AnyRegister::Total];
    bool needFloat = vregs[current->vreg()].isFloatReg();
    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
        AnyRegister reg = regs.takeAny(needFloat);
        freeUntilPos[reg.code()] = CodePosition::MAX;
    }
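    // An active interval pins its register for all of current, so the
    // register is not free at all; an inactive interval only blocks its
    // register from the point where it next intersects current.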
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            IonSpew(IonSpew_RegAlloc, "  Register %s not free", reg.name());
            freeUntilPos[reg.code()] = CodePosition::MIN;
        }
    }
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            CodePosition pos = current->intersect(*i);
            if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                freeUntilPos[reg.code()] = pos;
                IonSpew(IonSpew_RegAlloc, "  Register %s free until %u", reg.name(), pos.pos());
            }
        }
    }

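    // Fixed intervals cap freeUntilPos as well; a fixed use falling exactly
    // at current's start disqualifies the register outright.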
    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
    if (fixedPos != CodePosition::MIN) {
        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
            AnyRegister reg = i->getAllocation()->toRegister();
            if (freeUntilPos[reg.code()] != CodePosition::MIN) {
                CodePosition pos = current->intersect(*i);
                if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                    freeUntilPos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
                    IonSpew(IonSpew_RegAlloc, "  Register %s free until %u", reg.name(), pos.pos());
                }
            }
        }
    }

    AnyRegister::Code bestCode = AnyRegister::Invalid;
    if (current->index()) {
        // As an optimization, use the allocation from the previous interval if
        // it is available.
        LiveInterval *previous = vregs[current->vreg()].getInterval(current->index() - 1);
        LAllocation *alloc = previous->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister prevReg = alloc->toRegister();
            if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
                bestCode = prevReg.code();
        }
    }

    // Assign the register suggested by the hint if it's free.
    const Requirement *hint = current->hint();
    if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
        AnyRegister hintReg = hint->allocation().toRegister();
        if (freeUntilPos[hintReg.code()] > hint->pos())
            bestCode = hintReg.code();
    } else if (hint->kind() == Requirement::SAME_AS_OTHER) {
        LiveInterval *other = vregs[hint->virtualRegister()].intervalFor(hint->pos());
        if (other && other->getAllocation()->isRegister()) {
            AnyRegister hintReg = other->getAllocation()->toRegister();
            if (freeUntilPos[hintReg.code()] > hint->pos())
                bestCode = hintReg.code();
        }
    }

    if (bestCode == AnyRegister::Invalid) {
        // If all else fails, search freeUntilPos for the largest value.
        for (uint32_t i = 0; i < AnyRegister::Total; i++) {
            if (freeUntilPos[i] == CodePosition::MIN)
                continue;
            if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
                bestCode = AnyRegister::Code(i);
        }
    }

    if (bestCode != AnyRegister::Invalid)
        *freeUntil = freeUntilPos[bestCode];
    return bestCode;
}

/*
 * This function locates a register which is assigned to an active interval,
 * and returns the one with the furthest-away next use. As a side effect,
 * the nextUsePos array is updated with the next use position of all active
 * intervals for use elsewhere in the algorithm.
 */
AnyRegister::Code
LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
{
    IonSpew(IonSpew_RegAlloc, "  Computing nextUsePos");

    // Compute next-used positions for all registers.
    CodePosition nextUsePos[AnyRegister::Total];
    bool needFloat = vregs[current->vreg()].isFloatReg();
    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
        AnyRegister reg = regs.takeAny(needFloat);
        nextUsePos[reg.code()] = CodePosition::MAX;
    }
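    // An interval starting exactly at current's start was only just
    // allocated, so its register must not be chosen for spilling; otherwise,
    // record the interval's next use after current's start.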
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            if (i->start() == current->start()) {
                nextUsePos[reg.code()] = CodePosition::MIN;
                IonSpew(IonSpew_RegAlloc, "  Disqualifying %s due to recency", reg.name());
            } else if (nextUsePos[reg.code()] != CodePosition::MIN) {
                nextUsePos[reg.code()] = i->nextUsePosAfter(current->start());
                IonSpew(IonSpew_RegAlloc, "  Register %s next used %u", reg.name(),
                        nextUsePos[reg.code()].pos());
            }
        }
    }
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            CodePosition pos = i->nextUsePosAfter(current->start());
            if (pos < nextUsePos[reg.code()]) {
                nextUsePos[reg.code()] = pos;
                IonSpew(IonSpew_RegAlloc, "  Register %s next used %u", reg.name(), pos.pos());
            }
        }
    }

    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
    if (fixedPos != CodePosition::MIN) {
        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
            AnyRegister reg = i->getAllocation()->toRegister();
            if (nextUsePos[reg.code()] != CodePosition::MIN) {
                CodePosition pos = i->intersect(current);
                if (pos != CodePosition::MIN && pos < nextUsePos[reg.code()]) {
                    nextUsePos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
                    IonSpew(IonSpew_RegAlloc, "  Register %s next used %u (fixed)", reg.name(), pos.pos());
                }
            }
        }
    }

    // Search nextUsePos for the largest value.
    AnyRegister::Code bestCode = AnyRegister::Invalid;
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        if (nextUsePos[i] == CodePosition::MIN)
            continue;
        if (bestCode == AnyRegister::Invalid || nextUsePos[i] > nextUsePos[bestCode])
            bestCode = AnyRegister::Code(i);
    }

    if (bestCode != AnyRegister::Invalid)
        *nextUsed = nextUsePos[bestCode];
    return bestCode;
}

/*
 * Two intervals can coexist if any of the following conditions is met:
 *
 *   - The intervals do not intersect.
 *   - The intervals have different allocations.
 *   - The intervals have the same allocation, but the allocation may be
 *     shared.
 *
 * Intuitively, it is a bug if any allocated intervals exist which can not
 * coexist.
 */
bool
LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
{
    LAllocation *aa = a->getAllocation();
    LAllocation *ba = b->getAllocation();
    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
        return a->intersect(b) == CodePosition::MIN;
    return true;
}

#ifdef DEBUG
/*
 * Ensure intervals appear in exactly the appropriate one of {active,inactive,
 * handled}, and that active and inactive intervals do not conflict. Handled
 * intervals are checked for conflicts in validateAllocations for performance
 * reasons.
 */
void
LinearScanAllocator::validateIntervals()
{
    if (!js_JitOptions.checkGraphConsistency)
        return;

    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        JS_ASSERT(i->numRanges() > 0);
        JS_ASSERT(i->covers(current->start()));

        for (IntervalIterator j(active.begin()); j != i; j++)
            JS_ASSERT(canCoexist(*i, *j));
    }

    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        JS_ASSERT(i->numRanges() > 0);
        JS_ASSERT(i->end() >= current->start());
        JS_ASSERT(!i->covers(current->start()));

        for (IntervalIterator j(active.begin()); j != active.end(); j++) {
            JS_ASSERT(*i != *j);
            JS_ASSERT(canCoexist(*i, *j));
        }
        for (IntervalIterator j(inactive.begin()); j != i; j++)
            JS_ASSERT(canCoexist(*i, *j));
    }

    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
        JS_ASSERT(!i->getAllocation()->isUse());
        JS_ASSERT(i->numRanges() > 0);
        if (i->getAllocation()->isRegister()) {
            JS_ASSERT(i->end() <= current->start());
            JS_ASSERT(!i->covers(current->start()));
        }

        for (IntervalIterator j(active.begin()); j != active.end(); j++)
            JS_ASSERT(*i != *j);
        for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
            JS_ASSERT(*i != *j);
    }
}

/*
 * This function performs a nice, expensive check that all intervals
 * in the function can coexist with every other interval.
 */
void
LinearScanAllocator::validateAllocations()
{
    if (!js_JitOptions.checkGraphConsistency)
        return;

    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
        for (IntervalIterator j(handled.begin()); j != i; j++) {
            JS_ASSERT(*i != *j);
            JS_ASSERT(canCoexist(*i, *j));
        }
        LinearScanVirtualRegister *reg = &vregs[i->vreg()];
        bool found = false;
        for (size_t j = 0; j < reg->numIntervals(); j++) {
            if (reg->getInterval(j) == *i) {
                JS_ASSERT(j == i->index());
                found = true;
                break;
            }
        }
        JS_ASSERT(found);
    }
}

#endif // DEBUG

bool
LinearScanAllocator::go()
{
    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");

    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
    if (!buildLivenessInfo())
        return false;
    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");

    if (mir->shouldCancel("LSRA Liveness"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning preliminary register allocation");
    if (!allocateRegisters())
        return false;
    IonSpew(IonSpew_RegAlloc, "Preliminary register allocation complete");

    if (mir->shouldCancel("LSRA Preliminary Regalloc"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning control flow resolution");
    if (!resolveControlFlow())
        return false;
    IonSpew(IonSpew_RegAlloc, "Control flow resolution complete");

    if (mir->shouldCancel("LSRA Control Flow"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning register allocation reification");
    if (!reifyAllocations())
        return false;
    IonSpew(IonSpew_RegAlloc, "Register allocation reification complete");

    if (mir->shouldCancel("LSRA Reification"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning safepoint population");
    if (!populateSafepoints())
        return false;
    IonSpew(IonSpew_RegAlloc, "Safepoint population complete");

    if (mir->shouldCancel("LSRA Safepoints"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Register allocation complete");

    return true;
}
michael@0 1281
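/*
 * Compute the requirement (a hard constraint) and the hint (a soft
 * preference) for an interval belonging to a virtual register: the
 * definition's policy constrains the first interval, uses at the very
 * start of the interval contribute requirements, and remaining uses
 * contribute hints, with FIXED uses taking precedence over plain
 * REGISTER uses.
 */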
void
LinearScanAllocator::setIntervalRequirement(LiveInterval *interval)
{
    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
    JS_ASSERT(interval->hint()->kind() == Requirement::NONE);

    // This function computes requirements for intervals belonging to a
    // virtual register; other types of intervals should have their
    // requirements set manually.
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    if (interval->index() == 0) {
        // The first interval is the definition, so deal with any definition
        // constraints/hints.

        if (reg->def()->policy() == LDefinition::PRESET) {
            // Preset policies get a FIXED requirement or hint.
            if (reg->def()->output()->isRegister())
                interval->setHint(Requirement(*reg->def()->output()));
            else
                interval->setRequirement(Requirement(*reg->def()->output()));
        } else if (reg->def()->policy() == LDefinition::MUST_REUSE_INPUT) {
            // Reuse policies get either a FIXED requirement or a SAME_AS hint.
            LUse *use = reg->ins()->getOperand(reg->def()->getReusedInput())->toUse();
            interval->setRequirement(Requirement(Requirement::REGISTER));
            interval->setHint(Requirement(use->virtualRegister(), interval->start().previous()));
        } else if (reg->ins()->isPhi()) {
            // Phis don't have any requirements, but they should prefer
            // their input allocations, so they get a SAME_AS hint of the
            // first input.
            LUse *use = reg->ins()->getOperand(0)->toUse();
            LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
            CodePosition predEnd = outputOf(predecessor->lastId());
            interval->setHint(Requirement(use->virtualRegister(), predEnd));
        } else {
            // Non-phis get a REGISTER requirement.
            interval->setRequirement(Requirement(Requirement::REGISTER));
        }
    }

    UsePosition *fixedOp = nullptr;
    UsePosition *registerOp = nullptr;

    // Search uses at the start of the interval for requirements.
    UsePositionIterator usePos(interval->usesBegin());
    for (; usePos != interval->usesEnd(); usePos++) {
        if (interval->start().next() < usePos->pos)
            break;

        LUse::Policy policy = usePos->use->policy();
        if (policy == LUse::FIXED) {
            fixedOp = *usePos;
            interval->setRequirement(Requirement(Requirement::REGISTER));
            break;
        } else if (policy == LUse::REGISTER) {
            // Register uses get a REGISTER requirement.
            interval->setRequirement(Requirement(Requirement::REGISTER));
        }
    }

    // Search other uses for hints. If the virtual register already has a
    // canonical spill location, we will eagerly spill this interval, so we
    // don't have to search for hints.
    if (!fixedOp && !vregs[interval->vreg()].canonicalSpill()) {
        for (; usePos != interval->usesEnd(); usePos++) {
            LUse::Policy policy = usePos->use->policy();
            if (policy == LUse::FIXED) {
                fixedOp = *usePos;
                break;
            } else if (policy == LUse::REGISTER) {
                if (!registerOp)
                    registerOp = *usePos;
            }
        }
    }

    if (fixedOp) {
        // Intervals with a fixed use now get a FIXED hint.
        AnyRegister required = GetFixedRegister(reg->def(), fixedOp->use);
        interval->setHint(Requirement(LAllocation(required), fixedOp->pos));
    } else if (registerOp) {
        // Intervals with register uses get a REGISTER hint. We may have
        // already assigned a SAME_AS hint; make sure we don't overwrite it
        // with a weaker one.
        if (interval->hint()->kind() == Requirement::NONE)
            interval->setHint(Requirement(Requirement::REGISTER, registerOp->pos));
    }
}

/*
 * Enqueue by iteration starting from the node with the lowest start position.
 *
 * If we assign registers to intervals in order of their start positions
 * without regard to their requirements, we can end up in situations where
 * intervals that do not require registers block intervals that must have
 * registers from getting one. To avoid this, we ensure that intervals are
 * ordered by position and priority so intervals with more stringent
 * requirements are handled first.
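 *
 * A sketch of the resulting order (with hypothetical intervals): the queue
 * runs from high start position at the front to low start position at the
 * back, and dequeue() pops from the back, so an interval starting at
 * position 5 comes out before one starting at position 10; of two intervals
 * starting at position 10, the one with a REGISTER requirement comes out
 * before the one with no requirement.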
 */
void
LinearScanAllocator::UnhandledQueue::enqueueBackward(LiveInterval *interval)
{
    IntervalReverseIterator i(rbegin());

    for (; i != rend(); i++) {
        if (i->start() > interval->start())
            break;
        if (i->start() == interval->start() &&
            i->requirement()->priority() >= interval->requirement()->priority())
        {
            break;
        }
    }
    insertAfter(*i, interval);
}

/*
 * Enqueue by iterating from high start position to low start position,
 * starting after a provided node.
 */
void
LinearScanAllocator::UnhandledQueue::enqueueForward(LiveInterval *after, LiveInterval *interval)
{
    IntervalIterator i(begin(after));
    i++; // Skip the initial node.

    for (; i != end(); i++) {
        if (i->start() < interval->start())
            break;
        if (i->start() == interval->start() &&
            i->requirement()->priority() < interval->requirement()->priority())
        {
            break;
        }
    }
    insertBefore(*i, interval);
}

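// Debug-only check of the queue invariant: start positions are
// non-increasing from the front of the queue to the back, and within a
// start position the requirement priority value is non-increasing as well.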
void
LinearScanAllocator::UnhandledQueue::assertSorted()
{
#ifdef DEBUG
    LiveInterval *prev = nullptr;
    for (IntervalIterator i(begin()); i != end(); i++) {
        if (prev) {
            JS_ASSERT(prev->start() >= i->start());
            JS_ASSERT_IF(prev->start() == i->start(),
                         prev->requirement()->priority() >= i->requirement()->priority());
        }
        prev = *i;
    }
#endif
}

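// Since the queue is sorted with the highest start position at the front,
// popping from the back yields the unhandled interval with the lowest
// start position (and, per the ordering above, the most stringent
// requirement within that start position).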
LiveInterval *
LinearScanAllocator::UnhandledQueue::dequeue()
{
    if (rbegin() == rend())
        return nullptr;

    LiveInterval *result = *rbegin();
    remove(result);
    return result;
}
