js/src/jit/LinearScan.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie where it counts... with some reservations about how to convince
FindCookie callers to test the condition and pass a nullptr when double-key
logic is disabled.
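
For illustration only, a minimal sketch of the gating condition the message
describes, using Gecko's Preferences API; the helper name and the call shape
are assumptions, not the actual patch (the real change lands in
GetCookieStringCommon and FindCookie):

    // Sketch only: illustrative names, not the real changeset.
    #include "mozilla/Preferences.h"

    // Double-key logic is wanted when the session is in private browsing mode
    // or when the privacy.thirdparty.isolate preference is set.
    static bool
    UseDoubleKeyLogic(bool aIsPrivateBrowsing)
    {
        return aIsPrivateBrowsing ||
               mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }

    // A FindCookie caller would then test the condition and pass a nullptr
    // first-party key when double-key logic is disabled, for example
    // (hypothetical call shape):
    //   const char *firstPartyKey = UseDoubleKeyLogic(isPrivate) ? baseDomain
    //                                                            : nullptr;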

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #include "jit/LinearScan.h"
     9 #include "mozilla/DebugOnly.h"
    11 #include "jit/BitSet.h"
    12 #include "jit/IonSpewer.h"
    14 using namespace js;
    15 using namespace js::jit;
    17 using mozilla::DebugOnly;
    19 /*
    20  * Merge virtual register intervals into the UnhandledQueue, taking advantage
    21  * of their nearly-sorted ordering.
    22  */
    23 void
    24 LinearScanAllocator::enqueueVirtualRegisterIntervals()
    25 {
    26     // Cursor into the unhandled queue, iterating through start positions.
    27     IntervalReverseIterator curr = unhandled.rbegin();
    29     // Start position is non-monotonically increasing by virtual register number.
    30     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    31         LiveInterval *live = vregs[i].getInterval(0);
    32         if (live->numRanges() > 0) {
    33             setIntervalRequirement(live);
    35             // Iterate backward until the next highest class of start position.
    36             for (; curr != unhandled.rend(); curr++) {
    37                 if (curr->start() > live->start())
    38                     break;
    39             }
    41             // Insert forward from the current position, thereby
    42             // sorting by priority within the start class.
    43             unhandled.enqueueForward(*curr, live);
    44         }
    45     }
    46 }
    48 /*
    49  * This function performs a preliminary allocation on the already-computed
    50  * live intervals, storing the result in the allocation field of the intervals
    51  * themselves.
    52  *
    53  * The algorithm is based on the one published in:
    54  *
    55  * Wimmer, Christian, and Hanspeter Mössenböck. "Optimized Interval Splitting
    56  *     in a Linear Scan Register Allocator." Proceedings of the First
    57  *     ACM/USENIX Conference on Virtual Execution Environments. Chicago, IL,
    58  *     USA, ACM. 2005. PDF.
    59  *
    60  * The algorithm proceeds over each interval in order of their start position.
    61  * If a free register is available, the register that is free for the largest
    62  * portion of the current interval is allocated. Otherwise, the interval
    63  * with the farthest-away next use is spilled to make room for this one. In all
    64  * cases, intervals which conflict either with other intervals or with
    65  * use or definition constraints are split at the point of conflict to be
    66  * assigned a compatible allocation later.
    67  */
    68 bool
    69 LinearScanAllocator::allocateRegisters()
    70 {
    71     // The unhandled queue currently contains only spill intervals, in sorted
    72     // order. Intervals for virtual registers exist in sorted order based on
    73     // start position by vreg ID, but may have priorities that require them to
    74     // be reordered when adding to the unhandled queue.
    75     enqueueVirtualRegisterIntervals();
    76     unhandled.assertSorted();
    78     // Add fixed intervals with ranges to fixed.
    79     for (size_t i = 0; i < AnyRegister::Total; i++) {
    80         if (fixedIntervals[i]->numRanges() > 0)
    81             fixed.pushBack(fixedIntervals[i]);
    82     }
    84     // Iterate through all intervals in ascending start order.
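           // Each dequeued interval ends up either assigned (to its fixed
           // allocation, to a free register, or to a register freed by splitting
           // the intervals that currently block it) or spilled to a stack slot;
           // conflicts are always resolved by splitting at the point of conflict.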
    85     CodePosition prevPosition = CodePosition::MIN;
    86     while ((current = unhandled.dequeue()) != nullptr) {
    87         JS_ASSERT(current->getAllocation()->isUse());
    88         JS_ASSERT(current->numRanges() > 0);
    90         if (mir->shouldCancel("LSRA Allocate Registers (main loop)"))
    91             return false;
    93         CodePosition position = current->start();
    94         const Requirement *req = current->requirement();
    95         const Requirement *hint = current->hint();
    97         IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
    98                 current->hasVreg() ? current->vreg() : 0, current->start().pos(),
    99                 current->end().pos(), current->requirement()->priority());
   101         // Shift active intervals to the inactive or handled sets as appropriate
   102         if (position != prevPosition) {
   103             JS_ASSERT(position > prevPosition);
   104             prevPosition = position;
   106             for (IntervalIterator i(active.begin()); i != active.end(); ) {
   107                 LiveInterval *it = *i;
   108                 JS_ASSERT(it->numRanges() > 0);
   110                 if (it->end() <= position) {
   111                     i = active.removeAt(i);
   112                     finishInterval(it);
   113                 } else if (!it->covers(position)) {
   114                     i = active.removeAt(i);
   115                     inactive.pushBack(it);
   116                 } else {
   117                     i++;
   118                 }
   119             }
   121             // Shift inactive intervals to the active or handled sets as appropriate
   122             for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
   123                 LiveInterval *it = *i;
   124                 JS_ASSERT(it->numRanges() > 0);
   126                 if (it->end() <= position) {
   127                     i = inactive.removeAt(i);
   128                     finishInterval(it);
   129                 } else if (it->covers(position)) {
   130                     i = inactive.removeAt(i);
   131                     active.pushBack(it);
   132                 } else {
   133                     i++;
   134                 }
   135             }
   136         }
   138         // Sanity check all intervals in all sets
   139         validateIntervals();
   141         // If the interval has a hard requirement, grant it.
   142         if (req->kind() == Requirement::FIXED) {
   143             JS_ASSERT(!req->allocation().isRegister());
   144             if (!assign(req->allocation()))
   145                 return false;
   146             continue;
   147         }
   149         // If we don't really need this in a register, don't allocate one
   150         if (req->kind() != Requirement::REGISTER && hint->kind() == Requirement::NONE) {
   151             IonSpew(IonSpew_RegAlloc, "  Eagerly spilling virtual register %d",
   152                     current->hasVreg() ? current->vreg() : 0);
   153             if (!spill())
   154                 return false;
   155             continue;
   156         }
   158         // Try to allocate a free register
   159         IonSpew(IonSpew_RegAlloc, " Attempting free register allocation");
   160         CodePosition bestFreeUntil;
   161         AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
   162         if (bestCode != AnyRegister::Invalid) {
   163             AnyRegister best = AnyRegister::FromCode(bestCode);
   164             IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());
   166             // Split when the register is next needed if necessary
   167             if (bestFreeUntil < current->end()) {
   168                 if (!splitInterval(current, bestFreeUntil))
   169                     return false;
   170             }
   171             if (!assign(LAllocation(best)))
   172                 return false;
   174             continue;
   175         }
   177         IonSpew(IonSpew_RegAlloc, " Attempting blocked register allocation");
   179         // If we absolutely need a register or our next use is closer than the
   180         // selected blocking register then we spill the blocker. Otherwise, we
   181         // spill the current interval.
   182         CodePosition bestNextUsed;
   183         bestCode = findBestBlockedRegister(&bestNextUsed);
   184         if (bestCode != AnyRegister::Invalid &&
   185             (req->kind() == Requirement::REGISTER || hint->pos() < bestNextUsed))
   186         {
   187             AnyRegister best = AnyRegister::FromCode(bestCode);
   188             IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());
   190             if (!splitBlockingIntervals(LAllocation(best)))
   191                 return false;
   192             if (!assign(LAllocation(best)))
   193                 return false;
   195             continue;
   196         }
   198         IonSpew(IonSpew_RegAlloc, "  No registers available to spill");
   199         JS_ASSERT(req->kind() == Requirement::NONE);
   201         if (!spill())
   202             return false;
   203     }
   205     validateAllocations();
   206     validateVirtualRegisters();
   208     return true;
   209 }
   211 /*
   212  * This function iterates over control flow edges in the function and resolves
   213  * conflicts wherein two predecessors of a block have different allocations
   214  * for a virtual register than the block itself. It also turns phis into moves.
   215  *
   216  * The algorithm is based on the one published in "Linear Scan Register
   217  * Allocation on SSA Form" by C. Wimmer et al., for which the full citation
   218  * appears above.
   219  */
   220 bool
   221 LinearScanAllocator::resolveControlFlow()
   222 {
   223     for (size_t i = 0; i < graph.numBlocks(); i++) {
   224         if (mir->shouldCancel("LSRA Resolve Control Flow (main loop)"))
   225             return false;
   227         LBlock *successor = graph.getBlock(i);
   228         MBasicBlock *mSuccessor = successor->mir();
   229         if (mSuccessor->numPredecessors() < 1)
   230             continue;
   232         // Resolve phis to moves
   233         for (size_t j = 0; j < successor->numPhis(); j++) {
   234             LPhi *phi = successor->getPhi(j);
   235             JS_ASSERT(phi->numDefs() == 1);
   236             LDefinition *def = phi->getDef(0);
   237             LinearScanVirtualRegister *vreg = &vregs[def];
   238             LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
   239             JS_ASSERT(to);
   241             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
   242                 LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
   243                 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   245                 LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
   246                 LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
   247                 JS_ASSERT(from);
   249                 if (!moveAtExit(predecessor, from, to, def->type()))
   250                     return false;
   251             }
   253             if (vreg->mustSpillAtDefinition() && !to->isSpill()) {
   254                 // Make sure this phi is spilled at the loop header.
   255                 LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
   256                 if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill(),
   257                                 def->type()))
   258                     return false;
   259             }
   260         }
   262         // Resolve split intervals with moves
   263         BitSet *live = liveIn[mSuccessor->id()];
   265         for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
   266             LinearScanVirtualRegister *vreg = &vregs[*liveRegId];
   267             LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
   268             JS_ASSERT(to);
   270             for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
   271                 LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
   272                 LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
   273                 JS_ASSERT(from);
   275                 if (*from->getAllocation() == *to->getAllocation())
   276                     continue;
   278                 // If this value is spilled at its definition, other stores
   279                 // are redundant.
   280                 if (vreg->mustSpillAtDefinition() && to->getAllocation()->isStackSlot()) {
   281                     JS_ASSERT(vreg->canonicalSpill());
   282                     JS_ASSERT(*vreg->canonicalSpill() == *to->getAllocation());
   283                     continue;
   284                 }
   286                 if (mSuccessor->numPredecessors() > 1) {
   287                     JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   288                     if (!moveAtExit(predecessor, from, to, vreg->type()))
   289                         return false;
   290                 } else {
   291                     if (!moveAtEntry(successor, from, to, vreg->type()))
   292                         return false;
   293                 }
   294             }
   295         }
   296     }
   298     return true;
   299 }
   301 bool
   302 LinearScanAllocator::moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to,
   303                                     LDefinition::Type type)
   304 {
   305     if (*from == *to)
   306         return true;
   307     LMoveGroup *moves = getInputMoveGroup(pos);
   308     return moves->add(from, to, type);
   309 }
   311 static inline void
   312 SetOsiPointUses(LiveInterval *interval, CodePosition defEnd, const LAllocation &allocation)
   313 {
   314     // Moves are inserted after OsiPoint instructions. This function sets
   315     // any OsiPoint uses of this interval to the allocation of the value
   316     // before the move.
   318     JS_ASSERT(interval->index() == 0);
   320     for (UsePositionIterator usePos(interval->usesBegin());
   321          usePos != interval->usesEnd();
   322          usePos++)
   323     {
   324         if (usePos->pos > defEnd)
   325             break;
   326         *static_cast<LAllocation *>(usePos->use) = allocation;
   327     }
   328 }
   330 /*
   331  * This function takes the allocations assigned to the live intervals and
   332  * erases all virtual registers in the function with the allocations
   333  * corresponding to the appropriate interval.
   334  */
   335 bool
   336 LinearScanAllocator::reifyAllocations()
   337 {
   338     // Iterate over each interval, ensuring that definitions are visited before uses.
   339     for (size_t j = 1; j < graph.numVirtualRegisters(); j++) {
   340         LinearScanVirtualRegister *reg = &vregs[j];
   341         if (mir->shouldCancel("LSRA Reification (main loop)"))
   342             return false;
   344     for (size_t k = 0; k < reg->numIntervals(); k++) {
   345         LiveInterval *interval = reg->getInterval(k);
   346         JS_ASSERT(reg == &vregs[interval->vreg()]);
   347         if (!interval->numRanges())
   348             continue;
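               // Rewrite every use of this interval with its final allocation;
               // uses that demand a fixed register additionally get a move from
               // the interval's allocation into that register at the use position.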
   350         UsePositionIterator usePos(interval->usesBegin());
   351         for (; usePos != interval->usesEnd(); usePos++) {
   352             if (usePos->use->isFixedRegister()) {
   353                 LiveInterval *to = fixedIntervals[GetFixedRegister(reg->def(), usePos->use).code()];
   355                 *static_cast<LAllocation *>(usePos->use) = *to->getAllocation();
   356                 if (!moveInput(usePos->pos, interval, to, reg->type()))
   357                     return false;
   358             } else {
   359                 JS_ASSERT(UseCompatibleWith(usePos->use, *interval->getAllocation()));
   360                 *static_cast<LAllocation *>(usePos->use) = *interval->getAllocation();
   361             }
   362         }
   364         // Erase the def of this interval if it's the first one
   365         if (interval->index() == 0)
   366         {
   367             LDefinition *def = reg->def();
   368             LAllocation *spillFrom;
   370             // Insert the moves after any OsiPoint or Nop instructions
   371             // following this one. See minimalDefEnd for more info.
   372             CodePosition defEnd = minimalDefEnd(reg->ins());
   374             if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
   375                 AnyRegister fixedReg = def->output()->toRegister();
   376                 LiveInterval *from = fixedIntervals[fixedReg.code()];
   378                 // If we insert the move after an OsiPoint that uses this vreg,
   379                 // it should use the fixed register instead.
   380                 SetOsiPointUses(interval, defEnd, LAllocation(fixedReg));
   382                 if (!moveAfter(defEnd, from, interval, def->type()))
   383                     return false;
   384                 spillFrom = from->getAllocation();
   385             } else {
   386                 if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
   387                     LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
   388                     LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);
   390                     JS_ASSERT(!inputAlloc->isUse());
   392                     *inputAlloc = *interval->getAllocation();
   393                     if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc, def->type()))
   394                         return false;
   395                 }
   397                 JS_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
   398                 def->setOutput(*interval->getAllocation());
   400                 spillFrom = interval->getAllocation();
   401             }
   403             if (reg->ins()->recoversInput()) {
   404                 LSnapshot *snapshot = reg->ins()->snapshot();
   405                 for (size_t i = 0; i < snapshot->numEntries(); i++) {
   406                     LAllocation *entry = snapshot->getEntry(i);
   407                     if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
   408                         *entry = *def->output();
   409                 }
   410             }
   412             if (reg->mustSpillAtDefinition() && !reg->ins()->isPhi() &&
   413                 (*reg->canonicalSpill() != *spillFrom))
   414             {
   415                 // If we move the spill after an OsiPoint, the OsiPoint should
   416                 // use the original location instead.
   417                 SetOsiPointUses(interval, defEnd, *spillFrom);
   419                 // Insert a spill after this instruction (or after any OsiPoint
   420                 // or Nop instructions). Note that we explicitly ignore phis,
   421                 // which should have been handled in resolveControlFlow().
   422                 LMoveGroup *moves = getMoveGroupAfter(defEnd);
   423                 if (!moves->add(spillFrom, reg->canonicalSpill(), def->type()))
   424                     return false;
   425             }
   426         }
   427         else if (interval->start() > inputOf(insData[interval->start()].block()->firstId()) &&
   428                  (!reg->canonicalSpill() ||
   429                   (reg->canonicalSpill() == interval->getAllocation() &&
   430                    !reg->mustSpillAtDefinition()) ||
   431                   *reg->canonicalSpill() != *interval->getAllocation()))
   432         {
   433             // If this virtual register has no canonical spill location, this
   434             // is the first spill to that location, or this is a move to somewhere
   435             // completely different, we have to emit a move for this interval.
   436             // Don't do this if the interval starts at the first instruction of the
   437             // block; this case should have been handled by resolveControlFlow().
   438             //
   439             // If the interval starts at the output half of an instruction, we have to
   440             // emit the move *after* this instruction, to prevent clobbering an input
   441             // register.
   442             LiveInterval *prevInterval = reg->getInterval(interval->index() - 1);
   443             CodePosition start = interval->start();
   444             InstructionData *data = &insData[start];
   446             JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
   448             if (start.subpos() == CodePosition::INPUT) {
   449                 if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
   450                     return false;
   451             } else {
   452                 if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
   453                     return false;
   454             }
   456             // Mark this interval's spill position, if needed.
   457             if (reg->canonicalSpill() == interval->getAllocation() &&
   458                 !reg->mustSpillAtDefinition())
   459             {
   460                 reg->setSpillPosition(interval->start());
   461             }
   462         }
   464         addLiveRegistersForInterval(reg, interval);
   465     }} // Iteration over virtual register intervals.
   467     // Set the graph overall stack height
   468     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
   470     return true;
   471 }
   473 inline bool
   474 LinearScanAllocator::isSpilledAt(LiveInterval *interval, CodePosition pos)
   475 {
   476     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   477     if (!reg->canonicalSpill() || !reg->canonicalSpill()->isStackSlot())
   478         return false;
   480     if (reg->mustSpillAtDefinition()) {
   481         JS_ASSERT(reg->spillPosition() <= pos);
   482         return true;
   483     }
   485     return interval->getAllocation() == reg->canonicalSpill();
   486 }
   488 bool
   489 LinearScanAllocator::populateSafepoints()
   490 {
   491     size_t firstSafepoint = 0;
   493     for (uint32_t i = 0; i < vregs.numVirtualRegisters(); i++) {
   494         LinearScanVirtualRegister *reg = &vregs[i];
   496         if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
   497             continue;
   499         firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
   500         if (firstSafepoint >= graph.numSafepoints())
   501             break;
   503         // Find the furthest endpoint.
   504         size_t lastInterval = reg->numIntervals() - 1;
   505         CodePosition end = reg->getInterval(lastInterval)->end();
   507         for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
   508             LInstruction *ins = graph.getSafepoint(j);
   510             // Stop processing safepoints if we know we're out of this virtual
   511             // register's range.
   512             if (end < inputOf(ins))
   513                 break;
   515             // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
   516             // is not used with gcthings or nunboxes, or we would have to add the input reg
   517             // to this safepoint.
   518             if (ins == reg->ins() && !reg->isTemp()) {
   519                 DebugOnly<LDefinition*> def = reg->def();
   520                 JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
   521                              def->type() == LDefinition::GENERAL ||
   522                              def->type() == LDefinition::INT32 ||
   523                              def->type() == LDefinition::FLOAT32 ||
   524                              def->type() == LDefinition::DOUBLE);
   525                 continue;
   526             }
   528             LSafepoint *safepoint = ins->safepoint();
   530             if (IsSlotsOrElements(reg)) {
   531                 LiveInterval *interval = reg->intervalFor(inputOf(ins));
   532                 if (!interval)
   533                     continue;
   535                 LAllocation *a = interval->getAllocation();
   536                 if (a->isGeneralReg() && !ins->isCall())
   537                     safepoint->addSlotsOrElementsRegister(a->toGeneralReg()->reg());
   539                 if (isSpilledAt(interval, inputOf(ins))) {
   540                     if (!safepoint->addSlotsOrElementsSlot(reg->canonicalSpillSlot()))
   541                         return false;
   542                 }
   543             } else if (!IsNunbox(reg)) {
   544                 JS_ASSERT(IsTraceable(reg));
   546                 LiveInterval *interval = reg->intervalFor(inputOf(ins));
   547                 if (!interval)
   548                     continue;
   550                 LAllocation *a = interval->getAllocation();
   551                 if (a->isGeneralReg() && !ins->isCall()) {
   552 #ifdef JS_PUNBOX64
   553                     if (reg->type() == LDefinition::BOX) {
   554                         safepoint->addValueRegister(a->toGeneralReg()->reg());
   555                     } else
   556 #endif
   557                     {
   558                         safepoint->addGcRegister(a->toGeneralReg()->reg());
   559                     }
   560                 }
   562                 if (isSpilledAt(interval, inputOf(ins))) {
   563 #ifdef JS_PUNBOX64
   564                     if (reg->type() == LDefinition::BOX) {
   565                         if (!safepoint->addValueSlot(reg->canonicalSpillSlot()))
   566                             return false;
   567                     } else
   568 #endif
   569                     {
   570                         if (!safepoint->addGcSlot(reg->canonicalSpillSlot()))
   571                             return false;
   572                     }
   573                 }
   574 #ifdef JS_NUNBOX32
   575             } else {
   576                 LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
   577                 LinearScanVirtualRegister *type = (reg->type() == LDefinition::TYPE) ? reg : other;
   578                 LinearScanVirtualRegister *payload = (reg->type() == LDefinition::PAYLOAD) ? reg : other;
   579                 LiveInterval *typeInterval = type->intervalFor(inputOf(ins));
   580                 LiveInterval *payloadInterval = payload->intervalFor(inputOf(ins));
   582                 if (!typeInterval && !payloadInterval)
   583                     continue;
   585                 LAllocation *typeAlloc = typeInterval->getAllocation();
   586                 LAllocation *payloadAlloc = payloadInterval->getAllocation();
   588                 // If the payload is an argument, we'll scan that explicitly as
   589                 // part of the frame. It is therefore safe to not add any
   590                 // safepoint entry, as long as the vreg does not have a stack
   591                 // slot as canonical spill slot.
   592                 if (payloadAlloc->isArgument() &&
   593                     (!payload->canonicalSpill() || payload->canonicalSpill() == payloadAlloc))
   594                 {
   595                     continue;
   596                 }
   598                 if (isSpilledAt(typeInterval, inputOf(ins)) &&
   599                     isSpilledAt(payloadInterval, inputOf(ins)))
   600                 {
   601                     // These two components of the Value are spilled
   602                     // contiguously, so simply keep track of the base slot.
   603                     uint32_t payloadSlot = payload->canonicalSpillSlot();
   604                     uint32_t slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot);
   605                     if (!safepoint->addValueSlot(slot))
   606                         return false;
   607                 }
   609                 if (!ins->isCall() &&
   610                     (!isSpilledAt(typeInterval, inputOf(ins)) || payloadAlloc->isGeneralReg()))
   611                 {
   612                     // Either the payload is on the stack but the type is
   613                     // in a register, or the payload is in a register. In
   614                     // both cases, we don't have a contiguous spill so we
   615                     // add a torn entry.
   616                     if (!safepoint->addNunboxParts(*typeAlloc, *payloadAlloc))
   617                         return false;
   619                     // If the nunbox is stored in multiple places, we need to
   620                     // trace all of them to allow the GC to relocate objects.
   621                     if (payloadAlloc->isGeneralReg() && isSpilledAt(payloadInterval, inputOf(ins))) {
   622                         if (!safepoint->addNunboxParts(*typeAlloc, *payload->canonicalSpill()))
   623                             return false;
   624                     }
   625                 }
   626 #endif
   627             }
   628         }
   630 #ifdef JS_NUNBOX32
   631         if (IsNunbox(reg)) {
   632             // Skip past the next half of this nunbox so we don't track the
   633             // same slot twice.
   634             JS_ASSERT(&vregs[reg->def()->virtualRegister() + 1] == otherHalfOfNunbox(reg));
   635             i++;
   636         }
   637 #endif
   638     }
   640     return true;
   641 }
   643 /*
   644  * Split the given interval at the given position, and add the created
   645  * interval to the unhandled queue.
   646  */
   647 bool
   648 LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
   649 {
   650     // Make sure we're actually splitting this interval, not some other
   651     // interval in the same virtual register.
   652     JS_ASSERT(interval->start() < pos && pos < interval->end());
   654     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   656     // "Bogus" intervals cannot be split.
   657     JS_ASSERT(reg);
   659     // Do the split.
   660     LiveInterval *newInterval = LiveInterval::New(alloc(), interval->vreg(), interval->index() + 1);
   661     if (!interval->splitFrom(pos, newInterval))
   662         return false;
   664     JS_ASSERT(interval->numRanges() > 0);
   665     JS_ASSERT(newInterval->numRanges() > 0);
   667     if (!reg->addInterval(newInterval))
   668         return false;
   670     IonSpew(IonSpew_RegAlloc, "  Split interval to %u = [%u, %u]/[%u, %u]",
   671             interval->vreg(), interval->start().pos(),
   672             interval->end().pos(), newInterval->start().pos(),
   673             newInterval->end().pos());
   675     // We always want to enqueue the resulting split. We always split
   676     // forward, and we never want to handle something forward of our
   677     // current position.
   678     setIntervalRequirement(newInterval);
   680     // splitInterval() is usually called to split the node that has just been
   681     // popped from the unhandled queue. Therefore the split will likely be
   682     // closer to the lower start positions in the queue.
   683     unhandled.enqueueBackward(newInterval);
   685     return true;
   686 }
   688 bool
   689 LinearScanAllocator::splitBlockingIntervals(LAllocation allocation)
   690 {
   691     JS_ASSERT(allocation.isRegister());
   693     // Split current before the next fixed use.
   694     LiveInterval *fixed = fixedIntervals[allocation.toRegister().code()];
   695     if (fixed->numRanges() > 0) {
   696         CodePosition fixedPos = current->intersect(fixed);
   697         if (fixedPos != CodePosition::MIN) {
   698             JS_ASSERT(fixedPos > current->start());
   699             JS_ASSERT(fixedPos < current->end());
   700             if (!splitInterval(current, fixedPos))
   701                 return false;
   702         }
   703     }
   705     // Split the blocking interval if it exists.
   706     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
   707         if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
   708             IonSpew(IonSpew_RegAlloc, " Splitting active interval %u = [%u, %u]",
   709                     vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());
   711             JS_ASSERT(i->start() != current->start());
   712             JS_ASSERT(i->covers(current->start()));
   713             JS_ASSERT(i->start() != current->start());
   715             if (!splitInterval(*i, current->start()))
   716                 return false;
   718             LiveInterval *it = *i;
   719             active.removeAt(i);
   720             finishInterval(it);
   721             break;
   722         }
   723     }
   725     // Split any inactive intervals at the next live point.
   726     for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
   727         if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
   728             IonSpew(IonSpew_RegAlloc, " Splitting inactive interval %u = [%u, %u]",
   729                     vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());
   731             LiveInterval *it = *i;
   732             CodePosition nextActive = it->nextCoveredAfter(current->start());
   733             JS_ASSERT(nextActive != CodePosition::MIN);
   735             if (!splitInterval(it, nextActive))
   736                 return false;
   738             i = inactive.removeAt(i);
   739             finishInterval(it);
   740         } else {
   741             i++;
   742         }
   743     }
   745     return true;
   746 }
   748 /*
   749  * Assign the current interval the given allocation, splitting conflicting
   750  * intervals as necessary to make the allocation stick.
   751  */
   752 bool
   753 LinearScanAllocator::assign(LAllocation allocation)
   754 {
   755     if (allocation.isRegister())
   756         IonSpew(IonSpew_RegAlloc, "Assigning register %s", allocation.toRegister().name());
   757     current->setAllocation(allocation);
   759     // Split this interval at the next incompatible one
   760     LinearScanVirtualRegister *reg = &vregs[current->vreg()];
   761     if (reg) {
   762         CodePosition splitPos = current->firstIncompatibleUse(allocation);
   763         if (splitPos != CodePosition::MAX) {
   764             // Split before the incompatible use. This ensures the use position is
   765             // part of the second half of the interval and guarantees we never split
   766             // at the end (zero-length intervals are invalid).
   767             splitPos = splitPos.previous();
   768             JS_ASSERT (splitPos < current->end());
   769             if (!splitInterval(current, splitPos))
   770                 return false;
   771         }
   772     }
   774     bool useAsCanonicalSpillSlot = allocation.isMemory();
   775     // Only canonically spill argument values when frame arguments are not
   776     // modified in the body.
   777     if (mir->modifiesFrameArguments())
   778         useAsCanonicalSpillSlot = allocation.isStackSlot();
   780     if (reg && useAsCanonicalSpillSlot) {
   781         if (reg->canonicalSpill()) {
   782             JS_ASSERT(allocation == *reg->canonicalSpill());
   784             // This interval is spilled more than once, so just always spill
   785             // it at its definition.
   786             reg->setSpillAtDefinition(outputOf(reg->ins()));
   787         } else {
   788             reg->setCanonicalSpill(current->getAllocation());
   790             // If this spill is inside a loop, and the definition is outside
   791             // the loop, instead move the spill to outside the loop.
   792             InstructionData *other = &insData[current->start()];
   793             uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth();
   794             uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth();
   795             if (loopDepthAtSpill > loopDepthAtDef)
   796                 reg->setSpillAtDefinition(outputOf(reg->ins()));
   797         }
   798     }
   800     active.pushBack(current);
   802     return true;
   803 }
   805 uint32_t
   806 LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
   807 {
   808     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
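           // Pick the freed-slot list that matches this type's slot width:
           // doubles (and, on 64-bit targets, pointer-sized and boxed types)
           // reuse double-width slots, nunbox halves share the nunbox list,
           // and everything else reuses single slots.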
   810     SlotList *freed;
   811     if (reg->type() == LDefinition::DOUBLE)
   812         freed = &finishedDoubleSlots_;
   813 #if JS_BITS_PER_WORD == 64
   814     else if (reg->type() == LDefinition::GENERAL ||
   815              reg->type() == LDefinition::OBJECT ||
   816              reg->type() == LDefinition::SLOTS)
   817         freed = &finishedDoubleSlots_;
   818 #endif
   819 #ifdef JS_PUNBOX64
   820     else if (reg->type() == LDefinition::BOX)
   821         freed = &finishedDoubleSlots_;
   822 #endif
   823 #ifdef JS_NUNBOX32
   824     else if (IsNunbox(reg))
   825         freed = &finishedNunboxSlots_;
   826 #endif
   827     else
   828         freed = &finishedSlots_;
   830     if (!freed->empty()) {
   831         LiveInterval *maybeDead = freed->back();
   832         if (maybeDead->end() < reg->getInterval(0)->start()) {
   833             // This spill slot is dead before the start of the interval trying
   834             // to reuse the slot, so reuse is safe. Otherwise, we could
   835             // encounter a situation where a stack slot is allocated and freed
   836             // inside a loop, but the same allocation is then used to hold a
   837             // loop-carried value.
   838             //
   839             // Note that we don't reuse the dead slot if its interval ends right
   840             // before the current interval, to avoid conflicting slot -> reg and
   841             // reg -> slot moves in the same movegroup.
   842             freed->popBack();
   843             LinearScanVirtualRegister *dead = &vregs[maybeDead->vreg()];
   844 #ifdef JS_NUNBOX32
   845             if (IsNunbox(dead))
   846                 return BaseOfNunboxSlot(dead->type(), dead->canonicalSpillSlot());
   847 #endif
   848             return dead->canonicalSpillSlot();
   849         }
   850     }
   852     return stackSlotAllocator.allocateSlot(reg->type());
   853 }
   855 bool
   856 LinearScanAllocator::spill()
   857 {
   858     IonSpew(IonSpew_RegAlloc, "  Decided to spill current interval");
   860     // We can't spill bogus intervals
   861     JS_ASSERT(current->hasVreg());
   863     LinearScanVirtualRegister *reg = &vregs[current->vreg()];
   865     if (reg->canonicalSpill()) {
   866         IonSpew(IonSpew_RegAlloc, "  Allocating canonical spill location");
   868         return assign(*reg->canonicalSpill());
   869     }
   871     uint32_t stackSlot;
   872 #if defined JS_NUNBOX32
   873     if (IsNunbox(reg)) {
   874         LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
   876         if (other->canonicalSpill()) {
   877             // The other half of this nunbox already has a spill slot. To
   878             // ensure the Value is spilled contiguously, use the other half (it
   879             // was allocated double-wide).
   880             JS_ASSERT(other->canonicalSpill()->isStackSlot());
   881             stackSlot = BaseOfNunboxSlot(other->type(), other->canonicalSpillSlot());
   882         } else {
   883             // No canonical spill location exists for this nunbox yet. Allocate
   884             // one.
   885             stackSlot = allocateSlotFor(current);
   886         }
   887         stackSlot -= OffsetOfNunboxSlot(reg->type());
   888     } else
   889 #endif
   890     {
   891         stackSlot = allocateSlotFor(current);
   892     }
   893     JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
   895     return assign(LStackSlot(stackSlot));
   896 }
   898 void
   899 LinearScanAllocator::freeAllocation(LiveInterval *interval, LAllocation *alloc)
   900 {
   901     LinearScanVirtualRegister *mine = &vregs[interval->vreg()];
   902     if (!IsNunbox(mine)) {
   903         if (alloc->isStackSlot()) {
   904             if (mine->type() == LDefinition::DOUBLE)
   905                 finishedDoubleSlots_.append(interval);
   906 #if JS_BITS_PER_WORD == 64
   907             else if (mine->type() == LDefinition::GENERAL ||
   908                      mine->type() == LDefinition::OBJECT ||
   909                      mine->type() == LDefinition::SLOTS)
   910                 finishedDoubleSlots_.append(interval);
   911 #endif
   912 #ifdef JS_PUNBOX64
   913             else if (mine->type() == LDefinition::BOX)
   914                 finishedDoubleSlots_.append(interval);
   915 #endif
   916             else
   917                 finishedSlots_.append(interval);
   918         }
   919         return;
   920     }
   922 #ifdef JS_NUNBOX32
   923     // Special handling for nunboxes. We can only free the stack slot once we
   924     // know both intervals have been finished.
   925     LinearScanVirtualRegister *other = otherHalfOfNunbox(mine);
   926     if (other->finished()) {
   927         if (!mine->canonicalSpill() && !other->canonicalSpill())
   928             return;
   930         JS_ASSERT_IF(mine->canonicalSpill() && other->canonicalSpill(),
   931                      mine->canonicalSpill()->isStackSlot() == other->canonicalSpill()->isStackSlot());
   933         LinearScanVirtualRegister *candidate = mine->canonicalSpill() ? mine : other;
   934         if (!candidate->canonicalSpill()->isStackSlot())
   935             return;
   937         finishedNunboxSlots_.append(candidate->lastInterval());
   938     }
   939 #endif
   940 }
   942 void
   943 LinearScanAllocator::finishInterval(LiveInterval *interval)
   944 {
   945     LAllocation *alloc = interval->getAllocation();
   946     JS_ASSERT(!alloc->isUse());
   948     // Toss out the bogus interval now that it's run its course
   949     if (!interval->hasVreg())
   950         return;
   952     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   954     // All spills should be equal to the canonical spill location.
   955     JS_ASSERT_IF(alloc->isStackSlot(), *alloc == *reg->canonicalSpill());
   957     bool lastInterval = interval->index() == (reg->numIntervals() - 1);
   958     if (lastInterval) {
   959         freeAllocation(interval, alloc);
   960         reg->setFinished();
   961     }
   963     handled.pushBack(interval);
   964 }
   966 /*
   967  * This function locates a register which may be assigned by the register allocator
   968  * and is not assigned to any active interval. The register which is free
   969  * for the longest period of time is then returned.
   970  */
   971 AnyRegister::Code
   972 LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
   973 {
   974     IonSpew(IonSpew_RegAlloc, "  Computing freeUntilPos");
   976     // Compute free-until positions for all registers
   977     CodePosition freeUntilPos[AnyRegister::Total];
   978     bool needFloat = vregs[current->vreg()].isFloatReg();
   979     for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
   980         AnyRegister reg = regs.takeAny(needFloat);
   981         freeUntilPos[reg.code()] = CodePosition::MAX;
   982     }
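           // A register backing an active interval is not free anywhere in the
           // current interval, so its free-until position is reset to the minimum.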
   983     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
   984         LAllocation *alloc = i->getAllocation();
   985         if (alloc->isRegister(needFloat)) {
   986             AnyRegister reg = alloc->toRegister();
   987             IonSpew(IonSpew_RegAlloc, "   Register %s not free", reg.name());
   988             freeUntilPos[reg.code()] = CodePosition::MIN;
   989         }
   990     }
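           // A register backing an inactive interval stays free until that
           // interval next intersects the current one.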
   991     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
   992         LAllocation *alloc = i->getAllocation();
   993         if (alloc->isRegister(needFloat)) {
   994             AnyRegister reg = alloc->toRegister();
   995             CodePosition pos = current->intersect(*i);
   996             if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
   997                 freeUntilPos[reg.code()] = pos;
   998                 IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
   999             }
  1000         }
  1001     }
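           // Fixed intervals (preassigned registers) constrain availability in
           // the same way; an intersection right at the current start rules the
           // register out entirely.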
  1003     CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
  1004     if (fixedPos != CodePosition::MIN) {
  1005         for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
  1006             AnyRegister reg = i->getAllocation()->toRegister();
  1007             if (freeUntilPos[reg.code()] != CodePosition::MIN) {
  1008                 CodePosition pos = current->intersect(*i);
  1009                 if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
  1010                     freeUntilPos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
  1011                     IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
  1012                 }
  1013             }
  1014         }
  1015     }
  1017     AnyRegister::Code bestCode = AnyRegister::Invalid;
  1018     if (current->index()) {
  1019         // As an optimization, use the allocation from the previous interval if
  1020         // it is available.
  1021         LiveInterval *previous = vregs[current->vreg()].getInterval(current->index() - 1);
  1022         LAllocation *alloc = previous->getAllocation();
  1023         if (alloc->isRegister(needFloat)) {
  1024             AnyRegister prevReg = alloc->toRegister();
  1025             if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
  1026                 bestCode = prevReg.code();
  1027         }
  1028     }
  1030     // Assign the register suggested by the hint if it's free.
  1031     const Requirement *hint = current->hint();
  1032     if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
  1033         AnyRegister hintReg = hint->allocation().toRegister();
  1034         if (freeUntilPos[hintReg.code()] > hint->pos())
  1035             bestCode = hintReg.code();
  1036     } else if (hint->kind() == Requirement::SAME_AS_OTHER) {
  1037         LiveInterval *other = vregs[hint->virtualRegister()].intervalFor(hint->pos());
  1038         if (other && other->getAllocation()->isRegister()) {
  1039             AnyRegister hintReg = other->getAllocation()->toRegister();
  1040             if (freeUntilPos[hintReg.code()] > hint->pos())
  1041                 bestCode = hintReg.code();
  1042         }
  1043     }
  1045     if (bestCode == AnyRegister::Invalid) {
  1046         // If all else fails, search freeUntilPos for largest value
  1047         for (uint32_t i = 0; i < AnyRegister::Total; i++) {
  1048             if (freeUntilPos[i] == CodePosition::MIN)
  1049                 continue;
  1050             if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
  1051                 bestCode = AnyRegister::Code(i);
  1052         }
  1053     }
  1055     if (bestCode != AnyRegister::Invalid)
  1056         *freeUntil = freeUntilPos[bestCode];
  1057     return bestCode;
  1058 }
  1060 /*
  1061  * This function locates a register which is assigned to an active interval,
  1062  * and returns the one with the furthest away next use. As a side effect,
  1063  * the nextUsePos array is updated with the next use position of all active
  1064  * intervals for use elsewhere in the algorithm.
  1065  */
  1066 AnyRegister::Code
  1067 LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
  1068 {
  1069     IonSpew(IonSpew_RegAlloc, "  Computing nextUsePos");
  1071     // Compute next-used positions for all registers
  1072     CodePosition nextUsePos[AnyRegister::Total];
  1073     bool needFloat = vregs[current->vreg()].isFloatReg();
  1074     for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
  1075         AnyRegister reg = regs.takeAny(needFloat);
  1076         nextUsePos[reg.code()] = CodePosition::MAX;
  1077     }
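           // For each register held by an active interval, record how soon its
           // holder needs it again; an interval that starts exactly where the
           // current one starts cannot be evicted and is disqualified.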
  1078     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
  1079         LAllocation *alloc = i->getAllocation();
  1080         if (alloc->isRegister(needFloat)) {
  1081             AnyRegister reg = alloc->toRegister();
  1082             if (i->start() == current->start()) {
  1083                 nextUsePos[reg.code()] = CodePosition::MIN;
  1084                 IonSpew(IonSpew_RegAlloc, "   Disqualifying %s due to recency", reg.name());
  1085             } else if (nextUsePos[reg.code()] != CodePosition::MIN) {
  1086                 nextUsePos[reg.code()] = i->nextUsePosAfter(current->start());
  1087                 IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(),
  1088                         nextUsePos[reg.code()].pos());
  1089             }
  1090         }
  1091     }
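           // Inactive and fixed intervals can also pull a register's next-use
           // position closer; the register whose next use is furthest away is
           // the cheapest one to take by splitting its current holders.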
  1092     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
  1093         LAllocation *alloc = i->getAllocation();
  1094         if (alloc->isRegister(needFloat)) {
  1095             AnyRegister reg = alloc->toRegister();
  1096             CodePosition pos = i->nextUsePosAfter(current->start());
  1097             if (pos < nextUsePos[reg.code()]) {
  1098                 nextUsePos[reg.code()] = pos;
  1099                 IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(), pos.pos());
  1100             }
  1101         }
  1102     }
  1104     CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
  1105     if (fixedPos != CodePosition::MIN) {
  1106         for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
  1107             AnyRegister reg = i->getAllocation()->toRegister();
  1108             if (nextUsePos[reg.code()] != CodePosition::MIN) {
  1109                 CodePosition pos = i->intersect(current);
  1110                 if (pos != CodePosition::MIN && pos < nextUsePos[reg.code()]) {
  1111                     nextUsePos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
  1112                     IonSpew(IonSpew_RegAlloc, "   Register %s next used %u (fixed)", reg.name(), pos.pos());
  1113                 }
  1114             }
  1115         }
  1116     }
  1118     // Search nextUsePos for largest value
  1119     AnyRegister::Code bestCode = AnyRegister::Invalid;
  1120     for (size_t i = 0; i < AnyRegister::Total; i++) {
  1121         if (nextUsePos[i] == CodePosition::MIN)
  1122             continue;
  1123         if (bestCode == AnyRegister::Invalid || nextUsePos[i] > nextUsePos[bestCode])
  1124             bestCode = AnyRegister::Code(i);
  1125     }
  1127     if (bestCode != AnyRegister::Invalid)
  1128         *nextUsed = nextUsePos[bestCode];
  1129     return bestCode;
  1130 }
  1132 /*
  1133  * Two intervals can coexist if any of the following conditions is met:
  1135  *   - The intervals do not intersect.
  1136  *   - The intervals have different allocations.
  1137  *   - The intervals have the same allocation, but the allocation may be
  1138  *     shared.
  1140  * Intuitively, it is a bug if any allocated intervals exist which can not
  1141  * coexist.
  1142  */
  1143 bool
  1144 LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
  1145 {
  1146     LAllocation *aa = a->getAllocation();
  1147     LAllocation *ba = b->getAllocation();
  1148     if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
  1149         return a->intersect(b) == CodePosition::MIN;
  1150     return true;
  1151 }
  1153 #ifdef DEBUG
  1154 /*
  1155  * Ensure intervals appear in exactly the appropriate one of {active,inactive,
  1156  * handled}, and that active and inactive intervals do not conflict. Handled
  1157  * intervals are checked for conflicts in validateAllocations for performance
  1158  * reasons.
  1159  */
  1160 void
  1161 LinearScanAllocator::validateIntervals()
  1162 {
  1163     if (!js_JitOptions.checkGraphConsistency)
  1164         return;
  1166     for (IntervalIterator i(active.begin()); i != active.end(); i++) {
  1167         JS_ASSERT(i->numRanges() > 0);
  1168         JS_ASSERT(i->covers(current->start()));
  1170         for (IntervalIterator j(active.begin()); j != i; j++)
  1171             JS_ASSERT(canCoexist(*i, *j));
  1172     }
  1174     for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
  1175         JS_ASSERT(i->numRanges() > 0);
  1176         JS_ASSERT(i->end() >= current->start());
  1177         JS_ASSERT(!i->covers(current->start()));
  1179         for (IntervalIterator j(active.begin()); j != active.end(); j++) {
  1180             JS_ASSERT(*i != *j);
  1181             JS_ASSERT(canCoexist(*i, *j));
  1182         }
  1183         for (IntervalIterator j(inactive.begin()); j != i; j++)
  1184             JS_ASSERT(canCoexist(*i, *j));
  1185     }
  1187     for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
  1188         JS_ASSERT(!i->getAllocation()->isUse());
  1189         JS_ASSERT(i->numRanges() > 0);
  1190         if (i->getAllocation()->isRegister()) {
  1191             JS_ASSERT(i->end() <= current->start());
  1192             JS_ASSERT(!i->covers(current->start()));
  1193         }
  1195         for (IntervalIterator j(active.begin()); j != active.end(); j++)
  1196             JS_ASSERT(*i != *j);
  1197         for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
  1198             JS_ASSERT(*i != *j);
  1199     }
  1200 }
  1202 /*
  1203  * This function performs a nice, expensive check that all intervals
  1204  * in the function can coexist with every other interval.
  1205  */
  1206 void
  1207 LinearScanAllocator::validateAllocations()
  1208 {
  1209     if (!js_JitOptions.checkGraphConsistency)
  1210         return;
  1212     for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
  1213         for (IntervalIterator j(handled.begin()); j != i; j++) {
  1214             JS_ASSERT(*i != *j);
  1215             JS_ASSERT(canCoexist(*i, *j));
  1216         }
  1217         LinearScanVirtualRegister *reg = &vregs[i->vreg()];
  1218         bool found = false;
  1219         for (size_t j = 0; j < reg->numIntervals(); j++) {
  1220             if (reg->getInterval(j) == *i) {
  1221                 JS_ASSERT(j == i->index());
  1222                 found = true;
  1223                 break;
  1224             }
  1225         }
  1226         JS_ASSERT(found);
  1227     }
  1228 }
  1230 #endif // DEBUG
  1232 bool
  1233 LinearScanAllocator::go()
  1234 {
  1235     IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
  1237     IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
  1238     if (!buildLivenessInfo())
  1239         return false;
  1240     IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
  1242     if (mir->shouldCancel("LSRA Liveness"))
  1243         return false;
  1245     IonSpew(IonSpew_RegAlloc, "Beginning preliminary register allocation");
  1246     if (!allocateRegisters())
  1247         return false;
  1248     IonSpew(IonSpew_RegAlloc, "Preliminary register allocation complete");
  1250     if (mir->shouldCancel("LSRA Preliminary Regalloc"))
  1251         return false;
  1253     IonSpew(IonSpew_RegAlloc, "Beginning control flow resolution");
  1254     if (!resolveControlFlow())
  1255         return false;
  1256     IonSpew(IonSpew_RegAlloc, "Control flow resolution complete");
  1258     if (mir->shouldCancel("LSRA Control Flow"))
  1259         return false;
  1261     IonSpew(IonSpew_RegAlloc, "Beginning register allocation reification");
  1262     if (!reifyAllocations())
  1263         return false;
  1264     IonSpew(IonSpew_RegAlloc, "Register allocation reification complete");
  1266     if (mir->shouldCancel("LSRA Reification"))
  1267         return false;
  1269     IonSpew(IonSpew_RegAlloc, "Beginning safepoint population.");
  1270     if (!populateSafepoints())
  1271         return false;
  1272     IonSpew(IonSpew_RegAlloc, "Safepoint population complete.");
  1274     if (mir->shouldCancel("LSRA Safepoints"))
  1275         return false;
  1277     IonSpew(IonSpew_RegAlloc, "Register allocation complete");
  1279     return true;
  1280 }
  1282 void
  1283 LinearScanAllocator::setIntervalRequirement(LiveInterval *interval)
  1284 {
  1285     JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
  1286     JS_ASSERT(interval->hint()->kind() == Requirement::NONE);
  1288     // This function computes requirement by virtual register, other types of
  1289     // interval should have requirements set manually
  1290     LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
  1292     if (interval->index() == 0) {
  1293         // The first interval is the definition, so deal with any definition
  1294         // constraints/hints
  1296         if (reg->def()->policy() == LDefinition::PRESET) {
  1297             // Preset policies get a FIXED requirement or hint.
  1298             if (reg->def()->output()->isRegister())
  1299                 interval->setHint(Requirement(*reg->def()->output()));
  1300             else
  1301                 interval->setRequirement(Requirement(*reg->def()->output()));
  1302         } else if (reg->def()->policy() == LDefinition::MUST_REUSE_INPUT) {
  1303             // Reuse policies get either a FIXED requirement or a SAME_AS hint.
  1304             LUse *use = reg->ins()->getOperand(reg->def()->getReusedInput())->toUse();
  1305             interval->setRequirement(Requirement(Requirement::REGISTER));
  1306             interval->setHint(Requirement(use->virtualRegister(), interval->start().previous()));
  1307         } else if (reg->ins()->isPhi()) {
  1308             // Phis don't have any requirements, but they should prefer
  1309             // their input allocations, so they get a SAME_AS hint of the
  1310             // first input
  1311             LUse *use = reg->ins()->getOperand(0)->toUse();
  1312             LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
  1313             CodePosition predEnd = outputOf(predecessor->lastId());
  1314             interval->setHint(Requirement(use->virtualRegister(), predEnd));
  1315         } else {
  1316             // Non-phis get a REGISTER requirement
  1317             interval->setRequirement(Requirement(Requirement::REGISTER));
  1318         }
  1319     }
  1321     UsePosition *fixedOp = nullptr;
  1322     UsePosition *registerOp = nullptr;
  1324     // Search uses at the start of the interval for requirements.
  1325     UsePositionIterator usePos(interval->usesBegin());
  1326     for (; usePos != interval->usesEnd(); usePos++) {
  1327         if (interval->start().next() < usePos->pos)
  1328             break;
  1330         LUse::Policy policy = usePos->use->policy();
  1331         if (policy == LUse::FIXED) {
  1332             fixedOp = *usePos;
  1333             interval->setRequirement(Requirement(Requirement::REGISTER));
  1334             break;
  1335         } else if (policy == LUse::REGISTER) {
  1336             // Register uses get a REGISTER requirement
  1337             interval->setRequirement(Requirement(Requirement::REGISTER));
  1338         }
  1339     }
  1341     // Search other uses for hints. If the virtual register already has a
  1342     // canonical spill location, we will eagerly spill this interval, so we
  1343     // don't have to search for hints.
  1344     if (!fixedOp && !vregs[interval->vreg()].canonicalSpill()) {
  1345         for (; usePos != interval->usesEnd(); usePos++) {
  1346             LUse::Policy policy = usePos->use->policy();
  1347             if (policy == LUse::FIXED) {
  1348                 fixedOp = *usePos;
  1349                 break;
  1350             } else if (policy == LUse::REGISTER) {
  1351                 if (!registerOp)
  1352                     registerOp = *usePos;
  1353             }
  1354         }
  1355     }
  1357     if (fixedOp) {
  1358         // Intervals with a fixed use now get a FIXED hint.
  1359         AnyRegister required = GetFixedRegister(reg->def(), fixedOp->use);
  1360         interval->setHint(Requirement(LAllocation(required), fixedOp->pos));
  1361     } else if (registerOp) {
  1362         // Intervals with register uses get a REGISTER hint. We may have already
  1363         // assigned a SAME_AS hint, make sure we don't overwrite it with a weaker
  1364         // hint.
  1365         if (interval->hint()->kind() == Requirement::NONE)
  1366             interval->setHint(Requirement(Requirement::REGISTER, registerOp->pos));
  1367     }
  1368 }
  1370 /*
  1371  * Enqueue by iteration starting from the node with the lowest start position.
  1373  * If we assign registers to intervals in order of their start positions
  1374  * without regard to their requirements, we can end up in situations where
  1375  * intervals that do not require registers block intervals that must have
  1376  * registers from getting one. To avoid this, we ensure that intervals are
  1377  * ordered by position and priority so intervals with more stringent
  1378  * requirements are handled first.
  1379  */
  1380 void
  1381 LinearScanAllocator::UnhandledQueue::enqueueBackward(LiveInterval *interval)
  1382 {
  1383     IntervalReverseIterator i(rbegin());
  1385     for (; i != rend(); i++) {
  1386         if (i->start() > interval->start())
  1387             break;
  1388         if (i->start() == interval->start() &&
  1389             i->requirement()->priority() >= interval->requirement()->priority())
  1390         {
  1391             break;
  1392         }
  1393     }
  1394     insertAfter(*i, interval);
  1395 }
  1397 /*
  1398  * Enqueue by iteration from high start position to low start position,
  1399  * after a provided node.
  1400  */
  1401 void
  1402 LinearScanAllocator::UnhandledQueue::enqueueForward(LiveInterval *after, LiveInterval *interval)
  1403 {
  1404     IntervalIterator i(begin(after));
  1405     i++; // Skip the initial node.
  1407     for (; i != end(); i++) {
  1408         if (i->start() < interval->start())
  1409             break;
  1410         if (i->start() == interval->start() &&
  1411             i->requirement()->priority() < interval->requirement()->priority())
  1412         {
  1413             break;
  1414         }
  1415     }
  1416     insertBefore(*i, interval);
  1417 }
  1419 void
  1420 LinearScanAllocator::UnhandledQueue::assertSorted()
  1421 {
  1422 #ifdef DEBUG
  1423     LiveInterval *prev = nullptr;
  1424     for (IntervalIterator i(begin()); i != end(); i++) {
  1425         if (prev) {
  1426             JS_ASSERT(prev->start() >= i->start());
  1427             JS_ASSERT_IF(prev->start() == i->start(),
  1428                          prev->requirement()->priority() >= i->requirement()->priority());
  1429         }
  1430         prev = *i;
  1431     }
  1432 #endif
  1433 }
  1435 LiveInterval *
  1436 LinearScanAllocator::UnhandledQueue::dequeue()
  1437 {
  1438     if (rbegin() == rend())
  1439         return nullptr;
  1441     LiveInterval *result = *rbegin();
  1442     remove(result);
  1443     return result;
  1444 }
