js/src/jit/LiveRangeAllocator.cpp

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Wed, 31 Dec 2014 06:09:35 +0100
changeset:   0:6474c204b198
permissions: -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/LiveRangeAllocator.h"

#include "mozilla/DebugOnly.h"

#include "jsprf.h"

#include "jit/BacktrackingAllocator.h"
#include "jit/BitSet.h"
#include "jit/LinearScan.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

int
Requirement::priority() const
{
    switch (kind_) {
      case Requirement::FIXED:
        return 0;

      case Requirement::REGISTER:
        return 1;

      case Requirement::NONE:
        return 2;

      default:
        MOZ_ASSUME_UNREACHABLE("Unknown requirement kind.");
    }
}

bool
LiveInterval::Range::contains(const Range *other) const
{
    return from <= other->from && to >= other->to;
}

void
LiveInterval::Range::intersect(const Range *other, Range *pre, Range *inside, Range *post) const
{
    JS_ASSERT(pre->empty() && inside->empty() && post->empty());

    CodePosition innerFrom = from;
    if (from < other->from) {
        if (to < other->from) {
            *pre = Range(from, to);
            return;
        }
        *pre = Range(from, other->from);
        innerFrom = other->from;
    }

    CodePosition innerTo = to;
    if (to > other->to) {
        if (from >= other->to) {
            *post = Range(from, to);
            return;
        }
        *post = Range(other->to, to);
        innerTo = other->to;
    }

    if (innerFrom != innerTo)
        *inside = Range(innerFrom, innerTo);
}
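
// Example: intersecting [0,10) with [4,6) gives *pre = [0,4),
// *inside = [4,6) and *post = [6,10). Intersecting [0,3) with [5,9)
// gives only *pre = [0,3), since this range ends before the other begins.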

bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
    JS_ASSERT(from < to);

    Range newRange(from, to);

    if (ranges_.empty())
        return ranges_.append(newRange);
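
    // ranges_ is kept sorted in descending order, so back() is the
    // lowest range, i.e. the head of the interval.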

    Range &first = ranges_.back();
    if (to < first.from)
        return ranges_.append(newRange);

    if (to == first.from) {
        first.from = from;
        return true;
    }

    JS_ASSERT(from < first.to);
    JS_ASSERT(to > first.from);
    if (from < first.from)
        first.from = from;
    if (to > first.to)
        first.to = to;

    return true;
}

bool
LiveInterval::addRange(CodePosition from, CodePosition to)
{
    JS_ASSERT(from < to);

    Range newRange(from, to);

    Range *i;
    // Find the location to insert the new range
    for (i = ranges_.end() - 1; i >= ranges_.begin(); i--) {
        if (newRange.from <= i->to) {
            if (i->from < newRange.from)
                newRange.from = i->from;
            break;
        }
    }
    // Perform coalescing on overlapping ranges
    for (; i >= ranges_.begin(); i--) {
        if (newRange.to < i->from)
            break;
        if (newRange.to < i->to)
            newRange.to = i->to;
        ranges_.erase(i);
    }

    return ranges_.insert(i + 1, newRange);
}
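
// Example: on an empty interval, addRange(7, 9), addRange(2, 5) and then
// addRange(4, 8) leave a single coalesced range [2,9), because [4,8)
// overlaps both earlier ranges.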

void
LiveInterval::setFrom(CodePosition from)
{
    while (!ranges_.empty()) {
        if (ranges_.back().to < from) {
            ranges_.erase(&ranges_.back());
        } else {
            if (from == ranges_.back().to)
                ranges_.erase(&ranges_.back());
            else
                ranges_.back().from = from;
            break;
        }
    }
}

bool
LiveInterval::covers(CodePosition pos)
{
    if (pos < start() || pos >= end())
        return false;

    // Loop over the ranges in ascending order.
    size_t i = lastProcessedRangeIfValid(pos);
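    // ranges_ is stored in descending order, so decrementing i visits
    // ranges in ascending position order; once i would pass below zero,
    // size_t wraparound makes i >= ranges_.length() and the loop ends.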
    for (; i < ranges_.length(); i--) {
        if (pos < ranges_[i].from)
            return false;
        setLastProcessedRange(i, pos);
        if (pos < ranges_[i].to)
            return true;
    }
    return false;
}

CodePosition
LiveInterval::nextCoveredAfter(CodePosition pos)
{
    for (size_t i = 0; i < ranges_.length(); i++) {
        if (ranges_[i].to <= pos) {
            if (i)
                return ranges_[i-1].from;
            break;
        }
        if (ranges_[i].from <= pos)
            return pos;
    }
    return CodePosition::MIN;
}

CodePosition
LiveInterval::intersect(LiveInterval *other)
{
    if (start() > other->start())
        return other->intersect(this);

    // Loop over the ranges in ascending order. As an optimization,
    // try to start at the last processed range.
    size_t i = lastProcessedRangeIfValid(other->start());
    size_t j = other->ranges_.length() - 1;
    if (i >= ranges_.length() || j >= other->ranges_.length())
        return CodePosition::MIN;

    while (true) {
        const Range &r1 = ranges_[i];
        const Range &r2 = other->ranges_[j];

        if (r1.from <= r2.from) {
            if (r1.from <= other->start())
                setLastProcessedRange(i, other->start());
            if (r2.from < r1.to)
                return r2.from;
            if (i == 0 || ranges_[i-1].from > other->end())
                break;
            i--;
        } else {
            if (r1.from < r2.to)
                return r1.from;
            if (j == 0 || other->ranges_[j-1].from > end())
                break;
            j--;
        }
    }

    return CodePosition::MIN;
}

/*
 * This function takes the callee interval and moves all ranges following or
 * including the provided position to the target interval. Additionally, if a
 * range in the callee interval spans the given position, it is split and the
 * latter half is placed in the target interval.
 *
 * This function should only be called if it is known that the interval should
 * actually be split (and, presumably, a move inserted). As such, it is an
 * error for the caller to request a split that moves all intervals into the
 * target. Doing so will trip the assertion at the bottom of the function.
 */
bool
LiveInterval::splitFrom(CodePosition pos, LiveInterval *after)
{
    JS_ASSERT(pos >= start() && pos < end());
    JS_ASSERT(after->ranges_.empty());

    // Move all ranges over to the target
    size_t bufferLength = ranges_.length();
    Range *buffer = ranges_.extractRawBuffer();
    if (!buffer)
        return false;
    after->ranges_.replaceRawBuffer(buffer, bufferLength);

    // Move ranges back as required
    for (Range *i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
        if (pos >= i->to)
            continue;

        if (pos > i->from) {
            // Split the range
            Range split(i->from, pos);
            i->from = pos;
            if (!ranges_.append(split))
                return false;
        }
        if (!ranges_.append(i + 1, after->ranges_.end()))
            return false;
        after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
        break;
    }

    // Split the linked list of use positions
    UsePosition *prev = nullptr;
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (usePos->pos > pos)
            break;
        prev = *usePos;
    }

    uses_.splitAfter(prev, &after->uses_);
    return true;
}
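
// Example: for an interval with ranges [0,4) and [6,10), splitFrom(8, after)
// leaves this interval with [0,4) and [6,8) and moves [8,10) into |after|.
// Uses at positions up to and including 8 stay with this interval; later
// uses move to |after|.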

void
LiveInterval::addUse(UsePosition *use)
{
    // Insert use positions in ascending order. Note that instructions
    // are visited in reverse order, so in most cases the loop terminates
    // at the first iteration and the use position will be added to the
    // front of the list.
    UsePosition *prev = nullptr;
    for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) {
        if (current->pos >= use->pos)
            break;
        prev = *current;
    }

    if (prev)
        uses_.insertAfter(prev, use);
    else
        uses_.pushFront(use);
}

void
LiveInterval::addUseAtEnd(UsePosition *use)
{
    JS_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos);
    uses_.pushBack(use);
}

UsePosition *
LiveInterval::nextUseAfter(CodePosition after)
{
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (usePos->pos >= after) {
            LUse::Policy policy = usePos->use->policy();
            JS_ASSERT(policy != LUse::RECOVERED_INPUT);
            if (policy != LUse::KEEPALIVE)
                return *usePos;
        }
    }
    return nullptr;
}

/*
 * This function locates the first "real" use of this interval that follows
 * the given code position. Non-"real" uses are currently just snapshots,
 * which keep virtual registers alive but do not result in the
 * generation of code that uses them.
 */
CodePosition
LiveInterval::nextUsePosAfter(CodePosition after)
{
    UsePosition *min = nextUseAfter(after);
    return min ? min->pos : CodePosition::MAX;
}

/*
 * This function finds the position of the first use of this interval
 * that is incompatible with the provided allocation. For example,
 * a use with a REGISTER policy would be incompatible with a stack slot
 * allocation.
 */
CodePosition
LiveInterval::firstIncompatibleUse(LAllocation alloc)
{
    for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
        if (!UseCompatibleWith(usePos->use, alloc))
            return usePos->pos;
    }
    return CodePosition::MAX;
}

LiveInterval *
VirtualRegister::intervalFor(CodePosition pos)
{
    for (LiveInterval **i = intervals_.begin(); i != intervals_.end(); i++) {
        if ((*i)->covers(pos))
            return *i;
        if (pos < (*i)->end())
            break;
    }
    return nullptr;
}

LiveInterval *
VirtualRegister::getFirstInterval()
{
    JS_ASSERT(!intervals_.empty());
    return intervals_[0];
}

// Instantiate LiveRangeAllocator for each template instance.
template bool LiveRangeAllocator<LinearScanVirtualRegister, true>::buildLivenessInfo();
template bool LiveRangeAllocator<BacktrackingVirtualRegister, false>::buildLivenessInfo();

#ifdef DEBUG
static inline bool
NextInstructionHasFixedUses(LBlock *block, LInstruction *ins)
{
    LInstructionIterator iter(block->begin(ins));
    iter++;
    for (LInstruction::InputIterator alloc(**iter); alloc.more(); alloc.next()) {
        if (alloc->isUse() && alloc->toUse()->isFixedRegister())
            return true;
    }
    return false;
}

// Returns true iff ins has a def/temp reusing the input allocation.
static bool
IsInputReused(LInstruction *ins, LUse *use)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    for (size_t i = 0; i < ins->numTemps(); i++) {
        if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
        {
            return true;
        }
    }

    return false;
}
#endif

/*
 * This function pre-allocates and initializes as much global state as possible
 * to avoid littering the algorithms with memory management cruft.
 */
template <typename VREG, bool forLSRA>
bool
LiveRangeAllocator<VREG, forLSRA>::init()
{
    if (!RegisterAllocator::init())
        return false;

    liveIn = mir->allocate<BitSet*>(graph.numBlockIds());
    if (!liveIn)
        return false;

    // Initialize fixed intervals.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        AnyRegister reg = AnyRegister::FromCode(i);
        LiveInterval *interval = LiveInterval::New(alloc(), 0);
        interval->setAllocation(LAllocation(reg));
        fixedIntervals[i] = interval;
    }

    fixedIntervalsUnion = LiveInterval::New(alloc(), 0);

    if (!vregs.init(mir, graph.numVirtualRegisters()))
        return false;

    // Build virtual register objects
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Create data structures (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH) {
                    if (!vregs[def].init(alloc(), block, *ins, def, /* isTemp */ false))
                        return false;
                }
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                if (!vregs[def].init(alloc(), block, *ins, def, /* isTemp */ true))
                    return false;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            if (!vregs[def].init(alloc(), block, phi, def, /* isTemp */ false))
                return false;
        }
    }

    return true;
}

static void
AddRegisterToSafepoint(LSafepoint *safepoint, AnyRegister reg, const LDefinition &def)
{
    safepoint->addLiveRegister(reg);

    JS_ASSERT(def.type() == LDefinition::GENERAL ||
              def.type() == LDefinition::INT32 ||
              def.type() == LDefinition::DOUBLE ||
              def.type() == LDefinition::FLOAT32 ||
              def.type() == LDefinition::OBJECT);

    if (def.type() == LDefinition::OBJECT)
        safepoint->addGcRegister(reg.gpr());
}

/*
 * This function builds up liveness intervals for all virtual registers
 * defined in the function. Additionally, it populates the liveIn array with
 * information about which registers are live at the beginning of a block, to
 * aid resolution and reification in a later phase.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
 *     SSA Form." Proceedings of the International Symposium on Code Generation
 *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
 *
 * The algorithm operates on blocks ordered such that dominators of a block
 * are before the block itself, and such that all blocks of a loop are
 * contiguous. It proceeds backwards over the instructions in this order,
 * marking registers live at their uses, ending their live intervals at
 * definitions, and recording which registers are live at the top of every
 * block. To deal with loop backedges, variables live at the beginning of
 * a loop gain an interval covering the entire loop.
 */
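//
// A condensed sketch of the traversal (a simplified restatement of the loop
// below; it omits the handling of calls, temps, fixed registers and the
// LSRA/backtracking differences):
//
//     for each block B, in reverse order:
//         live = union of liveIn[S] for every non-backedge successor S
//         live += vregs feeding the phis of B's phi successor
//         for each vreg v in live: add a range covering all of B
//         for each instruction I of B, in reverse order:
//             for each def/temp d of I: shorten d's interval to start at
//                 its definition; live -= d
//             for each use u of I: extend u's interval from the top of B
//                 to the use; live += u
//         for each phi P of B: live -= output(P)
//         if B is a loop header: extend every interval in live over the
//             whole loop
//         liveIn[B] = live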
template <typename VREG, bool forLSRA>
bool
LiveRangeAllocator<VREG, forLSRA>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed to be alive for the entire block; a
        // definition shortens the interval to its point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
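                        // (For instance, integer add on x86 writes its
                        // result over the first input operand, which is
                        // the constraint this policy models.)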
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to =
                        ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT_IF(forLSRA, inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }

                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall())
                           ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If we exited the search still holding an OSR block, the
                // work list is exhausted, so stop.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}

#ifdef DEBUG

void
LiveInterval::validateRanges()
{
    Range *prev = nullptr;
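
    // Walk from the last element down; size_t wraparound at zero makes
    // i >= ranges_.length() and terminates the loop.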
    for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
        Range *range = &ranges_[i];

        JS_ASSERT(range->from < range->to);
        JS_ASSERT_IF(prev, prev->to <= range->from);
        prev = range;
    }
}

#endif // DEBUG

const char *
LiveInterval::rangesToString() const
{
#ifdef DEBUG
    if (!numRanges())
        return " empty";

    // Not reentrant!
    static char buf[1000];

    char *cursor = buf;
    char *end = cursor + sizeof(buf);

    for (size_t i = 0; i < numRanges(); i++) {
        const LiveInterval::Range *range = getRange(i);
        int n = JS_snprintf(cursor, end - cursor, " [%u,%u>", range->from.pos(), range->to.pos());
        if (n < 0)
            return " ???";
        cursor += n;
    }

    return buf;
#else
    return " ???";
#endif
}

void
LiveInterval::dump()
{
    fprintf(stderr, "v%u: index=%u allocation=%s %s\n",
            vreg(), index(), getAllocation()->toString(), rangesToString());
}
