js/src/jit/BacktrackingAllocator.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode or
the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts... with some
reservations about how to convince FindCookie callers to test the condition
and pass a nullptr when double-key logic is disabled.
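
The gist of that change, as a minimal sketch (the helper name, the boolean
pref read, and the load-context check are assumptions for illustration; the
real logic belongs in GetCookieStringCommon and FindCookie):

// Sketch only: gate the double-key (first-party isolation) path on one predicate.
#include "mozilla/Preferences.h"
#include "nsILoadContext.h"

static bool
UseDoubleKeying(nsILoadContext *aLoadContext)
{
    // Private browsing always opts in; otherwise honor the preference
    // (treated as a boolean here, which is an assumption).
    bool isPrivate = false;
    if (aLoadContext)
        aLoadContext->GetUsePrivateBrowsing(&isPrivate);
    return isPrivate ||
           mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
}

// FindCookie callers would then test the condition and pass nullptr for the
// first-party key when double-key logic is disabled, for example:
//   const nsCString *key = UseDoubleKeying(loadContext) ? &firstPartyHost
//                                                       : nullptr;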

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #include "jit/BacktrackingAllocator.h"
     8 #include "jit/BitSet.h"
    10 using namespace js;
    11 using namespace js::jit;
    13 using mozilla::DebugOnly;
    15 bool
    16 BacktrackingAllocator::init()
    17 {
    18     RegisterSet remainingRegisters(allRegisters_);
    19     while (!remainingRegisters.empty(/* float = */ false)) {
    20         AnyRegister reg = AnyRegister(remainingRegisters.takeGeneral());
    21         registers[reg.code()].allocatable = true;
    22     }
    23     while (!remainingRegisters.empty(/* float = */ true)) {
    24         AnyRegister reg = AnyRegister(remainingRegisters.takeFloat());
    25         registers[reg.code()].allocatable = true;
    26     }
    28     LifoAlloc *lifoAlloc = mir->alloc().lifoAlloc();
    29     for (size_t i = 0; i < AnyRegister::Total; i++) {
    30         registers[i].reg = AnyRegister::FromCode(i);
    31         registers[i].allocations.setAllocator(lifoAlloc);
    33         LiveInterval *fixed = fixedIntervals[i];
    34         for (size_t j = 0; j < fixed->numRanges(); j++) {
    35             AllocatedRange range(fixed, fixed->getRange(j));
    36             if (!registers[i].allocations.insert(range))
    37                 return false;
    38         }
    39     }
    41     hotcode.setAllocator(lifoAlloc);
    43     // Partition the graph into hot and cold sections, for helping to make
    44     // splitting decisions. Since we don't have any profiling data this is a
    45     // crapshoot, so just mark the bodies of inner loops as hot and everything
    46     // else as cold.
    48     LiveInterval *hotcodeInterval = LiveInterval::New(alloc(), 0);
    50     LBlock *backedge = nullptr;
    51     for (size_t i = 0; i < graph.numBlocks(); i++) {
    52         LBlock *block = graph.getBlock(i);
    54         // If we see a loop header, mark the backedge so we know when we have
    55         // hit the end of the loop. Don't process the loop immediately, so that
    56         // if there is an inner loop we will ignore the outer backedge.
    57         if (block->mir()->isLoopHeader())
    58             backedge = block->mir()->backedge()->lir();
    60         if (block == backedge) {
    61             LBlock *header = block->mir()->loopHeaderOfBackedge()->lir();
    62             CodePosition from = inputOf(header->firstId());
    63             CodePosition to = outputOf(block->lastId()).next();
    64             if (!hotcodeInterval->addRange(from, to))
    65                 return false;
    66         }
    67     }
    69     for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
    70         AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
    71         if (!hotcode.insert(range))
    72             return false;
    73     }
    75     return true;
    76 }
    78 bool
    79 BacktrackingAllocator::go()
    80 {
    81     IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
    83     IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
    84     if (!buildLivenessInfo())
    85         return false;
    86     IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
    88     if (!init())
    89         return false;
    91     if (IonSpewEnabled(IonSpew_RegAlloc))
    92         dumpLiveness();
    94     if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
    95         return false;
    97     if (!groupAndQueueRegisters())
    98         return false;
   100     if (IonSpewEnabled(IonSpew_RegAlloc))
   101         dumpRegisterGroups();
   103     // Allocate, spill and split register intervals until finished.
   104     while (!allocationQueue.empty()) {
   105         if (mir->shouldCancel("Backtracking Allocation"))
   106             return false;
   108         QueueItem item = allocationQueue.removeHighest();
   109         if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
   110             return false;
   111     }
   113     if (IonSpewEnabled(IonSpew_RegAlloc))
   114         dumpAllocations();
   116     return resolveControlFlow() && reifyAllocations() && populateSafepoints();
   117 }
   119 static bool
   120 LifetimesOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
   121 {
   122     // Registers may have been eagerly split in two, see tryGroupReusedRegister.
   123     // In such cases, only consider the first interval.
   124     JS_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
   126     LiveInterval *interval0 = reg0->getInterval(0), *interval1 = reg1->getInterval(0);
   128     // Interval ranges are sorted in reverse order. The lifetimes overlap if
   129     // any of their ranges overlap.
   130     size_t index0 = 0, index1 = 0;
   131     while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
   132         const LiveInterval::Range
   133             *range0 = interval0->getRange(index0),
   134             *range1 = interval1->getRange(index1);
   135         if (range0->from >= range1->to)
   136             index0++;
   137         else if (range1->from >= range0->to)
   138             index1++;
   139         else
   140             return true;
   141     }
   143     return false;
   144 }
   146 bool
   147 BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
   148 {
   149     for (size_t i = 0; i < group->registers.length(); i++) {
   150         if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
   151             return false;
   152     }
   153     return true;
   154 }
   156 bool
   157 BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
   158 {
   159     // See if reg0 and reg1 can be placed in the same group, following the
   160     // restrictions imposed by VirtualRegisterGroup and any other registers
   161     // already grouped with reg0 or reg1.
   162     BacktrackingVirtualRegister *reg0 = &vregs[vreg0], *reg1 = &vregs[vreg1];
   164     if (reg0->isFloatReg() != reg1->isFloatReg())
   165         return true;
   167     VirtualRegisterGroup *group0 = reg0->group(), *group1 = reg1->group();
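           // If exactly one of the two registers already belongs to a group,
           // swap the arguments so that reg0 is the grouped one.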
   169     if (!group0 && group1)
   170         return tryGroupRegisters(vreg1, vreg0);
   172     if (group0) {
   173         if (group1) {
   174             if (group0 == group1) {
   175                 // The registers are already grouped together.
   176                 return true;
   177             }
   178             // Try to unify the two distinct groups.
   179             for (size_t i = 0; i < group1->registers.length(); i++) {
   180                 if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
   181                     return true;
   182             }
   183             for (size_t i = 0; i < group1->registers.length(); i++) {
   184                 uint32_t vreg = group1->registers[i];
   185                 if (!group0->registers.append(vreg))
   186                     return false;
   187                 vregs[vreg].setGroup(group0);
   188             }
   189             return true;
   190         }
   191         if (!canAddToGroup(group0, reg1))
   192             return true;
   193         if (!group0->registers.append(vreg1))
   194             return false;
   195         reg1->setGroup(group0);
   196         return true;
   197     }
   199     if (LifetimesOverlap(reg0, reg1))
   200         return true;
   202     VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup(alloc());
   203     if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
   204         return false;
   206     reg0->setGroup(group);
   207     reg1->setGroup(group);
   208     return true;
   209 }
   211 bool
   212 BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
   213 {
   214     BacktrackingVirtualRegister &reg = vregs[def], &usedReg = vregs[use];
   216     // reg is a vreg which reuses its input usedReg for its output physical
   217     // register. Try to group reg with usedReg if at all possible, as avoiding
   218     // copies before reg's instruction is crucial for the quality of the
   219     // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
   220     // on x86/x64).
   222     if (reg.intervalFor(inputOf(reg.ins()))) {
   223         JS_ASSERT(reg.isTemp());
   224         reg.setMustCopyInput();
   225         return true;
   226     }
   228     if (!usedReg.intervalFor(outputOf(reg.ins()))) {
   229         // The input is not live after the instruction, either in a safepoint
   230         // for the instruction or in subsequent code. The input and output
   231         // can thus be in the same group.
   232         return tryGroupRegisters(use, def);
   233     }
   235     // The input is live afterwards, either in future instructions or in a
   236     // safepoint for the reusing instruction. This is impossible to satisfy
   237     // without copying the input.
   238     //
   239     // It may or may not be better to split the interval at the point of the
   240     // definition, which may permit grouping. One case where it is definitely
   241     // better to split is if the input never has any register uses after the
   242     // instruction. Handle this splitting eagerly.
   244     if (usedReg.numIntervals() != 1 ||
   245         (usedReg.def()->isPreset() && !usedReg.def()->output()->isRegister())) {
   246         reg.setMustCopyInput();
   247         return true;
   248     }
   249     LiveInterval *interval = usedReg.getInterval(0);
   250     LBlock *block = insData[reg.ins()].block();
   252     // The input's lifetime must end within the same block as the definition,
   253     // otherwise it could live on in phis elsewhere.
   254     if (interval->end() > outputOf(block->lastId())) {
   255         reg.setMustCopyInput();
   256         return true;
   257     }
   259     for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
   260         if (iter->pos <= inputOf(reg.ins()))
   261             continue;
   263         LUse *use = iter->use;
   264         if (FindReusingDefinition(insData[iter->pos].ins(), use)) {
   265             reg.setMustCopyInput();
   266             return true;
   267         }
   268         if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
   269             reg.setMustCopyInput();
   270             return true;
   271         }
   272     }
   274     LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
   275     for (size_t i = 0; i < interval->numRanges(); i++) {
   276         const LiveInterval::Range *range = interval->getRange(i);
   277         JS_ASSERT(range->from <= inputOf(reg.ins()));
   279         CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
   280         if (!preInterval->addRange(range->from, to))
   281             return false;
   282     }
   284     LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
   285     if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
   286         return false;
   288     LiveIntervalVector newIntervals;
   289     if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
   290         return false;
   292     distributeUses(interval, newIntervals);
   294     if (!split(interval, newIntervals))
   295         return false;
   297     JS_ASSERT(usedReg.numIntervals() == 2);
   299     usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
   301     return tryGroupRegisters(use, def);
   302 }
   304 bool
   305 BacktrackingAllocator::groupAndQueueRegisters()
   306 {
   307     // Try to group registers with their reused inputs.
   308     // Virtual register number 0 is unused.
   309     JS_ASSERT(vregs[0u].numIntervals() == 0);
   310     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
   311         BacktrackingVirtualRegister &reg = vregs[i];
   312         if (!reg.numIntervals())
   313             continue;
   315         if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
   316             LUse *use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
   317             if (!tryGroupReusedRegister(i, use->virtualRegister()))
   318                 return false;
   319         }
   320     }
   322     // Try to group phis with their inputs.
   323     for (size_t i = 0; i < graph.numBlocks(); i++) {
   324         LBlock *block = graph.getBlock(i);
   325         for (size_t j = 0; j < block->numPhis(); j++) {
   326             LPhi *phi = block->getPhi(j);
   327             uint32_t output = phi->getDef(0)->virtualRegister();
   328             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
   329                 uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
   330                 if (!tryGroupRegisters(input, output))
   331                     return false;
   332             }
   333         }
   334     }
   336     // Virtual register number 0 is unused.
   337     JS_ASSERT(vregs[0u].numIntervals() == 0);
   338     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
   339         if (mir->shouldCancel("Backtracking Enqueue Registers"))
   340             return false;
   342         BacktrackingVirtualRegister &reg = vregs[i];
   343         JS_ASSERT(reg.numIntervals() <= 2);
   344         JS_ASSERT(!reg.canonicalSpill());
   346         if (!reg.numIntervals())
   347             continue;
   349         // Disable this for now; see bugs 906858, 931487, and 932465.
   350 #if 0
   351         // Eagerly set the canonical spill slot for registers which are preset
   352         // for that slot, and reuse it for other registers in the group.
   353         LDefinition *def = reg.def();
   354         if (def->policy() == LDefinition::PRESET && !def->output()->isRegister()) {
   355             reg.setCanonicalSpill(*def->output());
   356             if (reg.group() && reg.group()->spill.isUse())
   357                 reg.group()->spill = *def->output();
   358         }
   359 #endif
   361         // Place all intervals for this register on the allocation queue.
   362         // During initial queueing use single queue items for groups of
   363         // registers, so that they will be allocated together and reduce the
   364         // risk of unnecessary conflicts. This is in keeping with the idea that
   365         // register groups are effectively single registers whose value changes
   366         // during execution. If any intervals in the group are evicted later
   367         // then they will be reallocated individually.
   368         size_t start = 0;
   369         if (VirtualRegisterGroup *group = reg.group()) {
   370             if (i == group->canonicalReg()) {
   371                 size_t priority = computePriority(group);
   372                 if (!allocationQueue.insert(QueueItem(group, priority)))
   373                     return false;
   374             }
   375             start++;
   376         }
   377         for (; start < reg.numIntervals(); start++) {
   378             LiveInterval *interval = reg.getInterval(start);
   379             if (interval->numRanges() > 0) {
   380                 size_t priority = computePriority(interval);
   381                 if (!allocationQueue.insert(QueueItem(interval, priority)))
   382                     return false;
   383             }
   384         }
   385     }
   387     return true;
   388 }
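       // Number of eviction rounds to try for an interval or group before
       // giving up and splitting it instead.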
   390 static const size_t MAX_ATTEMPTS = 2;
   392 bool
   393 BacktrackingAllocator::tryAllocateFixed(LiveInterval *interval, bool *success,
   394                                         bool *pfixed, LiveInterval **pconflicting)
   395 {
   396     // Spill intervals which are required to be in a certain stack slot.
   397     if (!interval->requirement()->allocation().isRegister()) {
   398         IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
   399         interval->setAllocation(interval->requirement()->allocation());
   400         *success = true;
   401         return true;
   402     }
   404     AnyRegister reg = interval->requirement()->allocation().toRegister();
   405     return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting);
   406 }
   408 bool
   409 BacktrackingAllocator::tryAllocateNonFixed(LiveInterval *interval, bool *success,
   410                                            bool *pfixed, LiveInterval **pconflicting)
   411 {
   412     // If we want, but do not require an interval to be in a specific
   413     // register, only look at that register for allocating and evict
   414     // or spill if it is not available. Picking a separate register may
   415     // be even worse than spilling, as it will still necessitate moves
   416     // and will tie up more registers than if we spilled.
   417     if (interval->hint()->kind() == Requirement::FIXED) {
   418         AnyRegister reg = interval->hint()->allocation().toRegister();
   419         if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting))
   420             return false;
   421         if (*success)
   422             return true;
   423     }
   425     // Spill intervals which have no hint or register requirement.
   426     if (interval->requirement()->kind() == Requirement::NONE) {
   427         spill(interval);
   428         *success = true;
   429         return true;
   430     }
   432     if (!*pconflicting || minimalInterval(interval)) {
   433         // Search for any available register which the interval can be
   434         // allocated to.
   435         for (size_t i = 0; i < AnyRegister::Total; i++) {
   436             if (!tryAllocateRegister(registers[i], interval, success, pfixed, pconflicting))
   437                 return false;
   438             if (*success)
   439                 return true;
   440         }
   441     }
   443     // We failed to allocate this interval.
   444     JS_ASSERT(!*success);
   445     return true;
   446 }
   448 bool
   449 BacktrackingAllocator::processInterval(LiveInterval *interval)
   450 {
   451     if (IonSpewEnabled(IonSpew_RegAlloc)) {
   452         IonSpew(IonSpew_RegAlloc, "Allocating v%u [priority %lu] [weight %lu]: %s",
   453                 interval->vreg(), computePriority(interval), computeSpillWeight(interval),
   454                 interval->rangesToString());
   455     }
   457     // An interval can be processed by doing any of the following:
   458     //
   459     // - Assigning the interval a register. The interval cannot overlap any
   460     //   other interval allocated for that physical register.
   461     //
   462     // - Spilling the interval, provided it has no register uses.
   463     //
   464     // - Splitting the interval into two or more intervals which cover the
   465     //   original one. The new intervals are placed back onto the priority
   466     //   queue for later processing.
   467     //
   468     // - Evicting one or more existing allocated intervals, and then doing one
   469     //   of the above operations. Evicted intervals are placed back on the
   470     //   priority queue. Any evicted intervals must have a lower spill weight
   471     //   than the interval being processed.
   472     //
   473     // As long as this structure is followed, termination is guaranteed.
   474     // In general, we want to minimize the amount of interval splitting
   475     // (which generally necessitates spills), so allocate longer lived, lower
   476     // weight intervals first and evict and split them later if they prevent
   477     // allocation for higher weight intervals.
   479     bool canAllocate = setIntervalRequirement(interval);
   481     bool fixed;
   482     LiveInterval *conflict = nullptr;
   483     for (size_t attempt = 0;; attempt++) {
   484         if (canAllocate) {
   485             bool success = false;
   486             fixed = false;
   487             conflict = nullptr;
   489             // Ok, let's try allocating for this interval.
   490             if (interval->requirement()->kind() == Requirement::FIXED) {
   491                 if (!tryAllocateFixed(interval, &success, &fixed, &conflict))
   492                     return false;
   493             } else {
   494                 if (!tryAllocateNonFixed(interval, &success, &fixed, &conflict))
   495                     return false;
   496             }
   498             // If that worked, we're done!
   499             if (success)
   500                 return true;
   502             // If that didn't work, but we have a non-fixed LiveInterval known
   503             // to be conflicting, maybe we can evict it and try again.
   504             if (attempt < MAX_ATTEMPTS &&
   505                 !fixed &&
   506                 conflict &&
   507                 computeSpillWeight(conflict) < computeSpillWeight(interval))
   508             {
   509                 if (!evictInterval(conflict))
   510                     return false;
   511                 continue;
   512             }
   513         }
   515         // A minimal interval cannot be split any further. If we try to split
   516         // it at this point we will just end up with the same interval and will
   517         // enter an infinite loop. Weights and the initial live intervals must
   518         // be constructed so that any minimal interval is allocatable.
   519         JS_ASSERT(!minimalInterval(interval));
   521         if (canAllocate && fixed)
   522             return splitAcrossCalls(interval);
   523         return chooseIntervalSplit(interval, conflict);
   524     }
   525 }
   527 bool
   528 BacktrackingAllocator::processGroup(VirtualRegisterGroup *group)
   529 {
   530     if (IonSpewEnabled(IonSpew_RegAlloc)) {
   531         IonSpew(IonSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
   532                 group->registers[0], computePriority(group), computeSpillWeight(group));
   533     }
   535     bool fixed;
   536     LiveInterval *conflict;
   537     for (size_t attempt = 0;; attempt++) {
   538         // Search for any available register which the group can be allocated to.
   539         fixed = false;
   540         conflict = nullptr;
   541         for (size_t i = 0; i < AnyRegister::Total; i++) {
   542             bool success;
   543             if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
   544                 return false;
   545             if (success) {
   546                 conflict = nullptr;
   547                 break;
   548             }
   549         }
   551         if (attempt < MAX_ATTEMPTS &&
   552             !fixed &&
   553             conflict &&
   554             conflict->hasVreg() &&
   555             computeSpillWeight(conflict) < computeSpillWeight(group))
   556         {
   557             if (!evictInterval(conflict))
   558                 return false;
   559             continue;
   560         }
   562         for (size_t i = 0; i < group->registers.length(); i++) {
   563             VirtualRegister &reg = vregs[group->registers[i]];
   564             JS_ASSERT(reg.numIntervals() <= 2);
   565             if (!processInterval(reg.getInterval(0)))
   566                 return false;
   567         }
   569         return true;
   570     }
   571 }
   573 bool
   574 BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
   575 {
   576     // Set any requirement or hint on interval according to its definition and
   577     // uses. Return false if there are conflicting requirements which will
   578     // require the interval to be split.
   579     interval->setHint(Requirement());
   580     interval->setRequirement(Requirement());
   582     BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
   584     // Set a hint if another interval in the same group is in a register.
   585     if (VirtualRegisterGroup *group = reg->group()) {
   586         if (group->allocation.isRegister()) {
   587             if (IonSpewEnabled(IonSpew_RegAlloc)) {
   588                 IonSpew(IonSpew_RegAlloc, "Hint %s, used by group allocation",
   589                         group->allocation.toString());
   590             }
   591             interval->setHint(Requirement(group->allocation));
   592         }
   593     }
   595     if (interval->index() == 0) {
   596         // The first interval is the definition, so deal with any definition
   597         // constraints/hints.
   599         LDefinition::Policy policy = reg->def()->policy();
   600         if (policy == LDefinition::PRESET) {
   601             // Preset policies get a FIXED requirement.
   602             if (IonSpewEnabled(IonSpew_RegAlloc)) {
   603                 IonSpew(IonSpew_RegAlloc, "Requirement %s, preset by definition",
   604                         reg->def()->output()->toString());
   605             }
   606             interval->setRequirement(Requirement(*reg->def()->output()));
   607         } else if (reg->ins()->isPhi()) {
   608             // Phis don't have any requirements, but they should prefer their
   609             // input allocations. This is captured by the group hints above.
   610         } else {
   611             // Non-phis get a REGISTER requirement.
   612             interval->setRequirement(Requirement(Requirement::REGISTER));
   613         }
   614     }
   616     // Search uses for requirements.
   617     for (UsePositionIterator iter = interval->usesBegin();
   618          iter != interval->usesEnd();
   619          iter++)
   620     {
   621         LUse::Policy policy = iter->use->policy();
   622         if (policy == LUse::FIXED) {
   623             AnyRegister required = GetFixedRegister(reg->def(), iter->use);
   625             if (IonSpewEnabled(IonSpew_RegAlloc)) {
   626                 IonSpew(IonSpew_RegAlloc, "Requirement %s, due to use at %u",
   627                         required.name(), iter->pos.pos());
   628             }
   630             // If there are multiple fixed registers which the interval is
   631             // required to use, fail. The interval will need to be split before
   632             // it can be allocated.
   633             if (!interval->addRequirement(Requirement(LAllocation(required))))
   634                 return false;
   635         } else if (policy == LUse::REGISTER) {
   636             if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
   637                 return false;
   638         }
   639     }
   641     return true;
   642 }
   644 bool
   645 BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
   646                                                 bool *psuccess, bool *pfixed, LiveInterval **pconflicting)
   647 {
   648     *psuccess = false;
   650     if (!r.allocatable)
   651         return true;
   653     if (r.reg.isFloat() != vregs[group->registers[0]].isFloatReg())
   654         return true;
   656     bool allocatable = true;
   657     LiveInterval *conflicting = nullptr;
   659     for (size_t i = 0; i < group->registers.length(); i++) {
   660         VirtualRegister &reg = vregs[group->registers[i]];
   661         JS_ASSERT(reg.numIntervals() <= 2);
   662         LiveInterval *interval = reg.getInterval(0);
   664         for (size_t j = 0; j < interval->numRanges(); j++) {
   665             AllocatedRange range(interval, interval->getRange(j)), existing;
   666             if (r.allocations.contains(range, &existing)) {
   667                 if (conflicting) {
   668                     if (conflicting != existing.interval)
   669                         return true;
   670                 } else {
   671                     conflicting = existing.interval;
   672                 }
   673                 allocatable = false;
   674             }
   675         }
   676     }
   678     if (!allocatable) {
   679         JS_ASSERT(conflicting);
   680         if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
   681             *pconflicting = conflicting;
   682         if (!conflicting->hasVreg())
   683             *pfixed = true;
   684         return true;
   685     }
   687     *psuccess = true;
   689     group->allocation = LAllocation(r.reg);
   690     return true;
   691 }
   693 bool
   694 BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
   695                                            bool *success, bool *pfixed, LiveInterval **pconflicting)
   696 {
   697     *success = false;
   699     if (!r.allocatable)
   700         return true;
   702     BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
   703     if (reg->isFloatReg() != r.reg.isFloat())
   704         return true;
   706     JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
   707                  interval->requirement()->allocation() == LAllocation(r.reg));
   709     for (size_t i = 0; i < interval->numRanges(); i++) {
   710         AllocatedRange range(interval, interval->getRange(i)), existing;
   711         if (r.allocations.contains(range, &existing)) {
   712             if (existing.interval->hasVreg()) {
   713                 if (IonSpewEnabled(IonSpew_RegAlloc)) {
   714                     IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
   715                             r.reg.name(), existing.interval->vreg(),
   716                             existing.range->from.pos(), existing.range->to.pos(),
   717                             computeSpillWeight(existing.interval));
   718                 }
   719                 if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
   720                     *pconflicting = existing.interval;
   721             } else {
   722                 if (IonSpewEnabled(IonSpew_RegAlloc)) {
   723                     IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
   724                             r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
   725                 }
   726                 *pfixed = true;
   727             }
   728             return true;
   729         }
   730     }
   732     IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());
   734     for (size_t i = 0; i < interval->numRanges(); i++) {
   735         AllocatedRange range(interval, interval->getRange(i));
   736         if (!r.allocations.insert(range))
   737             return false;
   738     }
   740     // Set any register hint for allocating other intervals in the same group.
   741     if (VirtualRegisterGroup *group = reg->group()) {
   742         if (!group->allocation.isRegister())
   743             group->allocation = LAllocation(r.reg);
   744     }
   746     interval->setAllocation(LAllocation(r.reg));
   747     *success = true;
   748     return true;
   749 }
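       // Evicting an interval removes its ranges from the physical register's
       // allocation set, clears its allocation, and puts it back on the queue.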
   751 bool
   752 BacktrackingAllocator::evictInterval(LiveInterval *interval)
   753 {
   754     if (IonSpewEnabled(IonSpew_RegAlloc)) {
   755         IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
   756                 interval->vreg(), interval->rangesToString());
   757     }
   759     JS_ASSERT(interval->getAllocation()->isRegister());
   761     AnyRegister reg(interval->getAllocation()->toRegister());
   762     PhysicalRegister &physical = registers[reg.code()];
   763     JS_ASSERT(physical.reg == reg && physical.allocatable);
   765     for (size_t i = 0; i < interval->numRanges(); i++) {
   766         AllocatedRange range(interval, interval->getRange(i));
   767         physical.allocations.remove(range);
   768     }
   770     interval->setAllocation(LAllocation());
   772     size_t priority = computePriority(interval);
   773     return allocationQueue.insert(QueueItem(interval, priority));
   774 }
   776 void
   777 BacktrackingAllocator::distributeUses(LiveInterval *interval,
   778                                       const LiveIntervalVector &newIntervals)
   779 {
   780     JS_ASSERT(newIntervals.length() >= 2);
   782     // Simple redistribution of uses from an old interval to a set of new
   783     // intervals. Intervals are permitted to overlap, in which case this will
   784     // assign uses in the overlapping section to the interval with the latest
   785     // start position.
   786     for (UsePositionIterator iter(interval->usesBegin());
   787          iter != interval->usesEnd();
   788          iter++)
   789     {
   790         CodePosition pos = iter->pos;
   791         LiveInterval *addInterval = nullptr;
   792         for (size_t i = 0; i < newIntervals.length(); i++) {
   793             LiveInterval *newInterval = newIntervals[i];
   794             if (newInterval->covers(pos)) {
   795                 if (!addInterval || newInterval->start() < addInterval->start())
   796                     addInterval = newInterval;
   797             }
   798         }
   799         addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
   800     }
   801 }
   803 bool
   804 BacktrackingAllocator::split(LiveInterval *interval,
   805                              const LiveIntervalVector &newIntervals)
   806 {
   807     if (IonSpewEnabled(IonSpew_RegAlloc)) {
   808         IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s into:",
   809                 interval->vreg(), interval->rangesToString());
   810         for (size_t i = 0; i < newIntervals.length(); i++)
   811             IonSpew(IonSpew_RegAlloc, "    %s", newIntervals[i]->rangesToString());
   812     }
   814     JS_ASSERT(newIntervals.length() >= 2);
   816     // Find the earliest interval in the new list.
   817     LiveInterval *first = newIntervals[0];
   818     for (size_t i = 1; i < newIntervals.length(); i++) {
   819         if (newIntervals[i]->start() < first->start())
   820             first = newIntervals[i];
   821     }
   823     // Replace the old interval in the virtual register's state with the new intervals.
   824     VirtualRegister *reg = &vregs[interval->vreg()];
   825     reg->replaceInterval(interval, first);
   826     for (size_t i = 0; i < newIntervals.length(); i++) {
   827         if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
   828             return false;
   829     }
   831     return true;
   832 }
   834 bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector &newIntervals)
   835 {
   836     // Queue the new intervals for register assignment.
   837     for (size_t i = 0; i < newIntervals.length(); i++) {
   838         LiveInterval *newInterval = newIntervals[i];
   839         size_t priority = computePriority(newInterval);
   840         if (!allocationQueue.insert(QueueItem(newInterval, priority)))
   841             return false;
   842     }
   843     return true;
   844 }
   846 void
   847 BacktrackingAllocator::spill(LiveInterval *interval)
   848 {
   849     IonSpew(IonSpew_RegAlloc, "Spilling interval");
   851     JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
   852     JS_ASSERT(!interval->getAllocation()->isStackSlot());
   854     // We can't spill bogus intervals.
   855     JS_ASSERT(interval->hasVreg());
   857     BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
   859     bool useCanonical = !reg->hasCanonicalSpillExclude()
   860         || interval->start() < reg->canonicalSpillExclude();
   862     if (useCanonical) {
   863         if (reg->canonicalSpill()) {
   864             IonSpew(IonSpew_RegAlloc, "  Picked canonical spill location %s",
   865                     reg->canonicalSpill()->toString());
   866             interval->setAllocation(*reg->canonicalSpill());
   867             return;
   868         }
   870         if (reg->group() && !reg->group()->spill.isUse()) {
   871             IonSpew(IonSpew_RegAlloc, "  Reusing group spill location %s",
   872                     reg->group()->spill.toString());
   873             interval->setAllocation(reg->group()->spill);
   874             reg->setCanonicalSpill(reg->group()->spill);
   875             return;
   876         }
   877     }
   879     uint32_t stackSlot = stackSlotAllocator.allocateSlot(reg->type());
   880     JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
   882     LStackSlot alloc(stackSlot);
   883     interval->setAllocation(alloc);
   885     IonSpew(IonSpew_RegAlloc, "  Allocating spill location %s", alloc.toString());
   887     if (useCanonical) {
   888         reg->setCanonicalSpill(alloc);
   889         if (reg->group())
   890             reg->group()->spill = alloc;
   891     }
   892 }
   894 // Add moves to resolve conflicting assignments between a block and its
   895 // predecessors. XXX try to common this with LinearScanAllocator.
   896 bool
   897 BacktrackingAllocator::resolveControlFlow()
   898 {
   899     // Virtual register number 0 is unused.
   900     JS_ASSERT(vregs[0u].numIntervals() == 0);
   901     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
   902         BacktrackingVirtualRegister *reg = &vregs[i];
   904         if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
   905             return false;
   907         for (size_t j = 1; j < reg->numIntervals(); j++) {
   908             LiveInterval *interval = reg->getInterval(j);
   909             JS_ASSERT(interval->index() == j);
   911             bool skip = false;
   912             for (int k = j - 1; k >= 0; k--) {
   913                 LiveInterval *prevInterval = reg->getInterval(k);
   914                 if (prevInterval->start() != interval->start())
   915                     break;
   916                 if (*prevInterval->getAllocation() == *interval->getAllocation()) {
   917                     skip = true;
   918                     break;
   919                 }
   920             }
   921             if (skip)
   922                 continue;
   924             CodePosition start = interval->start();
   925             InstructionData *data = &insData[start];
   926             if (interval->start() > inputOf(data->block()->firstId())) {
   927                 JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
   929                 LiveInterval *prevInterval = reg->intervalFor(start.previous());
   930                 if (start.subpos() == CodePosition::INPUT) {
   931                     if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
   932                         return false;
   933                 } else {
   934                     if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
   935                         return false;
   936                 }
   937             }
   938         }
   939     }
   941     for (size_t i = 0; i < graph.numBlocks(); i++) {
   942         if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
   943             return false;
   945         LBlock *successor = graph.getBlock(i);
   946         MBasicBlock *mSuccessor = successor->mir();
   947         if (mSuccessor->numPredecessors() < 1)
   948             continue;
   950         // Resolve phis to moves
   951         for (size_t j = 0; j < successor->numPhis(); j++) {
   952             LPhi *phi = successor->getPhi(j);
   953             JS_ASSERT(phi->numDefs() == 1);
   954             LDefinition *def = phi->getDef(0);
   955             VirtualRegister *vreg = &vregs[def];
   956             LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
   957             JS_ASSERT(to);
   959             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
   960                 LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
   961                 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   963                 LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
   964                 LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
   965                 JS_ASSERT(from);
   967                 if (!moveAtExit(predecessor, from, to, def->type()))
   968                     return false;
   969             }
   970         }
   972         // Resolve split intervals with moves
   973         BitSet *live = liveIn[mSuccessor->id()];
   975         for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
   976             VirtualRegister &reg = vregs[*liveRegId];
   978             for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
   979                 LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
   981                 for (size_t k = 0; k < reg.numIntervals(); k++) {
   982                     LiveInterval *to = reg.getInterval(k);
   983                     if (!to->covers(inputOf(successor->firstId())))
   984                         continue;
   985                     if (to->covers(outputOf(predecessor->lastId())))
   986                         continue;
   988                     LiveInterval *from = reg.intervalFor(outputOf(predecessor->lastId()));
   990                     if (mSuccessor->numPredecessors() > 1) {
   991                         JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   992                         if (!moveAtExit(predecessor, from, to, reg.type()))
   993                             return false;
   994                     } else {
   995                         if (!moveAtEntry(successor, from, to, reg.type()))
   996                             return false;
   997                     }
   998                 }
   999             }
  1000         }
  1001     }
  1003     return true;
  1004 }
  1006 bool
  1007 BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
  1008 {
  1009     if (LDefinition *def = FindReusingDefinition(ins, use))
  1010         return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
  1011     return false;
  1012 }
  1014 bool
  1015 BacktrackingAllocator::isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy)
  1016 {
  1017     switch (use->policy()) {
  1018       case LUse::ANY:
  1019         return isReusedInput(use, ins, considerCopy);
  1021       case LUse::REGISTER:
  1022       case LUse::FIXED:
  1023         return true;
  1025       default:
  1026         return false;
  1027     }
  1028 }
  1030 bool
  1031 BacktrackingAllocator::isRegisterDefinition(LiveInterval *interval)
  1032 {
  1033     if (interval->index() != 0)
  1034         return false;
  1036     VirtualRegister &reg = vregs[interval->vreg()];
  1037     if (reg.ins()->isPhi())
  1038         return false;
  1040     if (reg.def()->policy() == LDefinition::PRESET && !reg.def()->output()->isRegister())
  1041         return false;
  1043     return true;
  1044 }
  1046 bool
  1047 BacktrackingAllocator::reifyAllocations()
  1048 {
  1049     // Virtual register number 0 is unused.
  1050     JS_ASSERT(vregs[0u].numIntervals() == 0);
  1051     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
  1052         VirtualRegister *reg = &vregs[i];
  1054         if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
  1055             return false;
  1057         for (size_t j = 0; j < reg->numIntervals(); j++) {
  1058             LiveInterval *interval = reg->getInterval(j);
  1059             JS_ASSERT(interval->index() == j);
  1061             if (interval->index() == 0) {
  1062                 reg->def()->setOutput(*interval->getAllocation());
  1063                 if (reg->ins()->recoversInput()) {
  1064                     LSnapshot *snapshot = reg->ins()->snapshot();
  1065                     for (size_t i = 0; i < snapshot->numEntries(); i++) {
  1066                         LAllocation *entry = snapshot->getEntry(i);
  1067                         if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
  1068                             *entry = *reg->def()->output();
  1069                     }
  1070                 }
  1071             }
  1073             for (UsePositionIterator iter(interval->usesBegin());
  1074                  iter != interval->usesEnd();
  1075                  iter++)
  1076             {
  1077                 LAllocation *alloc = iter->use;
  1078                 *alloc = *interval->getAllocation();
  1080                 // For any uses which feed into MUST_REUSE_INPUT definitions,
  1081                 // add copies if the use and def have different allocations.
  1082                 LInstruction *ins = insData[iter->pos].ins();
  1083                 if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
  1084                     LiveInterval *outputInterval =
  1085                         vregs[def->virtualRegister()].intervalFor(outputOf(ins));
  1086                     LAllocation *res = outputInterval->getAllocation();
  1087                     LAllocation *sourceAlloc = interval->getAllocation();
  1089                     if (*res != *alloc) {
  1090                         LMoveGroup *group = getInputMoveGroup(inputOf(ins));
  1091                         if (!group->addAfter(sourceAlloc, res, def->type()))
  1092                             return false;
  1093                         *alloc = *res;
  1094                     }
  1095                 }
  1096             }
  1098             addLiveRegistersForInterval(reg, interval);
  1099         }
  1100     }
  1102     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
  1103     return true;
  1104 }
  1106 bool
  1107 BacktrackingAllocator::populateSafepoints()
  1108 {
  1109     size_t firstSafepoint = 0;
  1111     // Virtual register number 0 is unused.
  1112     JS_ASSERT(!vregs[0u].def());
  1113     for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
  1114         BacktrackingVirtualRegister *reg = &vregs[i];
  1116         if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
  1117             continue;
  1119         firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
  1120         if (firstSafepoint >= graph.numSafepoints())
  1121             break;
  1123         // Find the furthest endpoint.
  1124         CodePosition end = reg->getInterval(0)->end();
  1125         for (size_t j = 1; j < reg->numIntervals(); j++)
  1126             end = Max(end, reg->getInterval(j)->end());
  1128         for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
  1129             LInstruction *ins = graph.getSafepoint(j);
  1131             // Stop processing safepoints if we know we're out of this virtual
  1132             // register's range.
  1133             if (end < outputOf(ins))
  1134                 break;
  1136             // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
  1137             // is not used with gcthings or nunboxes, or we would have to add the input reg
  1138             // to this safepoint.
  1139             if (ins == reg->ins() && !reg->isTemp()) {
  1140                 DebugOnly<LDefinition*> def = reg->def();
  1141                 JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
  1142                              def->type() == LDefinition::GENERAL ||
  1143                              def->type() == LDefinition::INT32 ||
  1144                              def->type() == LDefinition::FLOAT32 ||
  1145                              def->type() == LDefinition::DOUBLE);
  1146                 continue;
  1147             }
  1149             LSafepoint *safepoint = ins->safepoint();
  1151             for (size_t k = 0; k < reg->numIntervals(); k++) {
  1152                 LiveInterval *interval = reg->getInterval(k);
  1153                 if (!interval->covers(inputOf(ins)))
  1154                     continue;
  1156                 LAllocation *a = interval->getAllocation();
  1157                 if (a->isGeneralReg() && ins->isCall())
  1158                     continue;
  1160                 switch (reg->type()) {
  1161                   case LDefinition::OBJECT:
  1162                     safepoint->addGcPointer(*a);
  1163                     break;
  1164                   case LDefinition::SLOTS:
  1165                     safepoint->addSlotsOrElementsPointer(*a);
  1166                     break;
  1167 #ifdef JS_NUNBOX32
  1168                   case LDefinition::TYPE:
  1169                     safepoint->addNunboxType(i, *a);
  1170                     break;
  1171                   case LDefinition::PAYLOAD:
  1172                     safepoint->addNunboxPayload(i, *a);
  1173                     break;
  1174 #else
  1175                   case LDefinition::BOX:
  1176                     safepoint->addBoxedValue(*a);
  1177                     break;
  1178 #endif
  1179                   default:
  1180                     MOZ_ASSUME_UNREACHABLE("Bad register type");
  1181                 }
  1182             }
  1183         }
  1184     }
  1186     return true;
  1187 }
  1189 void
  1190 BacktrackingAllocator::dumpRegisterGroups()
  1191 {
  1192     fprintf(stderr, "Register groups:\n");
  1194     // Virtual register number 0 is unused.
  1195     JS_ASSERT(!vregs[0u].group());
  1196     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
  1197         VirtualRegisterGroup *group = vregs[i].group();
  1198         if (group && i == group->canonicalReg()) {
  1199             for (size_t j = 0; j < group->registers.length(); j++)
  1200                 fprintf(stderr, " v%u", group->registers[j]);
  1201             fprintf(stderr, "\n");
  1202         }
  1203     }
  1204 }
  1206 void
  1207 BacktrackingAllocator::dumpLiveness()
  1208 {
  1209 #ifdef DEBUG
  1210     fprintf(stderr, "Virtual Registers:\n");
  1212     for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
  1213         LBlock *block = graph.getBlock(blockIndex);
  1214         MBasicBlock *mir = block->mir();
  1216         fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
  1217         for (size_t i = 0; i < mir->numSuccessors(); i++)
  1218             fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
  1219         fprintf(stderr, "\n");
  1221         for (size_t i = 0; i < block->numPhis(); i++) {
  1222             LPhi *phi = block->getPhi(i);
  1224             // Don't print the inputOf for phi nodes, since it's never used.
  1225             fprintf(stderr, "[,%u Phi [def v%u] <-",
  1226                     outputOf(phi).pos(),
  1227                     phi->getDef(0)->virtualRegister());
  1228             for (size_t j = 0; j < phi->numOperands(); j++)
  1229                 fprintf(stderr, " %s", phi->getOperand(j)->toString());
  1230             fprintf(stderr, "]\n");
  1231         }
  1233         for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
  1234             LInstruction *ins = *iter;
  1236             fprintf(stderr, "[%u,%u %s]", inputOf(ins).pos(), outputOf(ins).pos(), ins->opName());
  1238             for (size_t i = 0; i < ins->numTemps(); i++) {
  1239                 LDefinition *temp = ins->getTemp(i);
  1240                 if (!temp->isBogusTemp())
  1241                     fprintf(stderr, " [temp v%u]", temp->virtualRegister());
  1242             }
  1244             for (size_t i = 0; i < ins->numDefs(); i++) {
  1245                 LDefinition *def = ins->getDef(i);
  1246                 fprintf(stderr, " [def v%u]", def->virtualRegister());
  1247             }
  1249             for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
  1250                 fprintf(stderr, " [use %s]", alloc->toString());
  1252             fprintf(stderr, "\n");
  1253         }
  1254     }
  1256     fprintf(stderr, "\nLive Ranges:\n\n");
  1258     for (size_t i = 0; i < AnyRegister::Total; i++)
  1259         if (registers[i].allocatable)
  1260             fprintf(stderr, "reg %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->rangesToString());
  1262     // Virtual register number 0 is unused.
  1263     JS_ASSERT(vregs[0u].numIntervals() == 0);
  1264     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
  1265         fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
  1266         VirtualRegister &vreg = vregs[i];
  1267         for (size_t j = 0; j < vreg.numIntervals(); j++) {
  1268             if (j)
  1269                 fprintf(stderr, " *");
  1270             fprintf(stderr, "%s", vreg.getInterval(j)->rangesToString());
  1271         }
  1272         fprintf(stderr, "\n");
  1273     }
  1275     fprintf(stderr, "\n");
  1276 #endif // DEBUG
  1277 }
  1279 #ifdef DEBUG
  1280 struct BacktrackingAllocator::PrintLiveIntervalRange
  1281 {
  1282     void operator()(const AllocatedRange &item)
  1283     {
  1284         if (item.range == item.interval->getRange(0)) {
  1285             if (item.interval->hasVreg())
  1286                 fprintf(stderr, "  v%u: %s\n",
  1287                        item.interval->vreg(),
  1288                        item.interval->rangesToString());
  1289             else
  1290                 fprintf(stderr, "  fixed: %s\n",
  1291                        item.interval->rangesToString());
  1292         }
  1293     }
  1294 };
  1295 #endif
  1297 void
  1298 BacktrackingAllocator::dumpAllocations()
  1299 {
  1300 #ifdef DEBUG
  1301     fprintf(stderr, "Allocations:\n");
  1303     // Virtual register number 0 is unused.
  1304     JS_ASSERT(vregs[0u].numIntervals() == 0);
  1305     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
  1306         fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
  1307         VirtualRegister &vreg = vregs[i];
  1308         for (size_t j = 0; j < vreg.numIntervals(); j++) {
  1309             if (j)
  1310                 fprintf(stderr, " *");
  1311             LiveInterval *interval = vreg.getInterval(j);
  1312             fprintf(stderr, "%s :: %s", interval->rangesToString(), interval->getAllocation()->toString());
  1313         }
  1314         fprintf(stderr, "\n");
  1315     }
  1317     fprintf(stderr, "\n");
  1319     for (size_t i = 0; i < AnyRegister::Total; i++) {
  1320         if (registers[i].allocatable) {
  1321             fprintf(stderr, "reg %s:\n", AnyRegister::FromCode(i).name());
  1322             registers[i].allocations.forEach(PrintLiveIntervalRange());
  1323         }
  1324     }
  1326     fprintf(stderr, "\n");
  1327 #endif // DEBUG
  1328 }
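       // Build a single-range interval for vreg covering [from, to), link it
       // to spillInterval, and append it to the given vector.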
  1330 bool
  1331 BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
  1332                                        LiveInterval *spillInterval,
  1333                                        CodePosition from, CodePosition to)
  1334 {
  1335     LiveInterval *interval = LiveInterval::New(alloc(), vreg, 0);
  1336     interval->setSpillInterval(spillInterval);
  1337     return interval->addRange(from, to) && intervals.append(interval);
  1338 }
  1340 ///////////////////////////////////////////////////////////////////////////////
  1341 // Heuristic Methods
  1342 ///////////////////////////////////////////////////////////////////////////////
  1344 size_t
  1345 BacktrackingAllocator::computePriority(const LiveInterval *interval)
  1346 {
  1347     // The priority of an interval is its total length, so that longer lived
  1348     // intervals will be processed before shorter ones (even if the longer ones
  1349     // have a low spill weight). See processInterval().
  1350     size_t lifetimeTotal = 0;
  1352     for (size_t i = 0; i < interval->numRanges(); i++) {
  1353         const LiveInterval::Range *range = interval->getRange(i);
  1354         lifetimeTotal += range->to.pos() - range->from.pos();
  1355     }
  1357     return lifetimeTotal;
  1358 }
  1360 size_t
  1361 BacktrackingAllocator::computePriority(const VirtualRegisterGroup *group)
  1362 {
  1363     size_t priority = 0;
  1364     for (size_t j = 0; j < group->registers.length(); j++) {
  1365         uint32_t vreg = group->registers[j];
  1366         priority += computePriority(vregs[vreg].getInterval(0));
  1368     return priority;
bool
BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
{
    // Whether interval is a minimal interval capturing a definition at ins.
    return (interval->end() <= minimalDefEnd(ins).next()) &&
        ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
}

bool
BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
{
    // Whether interval is a minimal interval capturing a use at ins.
    return (interval->start() == inputOf(ins)) &&
        (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
}

bool
BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
{
    if (!interval->hasVreg()) {
        *pfixed = true;
        return true;
    }

    if (interval->index() == 0) {
        VirtualRegister &reg = vregs[interval->vreg()];
        if (pfixed)
            *pfixed = reg.def()->policy() == LDefinition::PRESET && reg.def()->output()->isRegister();
        return minimalDef(interval, reg.ins());
    }

    bool fixed = false, minimal = false;

    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
        LUse *use = iter->use;

        switch (use->policy()) {
          case LUse::FIXED:
            if (fixed)
                return false;
            fixed = true;
            if (minimalUse(interval, insData[iter->pos].ins()))
                minimal = true;
            break;

          case LUse::REGISTER:
            if (minimalUse(interval, insData[iter->pos].ins()))
                minimal = true;
            break;

          default:
            break;
        }
    }

    if (pfixed)
        *pfixed = fixed;
    return minimal;
}

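// Illustrative note: an interval with two FIXED uses is never reported as
// minimal above, since the two uses may demand incompatible registers and the
// interval would still have to be split between them.
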
size_t
BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
{
    // Minimal intervals have an extremely high spill weight, to ensure they
    // can evict any other intervals and be allocated to a register.
    bool fixed;
    if (minimalInterval(interval, &fixed))
        return fixed ? 2000000 : 1000000;

    size_t usesTotal = 0;

    if (interval->index() == 0) {
        VirtualRegister *reg = &vregs[interval->vreg()];
        if (reg->def()->policy() == LDefinition::PRESET && reg->def()->output()->isRegister())
            usesTotal += 2000;
        else if (!reg->ins()->isPhi())
            usesTotal += 2000;
    }

    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
        LUse *use = iter->use;

        switch (use->policy()) {
          case LUse::ANY:
            usesTotal += 1000;
            break;

          case LUse::REGISTER:
          case LUse::FIXED:
            usesTotal += 2000;
            break;

          case LUse::KEEPALIVE:
            break;

          default:
            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
            MOZ_ASSUME_UNREACHABLE("Bad use");
        }
    }

    // Intervals for registers in groups get higher weights.
    if (interval->hint()->kind() != Requirement::NONE)
        usesTotal += 2000;

    // Compute spill weight as a use density, lowering the weight for long
    // lived intervals with relatively few uses.
    size_t lifetimeTotal = computePriority(interval);
    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}

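// Worked example with hypothetical numbers (not from the source): a
// non-minimal interval with a non-phi definition (2000), one REGISTER use
// (2000) and one ANY use (1000), whose ranges span 50 code positions in
// total, gets a spill weight of (2000 + 2000 + 1000) / 50 = 100. The same
// uses packed into a 10-position interval weigh 500, making that interval
// harder to evict.
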
size_t
BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup *group)
{
    size_t maxWeight = 0;
    for (size_t j = 0; j < group->registers.length(); j++) {
        uint32_t vreg = group->registers[j];
        maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
    }
    return maxWeight;
}

bool
BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
{
    // If this interval has portions that are hot and portions that are cold,
    // split it at the boundaries between hot and cold code.

    const LiveInterval::Range *hotRange = nullptr;

    for (size_t i = 0; i < interval->numRanges(); i++) {
        AllocatedRange range(interval, interval->getRange(i)), existing;
        if (hotcode.contains(range, &existing)) {
            hotRange = existing.range;
            break;
        }
    }

    // Don't split if there is no hot code in the interval.
    if (!hotRange)
        return true;

    // Don't split if there is no cold code in the interval.
    bool coldCode = false;
    for (size_t i = 0; i < interval->numRanges(); i++) {
        if (!hotRange->contains(interval->getRange(i))) {
            coldCode = true;
            break;
        }
    }
    if (!coldCode)
        return true;

    SplitPositionVector splitPositions;
    if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
        return false;
    *success = true;
    return splitAt(interval, splitPositions);
}

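// Illustrative example (hypothetical layout, not from the source): if an
// interval covers [10,100) and hotcode contains the range [40,60), the split
// positions are 40 and 60, breaking the interval into a cold piece before the
// hot range, a piece inside it, and a cold piece after it, so the hot piece
// can compete for a register on its own.
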
bool
BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success)
{
    // If this interval's later uses do not require it to be in a register,
    // split it after the last use which does require a register. If conflict
    // is specified, only consider register uses before the conflict starts.

    CodePosition lastRegisterFrom, lastRegisterTo, lastUse;

    for (UsePositionIterator iter(interval->usesBegin());
         iter != interval->usesEnd();
         iter++)
    {
        LUse *use = iter->use;
        LInstruction *ins = insData[iter->pos].ins();

        // Uses in the interval should be sorted.
        JS_ASSERT(iter->pos >= lastUse);
        lastUse = inputOf(ins);

        if (!conflict || outputOf(ins) < conflict->start()) {
            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
                lastRegisterFrom = inputOf(ins);
                lastRegisterTo = iter->pos.next();
            }
        }
    }

    if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
        // Can't trim non-register uses off the end by splitting.
        return true;
    }

    SplitPositionVector splitPositions;
    if (!splitPositions.append(lastRegisterTo))
        return false;
    *success = true;
    return splitAt(interval, splitPositions);
}

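// Illustrative example (hypothetical positions, not from the source): if the
// last register-requiring use is at position 20 and the remaining uses do not
// need a register, the single split position is just past 20, so the tail of
// the interval can live in its spill location without adding register
// pressure.
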
bool
BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
{
    // Split this interval so that all its register uses become minimal
    // intervals and allow the vreg to be spilled throughout its range.

    LiveIntervalVector newIntervals;
    uint32_t vreg = interval->vreg();

    // If this LiveInterval is the result of an earlier split which created a
    // spill interval, that spill interval covers the whole range, so we don't
    // need to create a new one.
    bool spillIntervalIsNew = false;
    LiveInterval *spillInterval = interval->spillInterval();
    if (!spillInterval) {
        spillInterval = LiveInterval::New(alloc(), vreg, 0);
        spillIntervalIsNew = true;
    }

    CodePosition spillStart = interval->start();
    if (isRegisterDefinition(interval)) {
        // Treat the definition of the interval as a register use so that it
        // can be split and spilled ASAP.
        CodePosition from = interval->start();
        CodePosition to = minimalDefEnd(insData[from].ins()).next();
        if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
            return false;
        spillStart = to;
    }

    if (spillIntervalIsNew) {
        for (size_t i = 0; i < interval->numRanges(); i++) {
            const LiveInterval::Range *range = interval->getRange(i);
            CodePosition from = range->from < spillStart ? spillStart : range->from;
            if (!spillInterval->addRange(from, range->to))
                return false;
        }
    }

    for (UsePositionIterator iter(interval->usesBegin());
         iter != interval->usesEnd();
         iter++)
    {
        LInstruction *ins = insData[iter->pos].ins();
        if (iter->pos < spillStart) {
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        } else if (isRegisterUse(iter->use, ins)) {
            // For register uses which are not useRegisterAtStart, pick an
            // interval that covers both the instruction's input and output, so
            // that the register is not reused for an output.
            CodePosition from = inputOf(ins);
            CodePosition to = iter->pos.next();

            // Use the same interval for duplicate use positions, except when
            // the uses are fixed (they may require incompatible registers).
            if (newIntervals.empty() || newIntervals.back()->end() != to || iter->use->policy() == LUse::FIXED) {
                if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
                    return false;
            }

            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        } else {
            JS_ASSERT(spillIntervalIsNew);
            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        }
    }

    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
        return false;

    return split(interval, newIntervals) && requeueIntervals(newIntervals);
}

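// Illustrative sketch (not from the source): after this split, the vreg is
// represented by one small interval around its definition (when the
// definition requires a register), one small interval around each register
// use, and a spill interval covering everything else, so the value spends
// most of its lifetime in its spill location.
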
// Find the next split position after the current position.
static size_t NextSplitPosition(size_t activeSplitPosition,
                                const SplitPositionVector &splitPositions,
                                CodePosition currentPos)
{
    while (activeSplitPosition < splitPositions.length() &&
           splitPositions[activeSplitPosition] <= currentPos)
    {
        ++activeSplitPosition;
    }
    return activeSplitPosition;
}

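// Illustrative example (hypothetical values, not from the source): with
// splitPositions = { 10, 20, 30 } and activeSplitPosition = 0, a currentPos
// of 15 skips past 10 and returns index 1 (the entry for 20); a currentPos of
// 35 returns 3, i.e. splitPositions.length().
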
// Test whether the current position has just crossed a split point.
static bool SplitHere(size_t activeSplitPosition,
                      const SplitPositionVector &splitPositions,
                      CodePosition currentPos)
{
    return activeSplitPosition < splitPositions.length() &&
           currentPos >= splitPositions[activeSplitPosition];
}

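// Illustrative example (continuing the hypothetical values above): with
// activeSplitPosition = 1 and splitPositions = { 10, 20, 30 }, SplitHere
// returns false for a currentPos of 15 and true once currentPos reaches 20,
// i.e. when the iteration crosses the next pending split point.
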
bool
BacktrackingAllocator::splitAt(LiveInterval *interval,
                               const SplitPositionVector &splitPositions)
{
    // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
    // consolidate any register uses which have no intervening split points into the
    // same resulting interval.

    // splitPositions should be non-empty and sorted.
    JS_ASSERT(!splitPositions.empty());
    for (size_t i = 1; i < splitPositions.length(); ++i)
        JS_ASSERT(splitPositions[i-1] < splitPositions[i]);

    // Don't spill the interval until after the end of its definition.
    CodePosition spillStart = interval->start();
    if (isRegisterDefinition(interval))
        spillStart = minimalDefEnd(insData[interval->start()].ins()).next();

    uint32_t vreg = interval->vreg();

    // If this LiveInterval is the result of an earlier split which created a
    // spill interval, that spill interval covers the whole range, so we don't
    // need to create a new one.
    bool spillIntervalIsNew = false;
    LiveInterval *spillInterval = interval->spillInterval();
    if (!spillInterval) {
        spillInterval = LiveInterval::New(alloc(), vreg, 0);
        spillIntervalIsNew = true;

        for (size_t i = 0; i < interval->numRanges(); i++) {
            const LiveInterval::Range *range = interval->getRange(i);
            CodePosition from = range->from < spillStart ? spillStart : range->from;
            if (!spillInterval->addRange(from, range->to))
                return false;
        }
    }

    LiveIntervalVector newIntervals;

    CodePosition lastRegisterUse;
    if (spillStart != interval->start()) {
        LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
        newInterval->setSpillInterval(spillInterval);
        if (!newIntervals.append(newInterval))
            return false;
        lastRegisterUse = interval->start();
    }

    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
    for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
        LInstruction *ins = insData[iter->pos].ins();
        if (iter->pos < spillStart) {
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
            activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
        } else if (isRegisterUse(iter->use, ins)) {
            if (lastRegisterUse.pos() == 0 ||
                SplitHere(activeSplitPosition, splitPositions, iter->pos))
            {
                // Place this register use into a different interval from the
                // last one if there are any split points between the two uses.
                LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
                newInterval->setSpillInterval(spillInterval);
                if (!newIntervals.append(newInterval))
                    return false;
                activeSplitPosition = NextSplitPosition(activeSplitPosition,
                                                        splitPositions,
                                                        iter->pos);
            }
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
            lastRegisterUse = iter->pos;
        } else {
            JS_ASSERT(spillIntervalIsNew);
            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        }
    }

    // Compute ranges for each new interval that cover all its uses.
    size_t activeRange = interval->numRanges();
    for (size_t i = 0; i < newIntervals.length(); i++) {
        LiveInterval *newInterval = newIntervals[i];
        CodePosition start, end;
        if (i == 0 && spillStart != interval->start()) {
            start = interval->start();
            if (newInterval->usesEmpty())
                end = spillStart;
            else
                end = newInterval->usesBack()->pos.next();
        } else {
            start = inputOf(insData[newInterval->usesBegin()->pos].ins());
            end = newInterval->usesBack()->pos.next();
        }
        for (; activeRange > 0; --activeRange) {
            const LiveInterval::Range *range = interval->getRange(activeRange - 1);
            if (range->to <= start)
                continue;
            if (range->from >= end)
                break;
            if (!newInterval->addRange(Max(range->from, start),
                                       Min(range->to, end)))
                return false;
            if (range->to >= end)
                break;
        }
    }

    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
        return false;

    return split(interval, newIntervals) && requeueIntervals(newIntervals);
}

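// Illustrative walk-through (hypothetical positions, not from the source):
// splitting an interval with register uses at 8, 14 and 40 at the single
// position 20 keeps the uses at 8 and 14 together in one new interval, places
// the use at 40 in a second interval, and sends the non-register uses to the
// spill interval; the final loop then clips the original ranges so each new
// interval covers just its own uses.
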
bool
BacktrackingAllocator::splitAcrossCalls(LiveInterval *interval)
{
    // Split the interval to separate register uses and non-register uses and
    // allow the vreg to be spilled across its range.

    // Find the locations of all calls in the interval's range. Fixed intervals
    // are introduced by buildLivenessInfo only for calls when allocating for
    // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
    // iterate through it backwards.
    SplitPositionVector callPositions;
    for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
        const LiveInterval::Range *range = fixedIntervalsUnion->getRange(i - 1);
        if (interval->covers(range->from) && interval->covers(range->from.previous())) {
            if (!callPositions.append(range->from))
                return false;
        }
    }
    JS_ASSERT(callPositions.length());

    return splitAt(interval, callPositions);
}

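// Illustrative note: because the fixed intervals collected here correspond to
// call sites, the resulting split positions are exactly the calls the
// interval spans, so the value can be spilled around each call instead of
// sitting in a register that the call would clobber.
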
bool
BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
{
    bool success = false;

    if (!trySplitAcrossHotcode(interval, &success))
        return false;
    if (success)
        return true;

    if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
        return false;
    if (success)
        return true;

    return splitAtAllRegisterUses(interval);
}
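
// Illustrative note: the strategies above are tried from least to most
// aggressive. Splitting at hot/cold boundaries or after the last register use
// keeps large contiguous pieces, while splitAtAllRegisterUses is the fallback
// that always produces smaller (eventually minimal) intervals.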
