/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BacktrackingAllocator.h"
#include "jit/BitSet.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

bool
BacktrackingAllocator::init()
{
    RegisterSet remainingRegisters(allRegisters_);
    while (!remainingRegisters.empty(/* float = */ false)) {
        AnyRegister reg = AnyRegister(remainingRegisters.takeGeneral());
        registers[reg.code()].allocatable = true;
    }
    while (!remainingRegisters.empty(/* float = */ true)) {
        AnyRegister reg = AnyRegister(remainingRegisters.takeFloat());
        registers[reg.code()].allocatable = true;
    }

    LifoAlloc *lifoAlloc = mir->alloc().lifoAlloc();
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        registers[i].reg = AnyRegister::FromCode(i);
        registers[i].allocations.setAllocator(lifoAlloc);

        LiveInterval *fixed = fixedIntervals[i];
        for (size_t j = 0; j < fixed->numRanges(); j++) {
            AllocatedRange range(fixed, fixed->getRange(j));
            if (!registers[i].allocations.insert(range))
                return false;
        }
    }

    hotcode.setAllocator(lifoAlloc);

    // Partition the graph into hot and cold sections, for helping to make
    // splitting decisions. Since we don't have any profiling data this is a
    // crapshoot, so just mark the bodies of inner loops as hot and everything
    // else as cold.

    LiveInterval *hotcodeInterval = LiveInterval::New(alloc(), 0);

    LBlock *backedge = nullptr;
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);

        // If we see a loop header, mark the backedge so we know when we have
        // hit the end of the loop. Don't process the loop immediately, so that
        // if there is an inner loop we will ignore the outer backedge.
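        // The hot range itself is added below, once the backedge block is
        // reached and its loop header is known.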
        if (block->mir()->isLoopHeader())
            backedge = block->mir()->backedge()->lir();

        if (block == backedge) {
            LBlock *header = block->mir()->loopHeaderOfBackedge()->lir();
            CodePosition from = inputOf(header->firstId());
            CodePosition to = outputOf(block->lastId()).next();
            if (!hotcodeInterval->addRange(from, to))
                return false;
        }
    }

    for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
        AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
        if (!hotcode.insert(range))
            return false;
    }

    return true;
}

bool
BacktrackingAllocator::go()
{
    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");

    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
    if (!buildLivenessInfo())
        return false;
    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");

    if (!init())
        return false;

    if (IonSpewEnabled(IonSpew_RegAlloc))
        dumpLiveness();

    if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
        return false;

    if (!groupAndQueueRegisters())
        return false;

    if (IonSpewEnabled(IonSpew_RegAlloc))
        dumpRegisterGroups();

    // Allocate, spill and split register intervals until finished.
    while (!allocationQueue.empty()) {
        if (mir->shouldCancel("Backtracking Allocation"))
            return false;

        QueueItem item = allocationQueue.removeHighest();
        if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
            return false;
    }

    if (IonSpewEnabled(IonSpew_RegAlloc))
        dumpAllocations();

    return resolveControlFlow() && reifyAllocations() && populateSafepoints();
}

static bool
LifetimesOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
{
    // Registers may have been eagerly split in two, see tryGroupReusedRegister.
    // In such cases, only consider the first interval.
    JS_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);

    LiveInterval *interval0 = reg0->getInterval(0), *interval1 = reg1->getInterval(0);

    // Interval ranges are sorted in reverse order. The lifetimes overlap if
    // any of their ranges overlap.
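    // Walk both range lists in parallel, advancing whichever interval's
    // current range lies entirely after the other's.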
    size_t index0 = 0, index1 = 0;
    while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
        const LiveInterval::Range
            *range0 = interval0->getRange(index0),
            *range1 = interval1->getRange(index1);
        if (range0->from >= range1->to)
            index0++;
        else if (range1->from >= range0->to)
            index1++;
        else
            return true;
    }

    return false;
}

bool
BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
{
    for (size_t i = 0; i < group->registers.length(); i++) {
        if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
            return false;
    }
    return true;
}

bool
BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
{
    // See if reg0 and reg1 can be placed in the same group, following the
    // restrictions imposed by VirtualRegisterGroup and any other registers
    // already grouped with reg0 or reg1.
    BacktrackingVirtualRegister *reg0 = &vregs[vreg0], *reg1 = &vregs[vreg1];

    if (reg0->isFloatReg() != reg1->isFloatReg())
        return true;

    VirtualRegisterGroup *group0 = reg0->group(), *group1 = reg1->group();

    if (!group0 && group1)
        return tryGroupRegisters(vreg1, vreg0);

    if (group0) {
        if (group1) {
            if (group0 == group1) {
                // The registers are already grouped together.
                return true;
            }
            // Try to unify the two distinct groups.
            for (size_t i = 0; i < group1->registers.length(); i++) {
                if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
                    return true;
            }
            for (size_t i = 0; i < group1->registers.length(); i++) {
                uint32_t vreg = group1->registers[i];
                if (!group0->registers.append(vreg))
                    return false;
                vregs[vreg].setGroup(group0);
            }
            return true;
        }
        if (!canAddToGroup(group0, reg1))
            return true;
        if (!group0->registers.append(vreg1))
            return false;
        reg1->setGroup(group0);
        return true;
    }

    if (LifetimesOverlap(reg0, reg1))
        return true;

    VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup(alloc());
    if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
        return false;

    reg0->setGroup(group);
    reg1->setGroup(group);
    return true;
}

bool
BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
{
    BacktrackingVirtualRegister &reg = vregs[def], &usedReg = vregs[use];

    // reg is a vreg which reuses its input usedReg for its output physical
    // register. Try to group reg with usedReg if at all possible, as avoiding
    // copies before reg's instruction is crucial for the quality of the
    // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
    // on x86/x64).
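    // Whenever grouping is not possible below, fall back to
    // setMustCopyInput() so that later phases know the reused input cannot
    // share its allocation with the output.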

    if (reg.intervalFor(inputOf(reg.ins()))) {
        JS_ASSERT(reg.isTemp());
        reg.setMustCopyInput();
        return true;
    }

    if (!usedReg.intervalFor(outputOf(reg.ins()))) {
        // The input is not live after the instruction, either in a safepoint
        // for the instruction or in subsequent code. The input and output
        // can thus be in the same group.
        return tryGroupRegisters(use, def);
    }

    // The input is live afterwards, either in future instructions or in a
    // safepoint for the reusing instruction. This is impossible to satisfy
    // without copying the input.
    //
    // It may or may not be better to split the interval at the point of the
    // definition, which may permit grouping. One case where it is definitely
    // better to split is if the input never has any register uses after the
    // instruction. Handle this splitting eagerly.

    if (usedReg.numIntervals() != 1 ||
        (usedReg.def()->isPreset() && !usedReg.def()->output()->isRegister())) {
        reg.setMustCopyInput();
        return true;
    }
    LiveInterval *interval = usedReg.getInterval(0);
    LBlock *block = insData[reg.ins()].block();

    // The input's lifetime must end within the same block as the definition,
    // otherwise it could live on in phis elsewhere.
    if (interval->end() > outputOf(block->lastId())) {
        reg.setMustCopyInput();
        return true;
    }

    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
        if (iter->pos <= inputOf(reg.ins()))
            continue;

        LUse *use = iter->use;
        if (FindReusingDefinition(insData[iter->pos].ins(), use)) {
            reg.setMustCopyInput();
            return true;
        }
        if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
            reg.setMustCopyInput();
            return true;
        }
    }

    LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
    for (size_t i = 0; i < interval->numRanges(); i++) {
        const LiveInterval::Range *range = interval->getRange(i);
        JS_ASSERT(range->from <= inputOf(reg.ins()));

        CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
        if (!preInterval->addRange(range->from, to))
            return false;
    }

    LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
    if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
        return false;

    LiveIntervalVector newIntervals;
    if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
        return false;

    distributeUses(interval, newIntervals);

    if (!split(interval, newIntervals))
        return false;

    JS_ASSERT(usedReg.numIntervals() == 2);

    usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));

    return tryGroupRegisters(use, def);
}

bool
BacktrackingAllocator::groupAndQueueRegisters()
{
    // Try to group registers with their reused inputs.
    // Virtual register number 0 is unused.
    JS_ASSERT(vregs[0u].numIntervals() == 0);
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        BacktrackingVirtualRegister &reg = vregs[i];
        if (!reg.numIntervals())
            continue;

        if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
            LUse *use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
            if (!tryGroupReusedRegister(i, use->virtualRegister()))
                return false;
        }
    }

    // Try to group phis with their inputs.
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            uint32_t output = phi->getDef(0)->virtualRegister();
            for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
                if (!tryGroupRegisters(input, output))
                    return false;
            }
        }
    }

    // Virtual register number 0 is unused.
    JS_ASSERT(vregs[0u].numIntervals() == 0);
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        if (mir->shouldCancel("Backtracking Enqueue Registers"))
            return false;

        BacktrackingVirtualRegister &reg = vregs[i];
        JS_ASSERT(reg.numIntervals() <= 2);
        JS_ASSERT(!reg.canonicalSpill());

        if (!reg.numIntervals())
            continue;

        // Disable this for now; see bugs 906858, 931487, and 932465.
#if 0
        // Eagerly set the canonical spill slot for registers which are preset
        // for that slot, and reuse it for other registers in the group.
        LDefinition *def = reg.def();
        if (def->policy() == LDefinition::PRESET && !def->output()->isRegister()) {
            reg.setCanonicalSpill(*def->output());
            if (reg.group() && reg.group()->spill.isUse())
                reg.group()->spill = *def->output();
        }
#endif

        // Place all intervals for this register on the allocation queue.
        // During initial queueing use single queue items for groups of
        // registers, so that they will be allocated together and reduce the
        // risk of unnecessary conflicts. This is in keeping with the idea that
        // register groups are effectively single registers whose value changes
        // during execution. If any intervals in the group are evicted later
        // then they will be reallocated individually.
        size_t start = 0;
        if (VirtualRegisterGroup *group = reg.group()) {
            if (i == group->canonicalReg()) {
                size_t priority = computePriority(group);
                if (!allocationQueue.insert(QueueItem(group, priority)))
                    return false;
            }
            start++;
        }
        for (; start < reg.numIntervals(); start++) {
            LiveInterval *interval = reg.getInterval(start);
            if (interval->numRanges() > 0) {
                size_t priority = computePriority(interval);
                if (!allocationQueue.insert(QueueItem(interval, priority)))
                    return false;
            }
        }
    }

    return true;
}

static const size_t MAX_ATTEMPTS = 2;

bool
BacktrackingAllocator::tryAllocateFixed(LiveInterval *interval, bool *success,
                                        bool *pfixed, LiveInterval **pconflicting)
{
    // Spill intervals which are required to be in a certain stack slot.
    if (!interval->requirement()->allocation().isRegister()) {
        IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
        interval->setAllocation(interval->requirement()->allocation());
        *success = true;
        return true;
    }

    AnyRegister reg = interval->requirement()->allocation().toRegister();
    return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting);
}

bool
BacktrackingAllocator::tryAllocateNonFixed(LiveInterval *interval, bool *success,
                                           bool *pfixed, LiveInterval **pconflicting)
{
    // If we want, but do not require an interval to be in a specific
    // register, only look at that register for allocating and evict
    // or spill if it is not available. Picking a separate register may
    // be even worse than spilling, as it will still necessitate moves
    // and will tie up more registers than if we spilled.
    if (interval->hint()->kind() == Requirement::FIXED) {
        AnyRegister reg = interval->hint()->allocation().toRegister();
        if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting))
            return false;
        if (*success)
            return true;
    }

    // Spill intervals which have no hint or register requirement.
    if (interval->requirement()->kind() == Requirement::NONE) {
        spill(interval);
        *success = true;
        return true;
    }

    if (!*pconflicting || minimalInterval(interval)) {
        // Search for any available register which the interval can be
        // allocated to.
        for (size_t i = 0; i < AnyRegister::Total; i++) {
            if (!tryAllocateRegister(registers[i], interval, success, pfixed, pconflicting))
                return false;
            if (*success)
                return true;
        }
    }

    // We failed to allocate this interval.
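    // Leave it to the caller (processInterval) to evict the recorded
    // conflict or to split this interval.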
    JS_ASSERT(!*success);
    return true;
}

bool
BacktrackingAllocator::processInterval(LiveInterval *interval)
{
    if (IonSpewEnabled(IonSpew_RegAlloc)) {
        IonSpew(IonSpew_RegAlloc, "Allocating v%u [priority %lu] [weight %lu]: %s",
                interval->vreg(), computePriority(interval), computeSpillWeight(interval),
                interval->rangesToString());
    }

    // An interval can be processed by doing any of the following:
    //
    // - Assigning the interval a register. The interval cannot overlap any
    //   other interval allocated for that physical register.
    //
    // - Spilling the interval, provided it has no register uses.
    //
    // - Splitting the interval into two or more intervals which cover the
    //   original one. The new intervals are placed back onto the priority
    //   queue for later processing.
    //
    // - Evicting one or more existing allocated intervals, and then doing one
    //   of the above operations. Evicted intervals are placed back on the
    //   priority queue. Any evicted intervals must have a lower spill weight
    //   than the interval being processed.
    //
    // As long as this structure is followed, termination is guaranteed.
    // In general, we want to minimize the amount of interval splitting
    // (which generally necessitates spills), so allocate longer lived, lower
    // weight intervals first and evict and split them later if they prevent
    // allocation for higher weight intervals.

    bool canAllocate = setIntervalRequirement(interval);

    bool fixed;
    LiveInterval *conflict = nullptr;
    for (size_t attempt = 0;; attempt++) {
        if (canAllocate) {
            bool success = false;
            fixed = false;
            conflict = nullptr;

            // Ok, let's try allocating for this interval.
            if (interval->requirement()->kind() == Requirement::FIXED) {
                if (!tryAllocateFixed(interval, &success, &fixed, &conflict))
                    return false;
            } else {
                if (!tryAllocateNonFixed(interval, &success, &fixed, &conflict))
                    return false;
            }

            // If that worked, we're done!
            if (success)
                return true;

            // If that didn't work, but we have a non-fixed LiveInterval known
            // to be conflicting, maybe we can evict it and try again.
            if (attempt < MAX_ATTEMPTS &&
                !fixed &&
                conflict &&
                computeSpillWeight(conflict) < computeSpillWeight(interval))
            {
                if (!evictInterval(conflict))
                    return false;
                continue;
            }
        }

        // A minimal interval cannot be split any further. If we try to split
        // it at this point we will just end up with the same interval and will
        // enter an infinite loop. Weights and the initial live intervals must
        // be constructed so that any minimal interval is allocatable.
        JS_ASSERT(!minimalInterval(interval));

        if (canAllocate && fixed)
            return splitAcrossCalls(interval);
        return chooseIntervalSplit(interval, conflict);
    }
}

bool
BacktrackingAllocator::processGroup(VirtualRegisterGroup *group)
{
    if (IonSpewEnabled(IonSpew_RegAlloc)) {
        IonSpew(IonSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
                group->registers[0], computePriority(group), computeSpillWeight(group));
    }

    bool fixed;
    LiveInterval *conflict;
    for (size_t attempt = 0;; attempt++) {
        // Search for any available register which the group can be allocated to.
        fixed = false;
        conflict = nullptr;
        for (size_t i = 0; i < AnyRegister::Total; i++) {
            bool success;
            if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
                return false;
            if (success) {
                conflict = nullptr;
                break;
            }
        }

        if (attempt < MAX_ATTEMPTS &&
            !fixed &&
            conflict &&
            conflict->hasVreg() &&
            computeSpillWeight(conflict) < computeSpillWeight(group))
        {
            if (!evictInterval(conflict))
                return false;
            continue;
        }

        for (size_t i = 0; i < group->registers.length(); i++) {
            VirtualRegister &reg = vregs[group->registers[i]];
            JS_ASSERT(reg.numIntervals() <= 2);
            if (!processInterval(reg.getInterval(0)))
                return false;
        }

        return true;
    }
}

bool
BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
{
    // Set any requirement or hint on interval according to its definition and
    // uses. Return false if there are conflicting requirements which will
    // require the interval to be split.
    interval->setHint(Requirement());
    interval->setRequirement(Requirement());

    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];

    // Set a hint if another interval in the same group is in a register.
    if (VirtualRegisterGroup *group = reg->group()) {
        if (group->allocation.isRegister()) {
            if (IonSpewEnabled(IonSpew_RegAlloc)) {
                IonSpew(IonSpew_RegAlloc, "Hint %s, used by group allocation",
                        group->allocation.toString());
            }
            interval->setHint(Requirement(group->allocation));
        }
    }

    if (interval->index() == 0) {
        // The first interval is the definition, so deal with any definition
        // constraints/hints.

        LDefinition::Policy policy = reg->def()->policy();
        if (policy == LDefinition::PRESET) {
            // Preset policies get a FIXED requirement.
            if (IonSpewEnabled(IonSpew_RegAlloc)) {
                IonSpew(IonSpew_RegAlloc, "Requirement %s, preset by definition",
                        reg->def()->output()->toString());
            }
            interval->setRequirement(Requirement(*reg->def()->output()));
        } else if (reg->ins()->isPhi()) {
            // Phis don't have any requirements, but they should prefer their
            // input allocations. This is captured by the group hints above.
        } else {
            // Non-phis get a REGISTER requirement.
            interval->setRequirement(Requirement(Requirement::REGISTER));
        }
    }

    // Search uses for requirements.
    for (UsePositionIterator iter = interval->usesBegin();
         iter != interval->usesEnd();
         iter++)
    {
        LUse::Policy policy = iter->use->policy();
        if (policy == LUse::FIXED) {
            AnyRegister required = GetFixedRegister(reg->def(), iter->use);

            if (IonSpewEnabled(IonSpew_RegAlloc)) {
                IonSpew(IonSpew_RegAlloc, "Requirement %s, due to use at %u",
                        required.name(), iter->pos.pos());
            }

            // If there are multiple fixed registers which the interval is
            // required to use, fail. The interval will need to be split before
            // it can be allocated.
            if (!interval->addRequirement(Requirement(LAllocation(required))))
                return false;
        } else if (policy == LUse::REGISTER) {
            if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
                return false;
        }
    }

    return true;
}

bool
BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
                                                bool *psuccess, bool *pfixed, LiveInterval **pconflicting)
{
    *psuccess = false;

    if (!r.allocatable)
        return true;

    if (r.reg.isFloat() != vregs[group->registers[0]].isFloatReg())
        return true;

    bool allocatable = true;
    LiveInterval *conflicting = nullptr;

    for (size_t i = 0; i < group->registers.length(); i++) {
        VirtualRegister &reg = vregs[group->registers[i]];
        JS_ASSERT(reg.numIntervals() <= 2);
        LiveInterval *interval = reg.getInterval(0);

        for (size_t j = 0; j < interval->numRanges(); j++) {
            AllocatedRange range(interval, interval->getRange(j)), existing;
            if (r.allocations.contains(range, &existing)) {
                if (conflicting) {
                    if (conflicting != existing.interval)
                        return true;
                } else {
                    conflicting = existing.interval;
                }
                allocatable = false;
            }
        }
    }

    if (!allocatable) {
        JS_ASSERT(conflicting);
        if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
            *pconflicting = conflicting;
        if (!conflicting->hasVreg())
            *pfixed = true;
        return true;
    }

    *psuccess = true;

    group->allocation = LAllocation(r.reg);
    return true;
}

bool
BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
                                           bool *success, bool *pfixed, LiveInterval **pconflicting)
{
    *success = false;

    if (!r.allocatable)
        return true;

    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
    if (reg->isFloatReg() != r.reg.isFloat())
        return true;

    JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
                 interval->requirement()->allocation() == LAllocation(r.reg));

    for (size_t i = 0; i < interval->numRanges(); i++) {
        AllocatedRange range(interval, interval->getRange(i)), existing;
        if (r.allocations.contains(range, &existing)) {
            if (existing.interval->hasVreg()) {
                if (IonSpewEnabled(IonSpew_RegAlloc)) {
                    IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
                            r.reg.name(), existing.interval->vreg(),
                            existing.range->from.pos(), existing.range->to.pos(),
                            computeSpillWeight(existing.interval));
                }
                if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
                    *pconflicting = existing.interval;
            } else {
                if (IonSpewEnabled(IonSpew_RegAlloc)) {
                    IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
                            r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
                }
                *pfixed = true;
            }
            return true;
        }
    }

    IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());

    for (size_t i = 0; i < interval->numRanges(); i++) {
        AllocatedRange range(interval, interval->getRange(i));
        if (!r.allocations.insert(range))
            return false;
    }

    // Set any register hint for allocating other intervals in the same group.
    if (VirtualRegisterGroup *group = reg->group()) {
        if (!group->allocation.isRegister())
            group->allocation = LAllocation(r.reg);
    }

    interval->setAllocation(LAllocation(r.reg));
    *success = true;
    return true;
}

bool
BacktrackingAllocator::evictInterval(LiveInterval *interval)
{
    if (IonSpewEnabled(IonSpew_RegAlloc)) {
        IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
                interval->vreg(), interval->rangesToString());
    }

    JS_ASSERT(interval->getAllocation()->isRegister());

    AnyRegister reg(interval->getAllocation()->toRegister());
    PhysicalRegister &physical = registers[reg.code()];
    JS_ASSERT(physical.reg == reg && physical.allocatable);

    for (size_t i = 0; i < interval->numRanges(); i++) {
        AllocatedRange range(interval, interval->getRange(i));
        physical.allocations.remove(range);
    }

    interval->setAllocation(LAllocation());

    size_t priority = computePriority(interval);
    return allocationQueue.insert(QueueItem(interval, priority));
}

void
BacktrackingAllocator::distributeUses(LiveInterval *interval,
                                      const LiveIntervalVector &newIntervals)
{
    JS_ASSERT(newIntervals.length() >= 2);

    // Simple redistribution of uses from an old interval to a set of new
    // intervals. Intervals are permitted to overlap, in which case this will
    // assign uses in the overlapping section to the interval with the latest
    // start position.
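    // Every use position must be covered by at least one of the new
    // intervals.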
    for (UsePositionIterator iter(interval->usesBegin());
         iter != interval->usesEnd();
         iter++)
    {
        CodePosition pos = iter->pos;
        LiveInterval *addInterval = nullptr;
        for (size_t i = 0; i < newIntervals.length(); i++) {
            LiveInterval *newInterval = newIntervals[i];
            if (newInterval->covers(pos)) {
                if (!addInterval || newInterval->start() < addInterval->start())
                    addInterval = newInterval;
            }
        }
        addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
    }
}

bool
BacktrackingAllocator::split(LiveInterval *interval,
                             const LiveIntervalVector &newIntervals)
{
    if (IonSpewEnabled(IonSpew_RegAlloc)) {
        IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s into:",
                interval->vreg(), interval->rangesToString());
        for (size_t i = 0; i < newIntervals.length(); i++)
            IonSpew(IonSpew_RegAlloc, " %s", newIntervals[i]->rangesToString());
    }

    JS_ASSERT(newIntervals.length() >= 2);

    // Find the earliest interval in the new list.
    LiveInterval *first = newIntervals[0];
    for (size_t i = 1; i < newIntervals.length(); i++) {
        if (newIntervals[i]->start() < first->start())
            first = newIntervals[i];
    }

    // Replace the old interval in the virtual register's state with the new intervals.
    VirtualRegister *reg = &vregs[interval->vreg()];
    reg->replaceInterval(interval, first);
    for (size_t i = 0; i < newIntervals.length(); i++) {
        if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
            return false;
    }

    return true;
}

bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector &newIntervals)
{
    // Queue the new intervals for register assignment.
    for (size_t i = 0; i < newIntervals.length(); i++) {
        LiveInterval *newInterval = newIntervals[i];
        size_t priority = computePriority(newInterval);
        if (!allocationQueue.insert(QueueItem(newInterval, priority)))
            return false;
    }
    return true;
}

void
BacktrackingAllocator::spill(LiveInterval *interval)
{
    IonSpew(IonSpew_RegAlloc, "Spilling interval");

    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
    JS_ASSERT(!interval->getAllocation()->isStackSlot());

    // We can't spill bogus intervals.
    JS_ASSERT(interval->hasVreg());

    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];

    bool useCanonical = !reg->hasCanonicalSpillExclude()
        || interval->start() < reg->canonicalSpillExclude();

    if (useCanonical) {
        if (reg->canonicalSpill()) {
            IonSpew(IonSpew_RegAlloc, " Picked canonical spill location %s",
                    reg->canonicalSpill()->toString());
            interval->setAllocation(*reg->canonicalSpill());
            return;
        }

        if (reg->group() && !reg->group()->spill.isUse()) {
            IonSpew(IonSpew_RegAlloc, " Reusing group spill location %s",
                    reg->group()->spill.toString());
            interval->setAllocation(reg->group()->spill);
            reg->setCanonicalSpill(reg->group()->spill);
            return;
        }
    }

    uint32_t stackSlot = stackSlotAllocator.allocateSlot(reg->type());
    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());

    LStackSlot alloc(stackSlot);
    interval->setAllocation(alloc);

    IonSpew(IonSpew_RegAlloc, " Allocating spill location %s", alloc.toString());

    if (useCanonical) {
        reg->setCanonicalSpill(alloc);
        if (reg->group())
            reg->group()->spill = alloc;
    }
}

// Add moves to resolve conflicting assignments between a block and its
// predecessors. XXX try to common this with LinearScanAllocator.
bool
BacktrackingAllocator::resolveControlFlow()
{
    // Virtual register number 0 is unused.
    JS_ASSERT(vregs[0u].numIntervals() == 0);
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        BacktrackingVirtualRegister *reg = &vregs[i];

        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
            return false;

        for (size_t j = 1; j < reg->numIntervals(); j++) {
            LiveInterval *interval = reg->getInterval(j);
            JS_ASSERT(interval->index() == j);

            bool skip = false;
            for (int k = j - 1; k >= 0; k--) {
                LiveInterval *prevInterval = reg->getInterval(k);
                if (prevInterval->start() != interval->start())
                    break;
                if (*prevInterval->getAllocation() == *interval->getAllocation()) {
                    skip = true;
                    break;
                }
            }
            if (skip)
                continue;

            CodePosition start = interval->start();
            InstructionData *data = &insData[start];
            if (interval->start() > inputOf(data->block()->firstId())) {
                JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));

                LiveInterval *prevInterval = reg->intervalFor(start.previous());
                if (start.subpos() == CodePosition::INPUT) {
                    if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                } else {
                    if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                }
            }
        }
    }

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
            return false;

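        // Add moves at each block entry for phi resolutions and for
        // intervals which were split across the edge from a predecessor.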
        LBlock *successor = graph.getBlock(i);
        MBasicBlock *mSuccessor = successor->mir();
        if (mSuccessor->numPredecessors() < 1)
            continue;

        // Resolve phis to moves
        for (size_t j = 0; j < successor->numPhis(); j++) {
            LPhi *phi = successor->getPhi(j);
            JS_ASSERT(phi->numDefs() == 1);
            LDefinition *def = phi->getDef(0);
            VirtualRegister *vreg = &vregs[def];
            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
            JS_ASSERT(to);

            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);

                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
                JS_ASSERT(from);

                if (!moveAtExit(predecessor, from, to, def->type()))
                    return false;
            }
        }

        // Resolve split intervals with moves
        BitSet *live = liveIn[mSuccessor->id()];

        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            VirtualRegister &reg = vregs[*liveRegId];

            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();

                for (size_t k = 0; k < reg.numIntervals(); k++) {
                    LiveInterval *to = reg.getInterval(k);
                    if (!to->covers(inputOf(successor->firstId())))
                        continue;
                    if (to->covers(outputOf(predecessor->lastId())))
                        continue;

                    LiveInterval *from = reg.intervalFor(outputOf(predecessor->lastId()));

                    if (mSuccessor->numPredecessors() > 1) {
                        JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
                        if (!moveAtExit(predecessor, from, to, reg.type()))
                            return false;
                    } else {
                        if (!moveAtEntry(successor, from, to, reg.type()))
                            return false;
                    }
                }
            }
        }
    }

    return true;
}

bool
BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
{
    if (LDefinition *def = FindReusingDefinition(ins, use))
        return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
    return false;
}

bool
BacktrackingAllocator::isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy)
{
    switch (use->policy()) {
      case LUse::ANY:
        return isReusedInput(use, ins, considerCopy);

      case LUse::REGISTER:
      case LUse::FIXED:
        return true;

      default:
        return false;
    }
}

bool
BacktrackingAllocator::isRegisterDefinition(LiveInterval *interval)
{
    if (interval->index() != 0)
        return false;

    VirtualRegister &reg = vregs[interval->vreg()];
    if (reg.ins()->isPhi())
        return false;

    if (reg.def()->policy() == LDefinition::PRESET && !reg.def()->output()->isRegister())
        return false;
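
    // All other definitions start in a register; see setIntervalRequirement.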
    return true;
}

bool
BacktrackingAllocator::reifyAllocations()
{
    // Virtual register number 0 is unused.
    JS_ASSERT(vregs[0u].numIntervals() == 0);
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegister *reg = &vregs[i];

        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
            return false;

        for (size_t j = 0; j < reg->numIntervals(); j++) {
            LiveInterval *interval = reg->getInterval(j);
            JS_ASSERT(interval->index() == j);

            if (interval->index() == 0) {
                reg->def()->setOutput(*interval->getAllocation());
                if (reg->ins()->recoversInput()) {
                    LSnapshot *snapshot = reg->ins()->snapshot();
                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
                        LAllocation *entry = snapshot->getEntry(i);
                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
                            *entry = *reg->def()->output();
                    }
                }
            }

            for (UsePositionIterator iter(interval->usesBegin());
                 iter != interval->usesEnd();
                 iter++)
            {
                LAllocation *alloc = iter->use;
                *alloc = *interval->getAllocation();

                // For any uses which feed into MUST_REUSE_INPUT definitions,
                // add copies if the use and def have different allocations.
                LInstruction *ins = insData[iter->pos].ins();
                if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
                    LiveInterval *outputInterval =
                        vregs[def->virtualRegister()].intervalFor(outputOf(ins));
                    LAllocation *res = outputInterval->getAllocation();
                    LAllocation *sourceAlloc = interval->getAllocation();

                    if (*res != *alloc) {
                        LMoveGroup *group = getInputMoveGroup(inputOf(ins));
                        if (!group->addAfter(sourceAlloc, res, def->type()))
                            return false;
                        *alloc = *res;
                    }
                }
            }

            addLiveRegistersForInterval(reg, interval);
        }
    }

    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
    return true;
}

bool
BacktrackingAllocator::populateSafepoints()
{
    size_t firstSafepoint = 0;

    // Virtual register number 0 is unused.
    JS_ASSERT(!vregs[0u].def());
    for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
        BacktrackingVirtualRegister *reg = &vregs[i];

        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
            continue;

        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
        if (firstSafepoint >= graph.numSafepoints())
            break;

        // Find the furthest endpoint.
        CodePosition end = reg->getInterval(0)->end();
        for (size_t j = 1; j < reg->numIntervals(); j++)
            end = Max(end, reg->getInterval(j)->end());

        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
            LInstruction *ins = graph.getSafepoint(j);

            // Stop processing safepoints if we know we're out of this virtual
            // register's range.
            if (end < outputOf(ins))
                break;

            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
            // is not used with gcthings or nunboxes, or we would have to add the input reg
            // to this safepoint.
            if (ins == reg->ins() && !reg->isTemp()) {
                DebugOnly<LDefinition *> def = reg->def();
                JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
                             def->type() == LDefinition::GENERAL ||
                             def->type() == LDefinition::INT32 ||
                             def->type() == LDefinition::FLOAT32 ||
                             def->type() == LDefinition::DOUBLE);
                continue;
            }

            LSafepoint *safepoint = ins->safepoint();

            for (size_t k = 0; k < reg->numIntervals(); k++) {
                LiveInterval *interval = reg->getInterval(k);
                if (!interval->covers(inputOf(ins)))
                    continue;

                LAllocation *a = interval->getAllocation();
                if (a->isGeneralReg() && ins->isCall())
                    continue;

                switch (reg->type()) {
                  case LDefinition::OBJECT:
                    safepoint->addGcPointer(*a);
                    break;
                  case LDefinition::SLOTS:
                    safepoint->addSlotsOrElementsPointer(*a);
                    break;
#ifdef JS_NUNBOX32
                  case LDefinition::TYPE:
                    safepoint->addNunboxType(i, *a);
                    break;
                  case LDefinition::PAYLOAD:
                    safepoint->addNunboxPayload(i, *a);
                    break;
#else
                  case LDefinition::BOX:
                    safepoint->addBoxedValue(*a);
                    break;
#endif
                  default:
                    MOZ_ASSUME_UNREACHABLE("Bad register type");
                }
            }
        }
    }

    return true;
}

void
BacktrackingAllocator::dumpRegisterGroups()
{
    fprintf(stderr, "Register groups:\n");

    // Virtual register number 0 is unused.
    JS_ASSERT(!vregs[0u].group());
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        VirtualRegisterGroup *group = vregs[i].group();
        if (group && i == group->canonicalReg()) {
            for (size_t j = 0; j < group->registers.length(); j++)
                fprintf(stderr, " v%u", group->registers[j]);
            fprintf(stderr, "\n");
        }
    }
}

void
BacktrackingAllocator::dumpLiveness()
{
#ifdef DEBUG
    fprintf(stderr, "Virtual Registers:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock *block = graph.getBlock(blockIndex);
        MBasicBlock *mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);

            // Don't print the inputOf for phi nodes, since it's never used.
michael@0: fprintf(stderr, "[,%u Phi [def v%u] <-", michael@0: outputOf(phi).pos(), michael@0: phi->getDef(0)->virtualRegister()); michael@0: for (size_t j = 0; j < phi->numOperands(); j++) michael@0: fprintf(stderr, " %s", phi->getOperand(j)->toString()); michael@0: fprintf(stderr, "]\n"); michael@0: } michael@0: michael@0: for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { michael@0: LInstruction *ins = *iter; michael@0: michael@0: fprintf(stderr, "[%u,%u %s]", inputOf(ins).pos(), outputOf(ins).pos(), ins->opName()); michael@0: michael@0: for (size_t i = 0; i < ins->numTemps(); i++) { michael@0: LDefinition *temp = ins->getTemp(i); michael@0: if (!temp->isBogusTemp()) michael@0: fprintf(stderr, " [temp v%u]", temp->virtualRegister()); michael@0: } michael@0: michael@0: for (size_t i = 0; i < ins->numDefs(); i++) { michael@0: LDefinition *def = ins->getDef(i); michael@0: fprintf(stderr, " [def v%u]", def->virtualRegister()); michael@0: } michael@0: michael@0: for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) michael@0: fprintf(stderr, " [use %s]", alloc->toString()); michael@0: michael@0: fprintf(stderr, "\n"); michael@0: } michael@0: } michael@0: michael@0: fprintf(stderr, "\nLive Ranges:\n\n"); michael@0: michael@0: for (size_t i = 0; i < AnyRegister::Total; i++) michael@0: if (registers[i].allocatable) michael@0: fprintf(stderr, "reg %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->rangesToString()); michael@0: michael@0: // Virtual register number 0 is unused. michael@0: JS_ASSERT(vregs[0u].numIntervals() == 0); michael@0: for (size_t i = 1; i < graph.numVirtualRegisters(); i++) { michael@0: fprintf(stderr, "v%lu:", static_cast(i)); michael@0: VirtualRegister &vreg = vregs[i]; michael@0: for (size_t j = 0; j < vreg.numIntervals(); j++) { michael@0: if (j) michael@0: fprintf(stderr, " *"); michael@0: fprintf(stderr, "%s", vreg.getInterval(j)->rangesToString()); michael@0: } michael@0: fprintf(stderr, "\n"); michael@0: } michael@0: michael@0: fprintf(stderr, "\n"); michael@0: #endif // DEBUG michael@0: } michael@0: michael@0: #ifdef DEBUG michael@0: struct BacktrackingAllocator::PrintLiveIntervalRange michael@0: { michael@0: void operator()(const AllocatedRange &item) michael@0: { michael@0: if (item.range == item.interval->getRange(0)) { michael@0: if (item.interval->hasVreg()) michael@0: fprintf(stderr, " v%u: %s\n", michael@0: item.interval->vreg(), michael@0: item.interval->rangesToString()); michael@0: else michael@0: fprintf(stderr, " fixed: %s\n", michael@0: item.interval->rangesToString()); michael@0: } michael@0: } michael@0: }; michael@0: #endif michael@0: michael@0: void michael@0: BacktrackingAllocator::dumpAllocations() michael@0: { michael@0: #ifdef DEBUG michael@0: fprintf(stderr, "Allocations:\n"); michael@0: michael@0: // Virtual register number 0 is unused. 
    JS_ASSERT(vregs[0u].numIntervals() == 0);
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
        VirtualRegister &vreg = vregs[i];
        for (size_t j = 0; j < vreg.numIntervals(); j++) {
            if (j)
                fprintf(stderr, " *");
            LiveInterval *interval = vreg.getInterval(j);
            fprintf(stderr, "%s :: %s", interval->rangesToString(), interval->getAllocation()->toString());
        }
        fprintf(stderr, "\n");
    }

    fprintf(stderr, "\n");

    for (size_t i = 0; i < AnyRegister::Total; i++) {
        if (registers[i].allocatable) {
            fprintf(stderr, "reg %s:\n", AnyRegister::FromCode(i).name());
            registers[i].allocations.forEach(PrintLiveIntervalRange());
        }
    }

    fprintf(stderr, "\n");
#endif // DEBUG
}

bool
BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
                                       LiveInterval *spillInterval,
                                       CodePosition from, CodePosition to)
{
    LiveInterval *interval = LiveInterval::New(alloc(), vreg, 0);
    interval->setSpillInterval(spillInterval);
    return interval->addRange(from, to) && intervals.append(interval);
}

///////////////////////////////////////////////////////////////////////////////
// Heuristic Methods
///////////////////////////////////////////////////////////////////////////////

size_t
BacktrackingAllocator::computePriority(const LiveInterval *interval)
{
    // The priority of an interval is its total length, so that longer lived
    // intervals will be processed before shorter ones (even if the longer ones
    // have a low spill weight). See processInterval().
    size_t lifetimeTotal = 0;

    for (size_t i = 0; i < interval->numRanges(); i++) {
        const LiveInterval::Range *range = interval->getRange(i);
        lifetimeTotal += range->to.pos() - range->from.pos();
    }

    return lifetimeTotal;
}

size_t
BacktrackingAllocator::computePriority(const VirtualRegisterGroup *group)
{
    size_t priority = 0;
    for (size_t j = 0; j < group->registers.length(); j++) {
        uint32_t vreg = group->registers[j];
        priority += computePriority(vregs[vreg].getInterval(0));
    }
    return priority;
}

bool
BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
{
    // Whether interval is a minimal interval capturing a definition at ins.
    return (interval->end() <= minimalDefEnd(ins).next()) &&
           ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
}

bool
BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
{
    // Whether interval is a minimal interval capturing a use at ins.
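    // Such an interval starts at the instruction's input position and ends
    // no later than just past its output position.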
    return (interval->start() == inputOf(ins)) &&
           (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
}

bool
BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
{
    if (!interval->hasVreg()) {
        *pfixed = true;
        return true;
    }

    if (interval->index() == 0) {
        VirtualRegister &reg = vregs[interval->vreg()];
        if (pfixed)
            *pfixed = reg.def()->policy() == LDefinition::PRESET && reg.def()->output()->isRegister();
        return minimalDef(interval, reg.ins());
    }

    bool fixed = false, minimal = false;

    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
        LUse *use = iter->use;

        switch (use->policy()) {
          case LUse::FIXED:
            if (fixed)
                return false;
            fixed = true;
            if (minimalUse(interval, insData[iter->pos].ins()))
                minimal = true;
            break;

          case LUse::REGISTER:
            if (minimalUse(interval, insData[iter->pos].ins()))
                minimal = true;
            break;

          default:
            break;
        }
    }

    if (pfixed)
        *pfixed = fixed;
    return minimal;
}

size_t
BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
{
    // Minimal intervals have an extremely high spill weight, to ensure they
    // can evict any other intervals and be allocated to a register.
    bool fixed;
    if (minimalInterval(interval, &fixed))
        return fixed ? 2000000 : 1000000;

    size_t usesTotal = 0;

    if (interval->index() == 0) {
        VirtualRegister *reg = &vregs[interval->vreg()];
        if (reg->def()->policy() == LDefinition::PRESET && reg->def()->output()->isRegister())
            usesTotal += 2000;
        else if (!reg->ins()->isPhi())
            usesTotal += 2000;
    }

    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
        LUse *use = iter->use;

        switch (use->policy()) {
          case LUse::ANY:
            usesTotal += 1000;
            break;

          case LUse::REGISTER:
          case LUse::FIXED:
            usesTotal += 2000;
            break;

          case LUse::KEEPALIVE:
            break;

          default:
            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
            MOZ_ASSUME_UNREACHABLE("Bad use");
        }
    }

    // Intervals for registers in groups get higher weights.
    if (interval->hint()->kind() != Requirement::NONE)
        usesTotal += 2000;

    // Compute spill weight as a use density, lowering the weight for long
    // lived intervals with relatively few uses.
    size_t lifetimeTotal = computePriority(interval);
    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;

size_t
BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup *group)
{
    size_t maxWeight = 0;
    for (size_t j = 0; j < group->registers.length(); j++) {
        uint32_t vreg = group->registers[j];
        maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
    }
    return maxWeight;
}

bool
BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
{
    // If this interval has portions that are hot and portions that are cold,
    // split it at the boundaries between hot and cold code.

    const LiveInterval::Range *hotRange = nullptr;

    for (size_t i = 0; i < interval->numRanges(); i++) {
        AllocatedRange range(interval, interval->getRange(i)), existing;
        if (hotcode.contains(range, &existing)) {
            hotRange = existing.range;
            break;
        }
    }

    // Don't split if there is no hot code in the interval.
    if (!hotRange)
        return true;

    // Don't split if there is no cold code in the interval.
    bool coldCode = false;
    for (size_t i = 0; i < interval->numRanges(); i++) {
        if (!hotRange->contains(interval->getRange(i))) {
            coldCode = true;
            break;
        }
    }
    if (!coldCode)
        return true;

    SplitPositionVector splitPositions;
    if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
        return false;
    *success = true;
    return splitAt(interval, splitPositions);
}

bool
BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success)
{
    // If this interval's later uses do not require it to be in a register,
    // split it after the last use which does require a register. If conflict
    // is specified, only consider register uses before the conflict starts.

    CodePosition lastRegisterFrom, lastRegisterTo, lastUse;

    for (UsePositionIterator iter(interval->usesBegin());
         iter != interval->usesEnd();
         iter++)
    {
        LUse *use = iter->use;
        LInstruction *ins = insData[iter->pos].ins();

        // Uses in the interval should be sorted.
        JS_ASSERT(iter->pos >= lastUse);
        lastUse = inputOf(ins);

        if (!conflict || outputOf(ins) < conflict->start()) {
            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
                lastRegisterFrom = inputOf(ins);
                lastRegisterTo = iter->pos.next();
            }
        }
    }

    if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
        // Can't trim non-register uses off the end by splitting.
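        // Either no register use was found before the conflict, or the last
        // register use is also the interval's last use, so there is no
        // non-register tail to split off.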
        return true;
    }

    SplitPositionVector splitPositions;
    if (!splitPositions.append(lastRegisterTo))
        return false;
    *success = true;
    return splitAt(interval, splitPositions);
}

bool
BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
{
    // Split this interval so that all its register uses become minimal
    // intervals and allow the vreg to be spilled throughout its range.

    LiveIntervalVector newIntervals;
    uint32_t vreg = interval->vreg();

    // If this LiveInterval is the result of an earlier split which created a
    // spill interval, that spill interval covers the whole range, so we don't
    // need to create a new one.
    bool spillIntervalIsNew = false;
    LiveInterval *spillInterval = interval->spillInterval();
    if (!spillInterval) {
        spillInterval = LiveInterval::New(alloc(), vreg, 0);
        spillIntervalIsNew = true;
    }

    CodePosition spillStart = interval->start();
    if (isRegisterDefinition(interval)) {
        // Treat the definition of the interval as a register use so that it
        // can be split and spilled ASAP.
        CodePosition from = interval->start();
        CodePosition to = minimalDefEnd(insData[from].ins()).next();
        if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
            return false;
        spillStart = to;
    }

    if (spillIntervalIsNew) {
        for (size_t i = 0; i < interval->numRanges(); i++) {
            const LiveInterval::Range *range = interval->getRange(i);
            CodePosition from = range->from < spillStart ? spillStart : range->from;
            if (!spillInterval->addRange(from, range->to))
                return false;
        }
    }
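
    // Walk the uses in order: uses before spillStart stay with the interval
    // created for the definition, uses that need a register each get a small
    // dedicated interval, and everything else is attached to the spill
    // interval so the vreg can remain spilled across those uses.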
    for (UsePositionIterator iter(interval->usesBegin());
         iter != interval->usesEnd();
         iter++)
    {
        LInstruction *ins = insData[iter->pos].ins();
        if (iter->pos < spillStart) {
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        } else if (isRegisterUse(iter->use, ins)) {
            // For register uses which are not useRegisterAtStart, pick an
            // interval that covers both the instruction's input and output, so
            // that the register is not reused for an output.
            CodePosition from = inputOf(ins);
            CodePosition to = iter->pos.next();

            // Use the same interval for duplicate use positions, except when
            // the uses are fixed (they may require incompatible registers).
            if (newIntervals.empty() || newIntervals.back()->end() != to || iter->use->policy() == LUse::FIXED) {
                if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
                    return false;
            }

            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        } else {
            JS_ASSERT(spillIntervalIsNew);
            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        }
    }

    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
        return false;

    return split(interval, newIntervals) && requeueIntervals(newIntervals);
}

// Find the next split position after the current position.
static size_t NextSplitPosition(size_t activeSplitPosition,
                                const SplitPositionVector &splitPositions,
                                CodePosition currentPos)
{
    while (activeSplitPosition < splitPositions.length() &&
           splitPositions[activeSplitPosition] <= currentPos)
    {
        ++activeSplitPosition;
    }
    return activeSplitPosition;
}

// Test whether the current position has just crossed a split point.
static bool SplitHere(size_t activeSplitPosition,
                      const SplitPositionVector &splitPositions,
                      CodePosition currentPos)
{
    return activeSplitPosition < splitPositions.length() &&
           currentPos >= splitPositions[activeSplitPosition];
}
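
// For example (illustrative positions only): with splitPositions = { 10, 20 },
// NextSplitPosition(0, splitPositions, 5) returns 0, since no split point has
// been passed yet. When the walk reaches position 12, SplitHere(0, ...) is
// true because 12 >= 10, and NextSplitPosition(0, splitPositions, 12) advances
// to 1. SplitHere(1, splitPositions, 15) is false (15 < 20), so uses at 12 and
// 15 end up in the same interval, while a use at 22 starts a new one.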

bool
BacktrackingAllocator::splitAt(LiveInterval *interval,
                               const SplitPositionVector &splitPositions)
{
    // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
    // consolidate any register uses which have no intervening split points into the
    // same resulting interval.

    // splitPositions should be non-empty and sorted.
    JS_ASSERT(!splitPositions.empty());
    for (size_t i = 1; i < splitPositions.length(); ++i)
        JS_ASSERT(splitPositions[i-1] < splitPositions[i]);

    // Don't spill the interval until after the end of its definition.
    CodePosition spillStart = interval->start();
    if (isRegisterDefinition(interval))
        spillStart = minimalDefEnd(insData[interval->start()].ins()).next();

    uint32_t vreg = interval->vreg();

    // If this LiveInterval is the result of an earlier split which created a
    // spill interval, that spill interval covers the whole range, so we don't
    // need to create a new one.
    bool spillIntervalIsNew = false;
    LiveInterval *spillInterval = interval->spillInterval();
    if (!spillInterval) {
        spillInterval = LiveInterval::New(alloc(), vreg, 0);
        spillIntervalIsNew = true;

        for (size_t i = 0; i < interval->numRanges(); i++) {
            const LiveInterval::Range *range = interval->getRange(i);
            CodePosition from = range->from < spillStart ? spillStart : range->from;
            if (!spillInterval->addRange(from, range->to))
                return false;
        }
    }

    LiveIntervalVector newIntervals;

    CodePosition lastRegisterUse;
    if (spillStart != interval->start()) {
        LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
        newInterval->setSpillInterval(spillInterval);
        if (!newIntervals.append(newInterval))
            return false;
        lastRegisterUse = interval->start();
    }

    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
    for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
        LInstruction *ins = insData[iter->pos].ins();
        if (iter->pos < spillStart) {
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
            activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
        } else if (isRegisterUse(iter->use, ins)) {
            if (lastRegisterUse.pos() == 0 ||
                SplitHere(activeSplitPosition, splitPositions, iter->pos))
            {
                // Place this register use into a different interval from the
                // last one if there are any split points between the two uses.
                LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
                newInterval->setSpillInterval(spillInterval);
                if (!newIntervals.append(newInterval))
                    return false;
                activeSplitPosition = NextSplitPosition(activeSplitPosition,
                                                        splitPositions,
                                                        iter->pos);
            }
            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
            lastRegisterUse = iter->pos;
        } else {
            JS_ASSERT(spillIntervalIsNew);
            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
        }
    }

    // Compute ranges for each new interval that cover all its uses.
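    // Interval ranges are stored in reverse order (see LifetimesOverlap), so
    // activeRange starts at the last stored range, which is the earliest in
    // code order, and walks backwards through the storage as the new
    // intervals, which were built in code order, are processed.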
    size_t activeRange = interval->numRanges();
    for (size_t i = 0; i < newIntervals.length(); i++) {
        LiveInterval *newInterval = newIntervals[i];
        CodePosition start, end;
        if (i == 0 && spillStart != interval->start()) {
            start = interval->start();
            if (newInterval->usesEmpty())
                end = spillStart;
            else
                end = newInterval->usesBack()->pos.next();
        } else {
            start = inputOf(insData[newInterval->usesBegin()->pos].ins());
            end = newInterval->usesBack()->pos.next();
        }
        for (; activeRange > 0; --activeRange) {
            const LiveInterval::Range *range = interval->getRange(activeRange - 1);
            if (range->to <= start)
                continue;
            if (range->from >= end)
                break;
            if (!newInterval->addRange(Max(range->from, start),
                                       Min(range->to, end)))
                return false;
            if (range->to >= end)
                break;
        }
    }

    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
        return false;

    return split(interval, newIntervals) && requeueIntervals(newIntervals);
}

bool
BacktrackingAllocator::splitAcrossCalls(LiveInterval *interval)
{
    // Split the interval to separate register uses and non-register uses and
    // allow the vreg to be spilled across its range.

    // Find the locations of all calls in the interval's range. Fixed intervals
    // are introduced by buildLivenessInfo only for calls when allocating for
    // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
    // iterate through it backwards; this yields the call positions in the
    // ascending order that splitAt asserts.
    SplitPositionVector callPositions;
    for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
        const LiveInterval::Range *range = fixedIntervalsUnion->getRange(i - 1);
        if (interval->covers(range->from) && interval->covers(range->from.previous())) {
            if (!callPositions.append(range->from))
                return false;
        }
    }
    JS_ASSERT(callPositions.length());

    return splitAt(interval, callPositions);
}

bool
BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
{
    bool success = false;

    if (!trySplitAcrossHotcode(interval, &success))
        return false;
    if (success)
        return true;

    if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
        return false;
    if (success)
        return true;

    return splitAtAllRegisterUses(interval);
}
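
// chooseIntervalSplit() falls back through progressively more aggressive
// strategies: first splitting at hot/cold boundaries, then splitting after the
// last use that requires a register, and only as a last resort splitting at
// every register use via splitAtAllRegisterUses().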