/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/LinearScan.h"

#include "mozilla/DebugOnly.h"

#include "jit/BitSet.h"
#include "jit/IonSpewer.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

/*
 * Merge virtual register intervals into the UnhandledQueue, taking advantage
 * of their nearly-sorted ordering.
 */
void
LinearScanAllocator::enqueueVirtualRegisterIntervals()
{
    // Cursor into the unhandled queue, iterating through start positions.
    IntervalReverseIterator curr = unhandled.rbegin();

    // Start position is non-monotonically increasing by virtual register number.
    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
        LiveInterval *live = vregs[i].getInterval(0);
        if (live->numRanges() > 0) {
            setIntervalRequirement(live);

            // Iterate backward until the next highest class of start position.
            for (; curr != unhandled.rend(); curr++) {
                if (curr->start() > live->start())
                    break;
            }

            // Insert forward from the current position, thereby
            // sorting by priority within the start class.
            unhandled.enqueueForward(*curr, live);
        }
    }
}

/*
 * This function performs a preliminary allocation on the already-computed
 * live intervals, storing the result in the allocation field of the intervals
 * themselves.
 *
 * The algorithm is based on the one published in:
 *
 * Wimmer, Christian, and Hanspeter Mössenböck. "Optimized Interval Splitting
 *     in a Linear Scan Register Allocator." Proceedings of the First
 *     ACM/USENIX Conference on Virtual Execution Environments. Chicago, IL,
 *     USA, ACM. 2005. PDF.
 *
 * The algorithm proceeds over each interval in order of start position.
 * If a free register is available, the register that is free for the largest
 * portion of the current interval is allocated. Otherwise, the interval
 * with the farthest-away next use is spilled to make room for this one. In all
 * cases, intervals which conflict either with other intervals or with
 * use or definition constraints are split at the point of conflict to be
 * assigned a compatible allocation later.
 */
bool
LinearScanAllocator::allocateRegisters()
{
    // The unhandled queue currently contains only spill intervals, in sorted
    // order. Intervals for virtual registers exist in sorted order based on
    // start position by vreg ID, but may have priorities that require them to
    // be reordered when adding to the unhandled queue.
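    // Illustrative sketch of the queue discipline (positions are made up):
    // the queue runs from high start positions at the front to low start
    // positions at the back, and dequeue() pops from the back, so intervals
    // are handled in ascending start order:
    //
    //   front -> [start=24] [start=16] [start=8, lax] [start=8, stringent] <- back
    //
    // Among intervals sharing a start position, the one with the more
    // stringent requirement sits nearer the back and is dequeued first (see
    // assertSorted() and UnhandledQueue::dequeue() below).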
    enqueueVirtualRegisterIntervals();
    unhandled.assertSorted();

    // Add fixed intervals with ranges to fixed.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        if (fixedIntervals[i]->numRanges() > 0)
            fixed.pushBack(fixedIntervals[i]);
    }

    // Iterate through all intervals in ascending start order.
    CodePosition prevPosition = CodePosition::MIN;
    while ((current = unhandled.dequeue()) != nullptr) {
        JS_ASSERT(current->getAllocation()->isUse());
        JS_ASSERT(current->numRanges() > 0);

        if (mir->shouldCancel("LSRA Allocate Registers (main loop)"))
            return false;

        CodePosition position = current->start();
        const Requirement *req = current->requirement();
        const Requirement *hint = current->hint();

        IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
                current->hasVreg() ? current->vreg() : 0, current->start().pos(),
                current->end().pos(), current->requirement()->priority());

        // Shift active intervals to the inactive or handled sets as appropriate
        if (position != prevPosition) {
            JS_ASSERT(position > prevPosition);
            prevPosition = position;

            for (IntervalIterator i(active.begin()); i != active.end(); ) {
                LiveInterval *it = *i;
                JS_ASSERT(it->numRanges() > 0);

                if (it->end() <= position) {
                    i = active.removeAt(i);
                    finishInterval(it);
                } else if (!it->covers(position)) {
                    i = active.removeAt(i);
                    inactive.pushBack(it);
                } else {
                    i++;
                }
            }

            // Shift inactive intervals to the active or handled sets as appropriate
            for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
                LiveInterval *it = *i;
                JS_ASSERT(it->numRanges() > 0);

                if (it->end() <= position) {
                    i = inactive.removeAt(i);
                    finishInterval(it);
                } else if (it->covers(position)) {
                    i = inactive.removeAt(i);
                    active.pushBack(it);
                } else {
                    i++;
                }
            }
        }

        // Sanity check all intervals in all sets
        validateIntervals();

        // If the interval has a hard requirement, grant it.
        if (req->kind() == Requirement::FIXED) {
            JS_ASSERT(!req->allocation().isRegister());
            if (!assign(req->allocation()))
                return false;
            continue;
        }

        // If we don't really need this in a register, don't allocate one
        if (req->kind() != Requirement::REGISTER && hint->kind() == Requirement::NONE) {
            IonSpew(IonSpew_RegAlloc, "  Eagerly spilling virtual register %d",
                    current->hasVreg() ? current->vreg() : 0);
            if (!spill())
                return false;
            continue;
        }

        // Try to allocate a free register
        IonSpew(IonSpew_RegAlloc, "  Attempting free register allocation");
        CodePosition bestFreeUntil;
        AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
        if (bestCode != AnyRegister::Invalid) {
            AnyRegister best = AnyRegister::FromCode(bestCode);
            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());

            // Split when the register is next needed if necessary
            if (bestFreeUntil < current->end()) {
                if (!splitInterval(current, bestFreeUntil))
                    return false;
            }
            if (!assign(LAllocation(best)))
                return false;

            continue;
        }

        IonSpew(IonSpew_RegAlloc, "  Attempting blocked register allocation");

        // If we absolutely need a register or our next use is closer than the
        // selected blocking register then we spill the blocker. Otherwise, we
        // spill the current interval.
        CodePosition bestNextUsed;
        bestCode = findBestBlockedRegister(&bestNextUsed);
        if (bestCode != AnyRegister::Invalid &&
            (req->kind() == Requirement::REGISTER || hint->pos() < bestNextUsed))
        {
            AnyRegister best = AnyRegister::FromCode(bestCode);
            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());

            if (!splitBlockingIntervals(LAllocation(best)))
                return false;
            if (!assign(LAllocation(best)))
                return false;

            continue;
        }

        IonSpew(IonSpew_RegAlloc, "  No registers available to spill");
        JS_ASSERT(req->kind() == Requirement::NONE);

        if (!spill())
            return false;
    }

    validateAllocations();
    validateVirtualRegisters();

    return true;
}

/*
 * This function iterates over control flow edges in the function and resolves
 * conflicts wherein two predecessors of a block have different allocations
 * for a virtual register than the block itself. It also turns phis into moves.
 *
 * The algorithm is based on the one published in "Linear Scan Register
 * Allocation on SSA Form" by C. Wimmer et al., for which the full citation
 * appears above.
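 *
 * As a rough illustration: if vreg v ends predecessor block A in a register
 * but the interval covering the start of successor block B assigns v a stack
 * slot, a register-to-slot move must be inserted on the A->B edge (at A's
 * exit when B has multiple predecessors, otherwise at B's entry). Phis are
 * resolved the same way, by moving each predecessor's operand into the phi's
 * allocation.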
 */
bool
LinearScanAllocator::resolveControlFlow()
{
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("LSRA Resolve Control Flow (main loop)"))
            return false;

        LBlock *successor = graph.getBlock(i);
        MBasicBlock *mSuccessor = successor->mir();
        if (mSuccessor->numPredecessors() < 1)
            continue;

        // Resolve phis to moves
        for (size_t j = 0; j < successor->numPhis(); j++) {
            LPhi *phi = successor->getPhi(j);
            JS_ASSERT(phi->numDefs() == 1);
            LDefinition *def = phi->getDef(0);
            LinearScanVirtualRegister *vreg = &vregs[def];
            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
            JS_ASSERT(to);

            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);

                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
                JS_ASSERT(from);

                if (!moveAtExit(predecessor, from, to, def->type()))
                    return false;
            }

            if (vreg->mustSpillAtDefinition() && !to->isSpill()) {
                // Make sure this phi is spilled at the loop header.
                LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
                if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill(),
                                def->type()))
                    return false;
            }
        }

        // Resolve split intervals with moves
        BitSet *live = liveIn[mSuccessor->id()];

        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            LinearScanVirtualRegister *vreg = &vregs[*liveRegId];
            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
            JS_ASSERT(to);

            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
                LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
                JS_ASSERT(from);

                if (*from->getAllocation() == *to->getAllocation())
                    continue;

                // If this value is spilled at its definition, other stores
                // are redundant.
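                // (The canonical spill slot was already written when the
                // value was defined, so storing the same value again on this
                // edge would be a no-op; the asserts below check that the
                // target really is that slot.)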
                if (vreg->mustSpillAtDefinition() && to->getAllocation()->isStackSlot()) {
                    JS_ASSERT(vreg->canonicalSpill());
                    JS_ASSERT(*vreg->canonicalSpill() == *to->getAllocation());
                    continue;
                }

                if (mSuccessor->numPredecessors() > 1) {
                    JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
                    if (!moveAtExit(predecessor, from, to, vreg->type()))
                        return false;
                } else {
                    if (!moveAtEntry(successor, from, to, vreg->type()))
                        return false;
                }
            }
        }
    }

    return true;
}

bool
LinearScanAllocator::moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to,
                                    LDefinition::Type type)
{
    if (*from == *to)
        return true;
    LMoveGroup *moves = getInputMoveGroup(pos);
    return moves->add(from, to, type);
}

static inline void
SetOsiPointUses(LiveInterval *interval, CodePosition defEnd, const LAllocation &allocation)
{
    // Moves are inserted after OsiPoint instructions. This function sets
    // any OsiPoint uses of this interval to the allocation of the value
    // before the move.

    JS_ASSERT(interval->index() == 0);

    for (UsePositionIterator usePos(interval->usesBegin());
         usePos != interval->usesEnd();
         usePos++)
    {
        if (usePos->pos > defEnd)
            break;
        *static_cast<LAllocation *>(usePos->use) = allocation;
    }
}

/*
 * This function takes the allocations assigned to the live intervals and
 * replaces all virtual registers in the function with the allocations
 * corresponding to the appropriate interval.
 */
bool
LinearScanAllocator::reifyAllocations()
{
    // Iterate over each interval, ensuring that definitions are visited before uses.
    for (size_t j = 1; j < graph.numVirtualRegisters(); j++) {
        LinearScanVirtualRegister *reg = &vregs[j];
        if (mir->shouldCancel("LSRA Reification (main loop)"))
            return false;

        for (size_t k = 0; k < reg->numIntervals(); k++) {
            LiveInterval *interval = reg->getInterval(k);
            JS_ASSERT(reg == &vregs[interval->vreg()]);
            if (!interval->numRanges())
                continue;

            UsePositionIterator usePos(interval->usesBegin());
            for (; usePos != interval->usesEnd(); usePos++) {
                if (usePos->use->isFixedRegister()) {
                    LiveInterval *to = fixedIntervals[GetFixedRegister(reg->def(), usePos->use).code()];

                    *static_cast<LAllocation *>(usePos->use) = *to->getAllocation();
                    if (!moveInput(usePos->pos, interval, to, reg->type()))
                        return false;
                } else {
                    JS_ASSERT(UseCompatibleWith(usePos->use, *interval->getAllocation()));
                    *static_cast<LAllocation *>(usePos->use) = *interval->getAllocation();
                }
            }

            // Erase the def of this interval if it's the first one
            if (interval->index() == 0)
            {
                LDefinition *def = reg->def();
                LAllocation *spillFrom;

                // Insert the moves after any OsiPoint or Nop instructions
                // following this one.
                // See minimalDefEnd for more info.
                CodePosition defEnd = minimalDefEnd(reg->ins());

                if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
                    AnyRegister fixedReg = def->output()->toRegister();
                    LiveInterval *from = fixedIntervals[fixedReg.code()];

                    // If we insert the move after an OsiPoint that uses this vreg,
                    // it should use the fixed register instead.
                    SetOsiPointUses(interval, defEnd, LAllocation(fixedReg));

                    if (!moveAfter(defEnd, from, interval, def->type()))
                        return false;
                    spillFrom = from->getAllocation();
                } else {
                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
                        LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);

                        JS_ASSERT(!inputAlloc->isUse());

                        *inputAlloc = *interval->getAllocation();
                        if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc, def->type()))
                            return false;
                    }

                    JS_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
                    def->setOutput(*interval->getAllocation());

                    spillFrom = interval->getAllocation();
                }

                if (reg->ins()->recoversInput()) {
                    LSnapshot *snapshot = reg->ins()->snapshot();
                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
                        LAllocation *entry = snapshot->getEntry(i);
                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
                            *entry = *def->output();
                    }
                }

                if (reg->mustSpillAtDefinition() && !reg->ins()->isPhi() &&
                    (*reg->canonicalSpill() != *spillFrom))
                {
                    // If we move the spill after an OsiPoint, the OsiPoint should
                    // use the original location instead.
                    SetOsiPointUses(interval, defEnd, *spillFrom);

                    // Insert a spill after this instruction (or after any OsiPoint
                    // or Nop instructions). Note that we explicitly ignore phis,
                    // which should have been handled in resolveControlFlow().
                    LMoveGroup *moves = getMoveGroupAfter(defEnd);
                    if (!moves->add(spillFrom, reg->canonicalSpill(), def->type()))
                        return false;
                }
            }
            else if (interval->start() > inputOf(insData[interval->start()].block()->firstId()) &&
                     (!reg->canonicalSpill() ||
                      (reg->canonicalSpill() == interval->getAllocation() &&
                       !reg->mustSpillAtDefinition()) ||
                      *reg->canonicalSpill() != *interval->getAllocation()))
            {
                // If this virtual register has no canonical spill location, this
                // is the first spill to that location, or this is a move to somewhere
                // completely different, we have to emit a move for this interval.
                // Don't do this if the interval starts at the first instruction of the
                // block; this case should have been handled by resolveControlFlow().
                //
                // If the interval starts at the output half of an instruction, we have to
                // emit the move *after* this instruction, to prevent clobbering an input
                // register.
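                // Illustration, assuming the usual two-position encoding in
                // which each instruction N has an input half and an output
                // half:
                //
                //   ... [N:input][N:output] [N+1:input][N+1:output] ...
                //
                // A split interval beginning at N's output half gets its
                // move emitted after instruction N; emitting it before N
                // could clobber a register that still feeds N's inputs.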
                LiveInterval *prevInterval = reg->getInterval(interval->index() - 1);
                CodePosition start = interval->start();
                InstructionData *data = &insData[start];

                JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));

                if (start.subpos() == CodePosition::INPUT) {
                    if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                } else {
                    if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
                        return false;
                }

                // Mark this interval's spill position, if needed.
                if (reg->canonicalSpill() == interval->getAllocation() &&
                    !reg->mustSpillAtDefinition())
                {
                    reg->setSpillPosition(interval->start());
                }
            }

            addLiveRegistersForInterval(reg, interval);
        }
    } // Iteration over virtual register intervals.

    // Set the graph overall stack height
    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());

    return true;
}

inline bool
LinearScanAllocator::isSpilledAt(LiveInterval *interval, CodePosition pos)
{
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
    if (!reg->canonicalSpill() || !reg->canonicalSpill()->isStackSlot())
        return false;

    if (reg->mustSpillAtDefinition()) {
        JS_ASSERT(reg->spillPosition() <= pos);
        return true;
    }

    return interval->getAllocation() == reg->canonicalSpill();
}

bool
LinearScanAllocator::populateSafepoints()
{
    size_t firstSafepoint = 0;

    for (uint32_t i = 0; i < vregs.numVirtualRegisters(); i++) {
        LinearScanVirtualRegister *reg = &vregs[i];

        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
            continue;

        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
        if (firstSafepoint >= graph.numSafepoints())
            break;

        // Find the furthest endpoint.
        size_t lastInterval = reg->numIntervals() - 1;
        CodePosition end = reg->getInterval(lastInterval)->end();

        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
            LInstruction *ins = graph.getSafepoint(j);

            // Stop processing safepoints if we know we're out of this virtual
            // register's range.
            if (end < inputOf(ins))
                break;

            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
            // is not used with gcthings or nunboxes, or we would have to add the input reg
            // to this safepoint.
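            // (Rationale, as far as we can tell: the instruction's own
            // output is not yet defined when its safepoint is taken, so it
            // is skipped; a GC-typed temp, by contrast, may already hold a
            // traceable value and falls through to be recorded below.)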
            if (ins == reg->ins() && !reg->isTemp()) {
                DebugOnly<LDefinition *> def = reg->def();
                JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
                             def->type() == LDefinition::GENERAL ||
                             def->type() == LDefinition::INT32 ||
                             def->type() == LDefinition::FLOAT32 ||
                             def->type() == LDefinition::DOUBLE);
                continue;
            }

            LSafepoint *safepoint = ins->safepoint();

            if (IsSlotsOrElements(reg)) {
                LiveInterval *interval = reg->intervalFor(inputOf(ins));
                if (!interval)
                    continue;

                LAllocation *a = interval->getAllocation();
                if (a->isGeneralReg() && !ins->isCall())
                    safepoint->addSlotsOrElementsRegister(a->toGeneralReg()->reg());

                if (isSpilledAt(interval, inputOf(ins))) {
                    if (!safepoint->addSlotsOrElementsSlot(reg->canonicalSpillSlot()))
                        return false;
                }
            } else if (!IsNunbox(reg)) {
                JS_ASSERT(IsTraceable(reg));

                LiveInterval *interval = reg->intervalFor(inputOf(ins));
                if (!interval)
                    continue;

                LAllocation *a = interval->getAllocation();
                if (a->isGeneralReg() && !ins->isCall()) {
#ifdef JS_PUNBOX64
                    if (reg->type() == LDefinition::BOX) {
                        safepoint->addValueRegister(a->toGeneralReg()->reg());
                    } else
#endif
                    {
                        safepoint->addGcRegister(a->toGeneralReg()->reg());
                    }
                }

                if (isSpilledAt(interval, inputOf(ins))) {
#ifdef JS_PUNBOX64
                    if (reg->type() == LDefinition::BOX) {
                        if (!safepoint->addValueSlot(reg->canonicalSpillSlot()))
                            return false;
                    } else
#endif
                    {
                        if (!safepoint->addGcSlot(reg->canonicalSpillSlot()))
                            return false;
                    }
                }
#ifdef JS_NUNBOX32
            } else {
                LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
                LinearScanVirtualRegister *type = (reg->type() == LDefinition::TYPE) ? reg : other;
                LinearScanVirtualRegister *payload = (reg->type() == LDefinition::PAYLOAD) ? reg : other;
                LiveInterval *typeInterval = type->intervalFor(inputOf(ins));
                LiveInterval *payloadInterval = payload->intervalFor(inputOf(ins));

                if (!typeInterval && !payloadInterval)
                    continue;

                LAllocation *typeAlloc = typeInterval->getAllocation();
                LAllocation *payloadAlloc = payloadInterval->getAllocation();

                // If the payload is an argument, we'll scan that explicitly as
                // part of the frame. It is therefore safe to not add any
                // safepoint entry, as long as the vreg does not have a stack
                // slot as canonical spill slot.
                if (payloadAlloc->isArgument() &&
                    (!payload->canonicalSpill() || payload->canonicalSpill() == payloadAlloc))
                {
                    continue;
                }

                if (isSpilledAt(typeInterval, inputOf(ins)) &&
                    isSpilledAt(payloadInterval, inputOf(ins)))
                {
                    // These two components of the Value are spilled
                    // contiguously, so simply keep track of the base slot.
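                    // Assumed layout sketch (slot numbering is illustrative
                    // only): the boxed Value occupies one double-wide stack
                    // allocation holding the type tag and payload in
                    // adjacent single slots:
                    //
                    //   base slot -> [ type | payload ]
                    //
                    // BaseOfNunboxSlot() recovers that base from either
                    // half's slot, so a single addValueSlot() entry covers
                    // both components.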
                    uint32_t payloadSlot = payload->canonicalSpillSlot();
                    uint32_t slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot);
                    if (!safepoint->addValueSlot(slot))
                        return false;
                }

                if (!ins->isCall() &&
                    (!isSpilledAt(typeInterval, inputOf(ins)) || payloadAlloc->isGeneralReg()))
                {
                    // Either the payload is on the stack but the type is
                    // in a register, or the payload is in a register. In
                    // both cases, we don't have a contiguous spill so we
                    // add a torn entry.
                    if (!safepoint->addNunboxParts(*typeAlloc, *payloadAlloc))
                        return false;

                    // If the nunbox is stored in multiple places, we need to
                    // trace all of them to allow the GC to relocate objects.
                    if (payloadAlloc->isGeneralReg() && isSpilledAt(payloadInterval, inputOf(ins))) {
                        if (!safepoint->addNunboxParts(*typeAlloc, *payload->canonicalSpill()))
                            return false;
                    }
                }
#endif
            }
        }

#ifdef JS_NUNBOX32
        if (IsNunbox(reg)) {
            // Skip past the next half of this nunbox so we don't track the
            // same slot twice.
            JS_ASSERT(&vregs[reg->def()->virtualRegister() + 1] == otherHalfOfNunbox(reg));
            i++;
        }
#endif
    }

    return true;
}

/*
 * Split the given interval at the given position, and add the created
 * interval to the unhandled queue.
 */
bool
LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
{
    // Make sure we're actually splitting this interval, not some other
    // interval in the same virtual register.
    JS_ASSERT(interval->start() < pos && pos < interval->end());

    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    // "Bogus" intervals cannot be split.
    JS_ASSERT(reg);

    // Do the split.
    LiveInterval *newInterval = LiveInterval::New(alloc(), interval->vreg(), interval->index() + 1);
    if (!interval->splitFrom(pos, newInterval))
        return false;

    JS_ASSERT(interval->numRanges() > 0);
    JS_ASSERT(newInterval->numRanges() > 0);

    if (!reg->addInterval(newInterval))
        return false;

    IonSpew(IonSpew_RegAlloc, "  Split interval to %u = [%u, %u]/[%u, %u]",
            interval->vreg(), interval->start().pos(),
            interval->end().pos(), newInterval->start().pos(),
            newInterval->end().pos());

    // We always want to enqueue the resulting split. We always split
    // forward, and we never want to handle something forward of our
    // current position.
    setIntervalRequirement(newInterval);

    // splitInterval() is usually called to split the node that has just been
    // popped from the unhandled queue. Therefore the split will likely be
    // closer to the lower start positions in the queue.
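    // Searching backward from the low-start end of the queue therefore finds
    // the insertion point quickly; see UnhandledQueue::enqueueBackward()
    // below.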
    unhandled.enqueueBackward(newInterval);

    return true;
}

bool
LinearScanAllocator::splitBlockingIntervals(LAllocation allocation)
{
    JS_ASSERT(allocation.isRegister());

    // Split current before the next fixed use.
    LiveInterval *fixed = fixedIntervals[allocation.toRegister().code()];
    if (fixed->numRanges() > 0) {
        CodePosition fixedPos = current->intersect(fixed);
        if (fixedPos != CodePosition::MIN) {
            JS_ASSERT(fixedPos > current->start());
            JS_ASSERT(fixedPos < current->end());
            if (!splitInterval(current, fixedPos))
                return false;
        }
    }

    // Split the blocking interval if it exists.
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
            IonSpew(IonSpew_RegAlloc, "  Splitting active interval %u = [%u, %u]",
                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());

            JS_ASSERT(i->start() != current->start());
            JS_ASSERT(i->covers(current->start()));

            if (!splitInterval(*i, current->start()))
                return false;

            LiveInterval *it = *i;
            active.removeAt(i);
            finishInterval(it);
            break;
        }
    }

    // Split any inactive intervals at the next live point.
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
            IonSpew(IonSpew_RegAlloc, "  Splitting inactive interval %u = [%u, %u]",
                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());

            LiveInterval *it = *i;
            CodePosition nextActive = it->nextCoveredAfter(current->start());
            JS_ASSERT(nextActive != CodePosition::MIN);

            if (!splitInterval(it, nextActive))
                return false;

            i = inactive.removeAt(i);
            finishInterval(it);
        } else {
            i++;
        }
    }

    return true;
}

/*
 * Assign the current interval the given allocation, splitting conflicting
 * intervals as necessary to make the allocation stick.
 */
bool
LinearScanAllocator::assign(LAllocation allocation)
{
    if (allocation.isRegister())
        IonSpew(IonSpew_RegAlloc, "Assigning register %s", allocation.toRegister().name());
    current->setAllocation(allocation);

    // Split this interval at the next incompatible one
    LinearScanVirtualRegister *reg = &vregs[current->vreg()];
    if (reg) {
        CodePosition splitPos = current->firstIncompatibleUse(allocation);
        if (splitPos != CodePosition::MAX) {
            // Split before the incompatible use. This ensures the use position is
            // part of the second half of the interval and guarantees we never split
            // at the end (zero-length intervals are invalid).
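            // Worked example (illustrative numbers): with current = [10, 30)
            // assigned a stack slot and a REGISTER use at position 20,
            // firstIncompatibleUse() returns 20 and we split at 19, leaving
            // [10, 19) with the stack slot while [19, 30) is requeued so the
            // use at 20 can receive a register.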
            splitPos = splitPos.previous();
            JS_ASSERT(splitPos < current->end());
            if (!splitInterval(current, splitPos))
                return false;
        }
    }

    bool useAsCanonicalSpillSlot = allocation.isMemory();
    // Only canonically spill argument values when frame arguments are not
    // modified in the body.
    if (mir->modifiesFrameArguments())
        useAsCanonicalSpillSlot = allocation.isStackSlot();

    if (reg && useAsCanonicalSpillSlot) {
        if (reg->canonicalSpill()) {
            JS_ASSERT(allocation == *reg->canonicalSpill());

            // This interval is spilled more than once, so just always spill
            // it at its definition.
            reg->setSpillAtDefinition(outputOf(reg->ins()));
        } else {
            reg->setCanonicalSpill(current->getAllocation());

            // If this spill is inside a loop, and the definition is outside
            // the loop, instead move the spill to outside the loop.
            InstructionData *other = &insData[current->start()];
            uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth();
            uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth();
            if (loopDepthAtSpill > loopDepthAtDef)
                reg->setSpillAtDefinition(outputOf(reg->ins()));
        }
    }

    active.pushBack(current);

    return true;
}

uint32_t
LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
{
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    SlotList *freed;
    if (reg->type() == LDefinition::DOUBLE)
        freed = &finishedDoubleSlots_;
#if JS_BITS_PER_WORD == 64
    else if (reg->type() == LDefinition::GENERAL ||
             reg->type() == LDefinition::OBJECT ||
             reg->type() == LDefinition::SLOTS)
        freed = &finishedDoubleSlots_;
#endif
#ifdef JS_PUNBOX64
    else if (reg->type() == LDefinition::BOX)
        freed = &finishedDoubleSlots_;
#endif
#ifdef JS_NUNBOX32
    else if (IsNunbox(reg))
        freed = &finishedNunboxSlots_;
#endif
    else
        freed = &finishedSlots_;

    if (!freed->empty()) {
        LiveInterval *maybeDead = freed->back();
        if (maybeDead->end() < reg->getInterval(0)->start()) {
            // This spill slot is dead before the start of the interval trying
            // to reuse the slot, so reuse is safe. Otherwise, we could
            // encounter a situation where a stack slot is allocated and freed
            // inside a loop, but the same allocation is then used to hold a
            // loop-carried value.
            //
            // Note that we don't reuse the dead slot if its interval ends right
            // before the current interval, to avoid conflicting slot -> reg and
            // reg -> slot moves in the same movegroup.
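            // Example of the hazard (hypothetical positions): a slot whose
            // previous occupant was live until position 40 must not be
            // reused by an interval starting at 30, since that interval may
            // carry a value around a loop back edge while the old occupant
            // still owned the slot. The end < start test above rejects
            // exactly this case.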
            freed->popBack();
            LinearScanVirtualRegister *dead = &vregs[maybeDead->vreg()];
#ifdef JS_NUNBOX32
            if (IsNunbox(dead))
                return BaseOfNunboxSlot(dead->type(), dead->canonicalSpillSlot());
#endif
            return dead->canonicalSpillSlot();
        }
    }

    return stackSlotAllocator.allocateSlot(reg->type());
}

bool
LinearScanAllocator::spill()
{
    IonSpew(IonSpew_RegAlloc, "  Decided to spill current interval");

    // We can't spill bogus intervals
    JS_ASSERT(current->hasVreg());

    LinearScanVirtualRegister *reg = &vregs[current->vreg()];

    if (reg->canonicalSpill()) {
        IonSpew(IonSpew_RegAlloc, "  Allocating canonical spill location");

        return assign(*reg->canonicalSpill());
    }

    uint32_t stackSlot;
#if defined JS_NUNBOX32
    if (IsNunbox(reg)) {
        LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);

        if (other->canonicalSpill()) {
            // The other half of this nunbox already has a spill slot. To
            // ensure the Value is spilled contiguously, use the other half (it
            // was allocated double-wide).
            JS_ASSERT(other->canonicalSpill()->isStackSlot());
            stackSlot = BaseOfNunboxSlot(other->type(), other->canonicalSpillSlot());
        } else {
            // No canonical spill location exists for this nunbox yet. Allocate
            // one.
            stackSlot = allocateSlotFor(current);
        }
        stackSlot -= OffsetOfNunboxSlot(reg->type());
    } else
#endif
    {
        stackSlot = allocateSlotFor(current);
    }
    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());

    return assign(LStackSlot(stackSlot));
}

void
LinearScanAllocator::freeAllocation(LiveInterval *interval, LAllocation *alloc)
{
    LinearScanVirtualRegister *mine = &vregs[interval->vreg()];
    if (!IsNunbox(mine)) {
        if (alloc->isStackSlot()) {
            if (mine->type() == LDefinition::DOUBLE)
                finishedDoubleSlots_.append(interval);
#if JS_BITS_PER_WORD == 64
            else if (mine->type() == LDefinition::GENERAL ||
                     mine->type() == LDefinition::OBJECT ||
                     mine->type() == LDefinition::SLOTS)
                finishedDoubleSlots_.append(interval);
#endif
#ifdef JS_PUNBOX64
            else if (mine->type() == LDefinition::BOX)
                finishedDoubleSlots_.append(interval);
#endif
            else
                finishedSlots_.append(interval);
        }
        return;
    }

#ifdef JS_NUNBOX32
    // Special handling for nunboxes. We can only free the stack slot once we
    // know both intervals have been finished.
    LinearScanVirtualRegister *other = otherHalfOfNunbox(mine);
    if (other->finished()) {
        if (!mine->canonicalSpill() && !other->canonicalSpill())
            return;

        JS_ASSERT_IF(mine->canonicalSpill() && other->canonicalSpill(),
                     mine->canonicalSpill()->isStackSlot() == other->canonicalSpill()->isStackSlot());

        LinearScanVirtualRegister *candidate = mine->canonicalSpill() ? mine : other;
        if (!candidate->canonicalSpill()->isStackSlot())
            return;

        finishedNunboxSlots_.append(candidate->lastInterval());
    }
#endif
}

void
LinearScanAllocator::finishInterval(LiveInterval *interval)
{
    LAllocation *alloc = interval->getAllocation();
    JS_ASSERT(!alloc->isUse());

    // Toss out the bogus interval now that it's run its course
    if (!interval->hasVreg())
        return;

    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    // All spills should be equal to the canonical spill location.
    JS_ASSERT_IF(alloc->isStackSlot(), *alloc == *reg->canonicalSpill());

    bool lastInterval = interval->index() == (reg->numIntervals() - 1);
    if (lastInterval) {
        freeAllocation(interval, alloc);
        reg->setFinished();
    }

    handled.pushBack(interval);
}

/*
 * This function locates a register which may be assigned by the register
 * allocator and is not assigned to any active interval. The register which
 * is free for the longest period of time is then returned.
 */
AnyRegister::Code
LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
{
    IonSpew(IonSpew_RegAlloc, "  Computing freeUntilPos");

    // Compute free-until positions for all registers
    CodePosition freeUntilPos[AnyRegister::Total];
    bool needFloat = vregs[current->vreg()].isFloatReg();
    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
        AnyRegister reg = regs.takeAny(needFloat);
        freeUntilPos[reg.code()] = CodePosition::MAX;
    }
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            IonSpew(IonSpew_RegAlloc, "   Register %s not free", reg.name());
            freeUntilPos[reg.code()] = CodePosition::MIN;
        }
    }
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            CodePosition pos = current->intersect(*i);
            if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                freeUntilPos[reg.code()] = pos;
                IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
            }
        }
    }

    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
    if (fixedPos != CodePosition::MIN) {
        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
            AnyRegister reg = i->getAllocation()->toRegister();
            if (freeUntilPos[reg.code()] != CodePosition::MIN) {
                CodePosition pos = current->intersect(*i);
                if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
                    freeUntilPos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
                    IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
                }
            }
        }
    }

    AnyRegister::Code bestCode = AnyRegister::Invalid;
    if (current->index()) {
        // As an optimization, use the allocation from the previous interval if
        // it is available.
        LiveInterval *previous = vregs[current->vreg()].getInterval(current->index() - 1);
        LAllocation *alloc = previous->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister prevReg = alloc->toRegister();
            if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
                bestCode = prevReg.code();
        }
    }

    // Assign the register suggested by the hint if it's free.
    const Requirement *hint = current->hint();
    if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
        AnyRegister hintReg = hint->allocation().toRegister();
        if (freeUntilPos[hintReg.code()] > hint->pos())
            bestCode = hintReg.code();
    } else if (hint->kind() == Requirement::SAME_AS_OTHER) {
        LiveInterval *other = vregs[hint->virtualRegister()].intervalFor(hint->pos());
        if (other && other->getAllocation()->isRegister()) {
            AnyRegister hintReg = other->getAllocation()->toRegister();
            if (freeUntilPos[hintReg.code()] > hint->pos())
                bestCode = hintReg.code();
        }
    }

    if (bestCode == AnyRegister::Invalid) {
        // If all else fails, search freeUntilPos for largest value
        for (uint32_t i = 0; i < AnyRegister::Total; i++) {
            if (freeUntilPos[i] == CodePosition::MIN)
                continue;
            if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
                bestCode = AnyRegister::Code(i);
        }
    }

    if (bestCode != AnyRegister::Invalid)
        *freeUntil = freeUntilPos[bestCode];
    return bestCode;
}

/*
 * This function locates a register which is assigned to an active interval,
 * and returns the one whose next use is furthest away. Along the way, the
 * nextUsePos array is filled with the next use position of every candidate
 * register.
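 *
 * For illustration: if the interval occupying eax is next used at position
 * 50 and the one occupying ebx at position 90, ebx is preferred as the
 * spill candidate, since its holder can stay out of a register for longer
 * before a reload becomes necessary.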
 */
AnyRegister::Code
LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
{
    IonSpew(IonSpew_RegAlloc, "  Computing nextUsePos");

    // Compute next-used positions for all registers
    CodePosition nextUsePos[AnyRegister::Total];
    bool needFloat = vregs[current->vreg()].isFloatReg();
    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
        AnyRegister reg = regs.takeAny(needFloat);
        nextUsePos[reg.code()] = CodePosition::MAX;
    }
    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            if (i->start() == current->start()) {
                nextUsePos[reg.code()] = CodePosition::MIN;
                IonSpew(IonSpew_RegAlloc, "   Disqualifying %s due to recency", reg.name());
            } else if (nextUsePos[reg.code()] != CodePosition::MIN) {
                nextUsePos[reg.code()] = i->nextUsePosAfter(current->start());
                IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(),
                        nextUsePos[reg.code()].pos());
            }
        }
    }
    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        LAllocation *alloc = i->getAllocation();
        if (alloc->isRegister(needFloat)) {
            AnyRegister reg = alloc->toRegister();
            CodePosition pos = i->nextUsePosAfter(current->start());
            if (pos < nextUsePos[reg.code()]) {
                nextUsePos[reg.code()] = pos;
                IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(), pos.pos());
            }
        }
    }

    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
    if (fixedPos != CodePosition::MIN) {
        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
            AnyRegister reg = i->getAllocation()->toRegister();
            if (nextUsePos[reg.code()] != CodePosition::MIN) {
                CodePosition pos = i->intersect(current);
                if (pos != CodePosition::MIN && pos < nextUsePos[reg.code()]) {
                    nextUsePos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
                    IonSpew(IonSpew_RegAlloc, "   Register %s next used %u (fixed)", reg.name(), pos.pos());
                }
            }
        }
    }

    // Search nextUsePos for largest value
    AnyRegister::Code bestCode = AnyRegister::Invalid;
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        if (nextUsePos[i] == CodePosition::MIN)
            continue;
        if (bestCode == AnyRegister::Invalid || nextUsePos[i] > nextUsePos[bestCode])
            bestCode = AnyRegister::Code(i);
    }

    if (bestCode != AnyRegister::Invalid)
        *nextUsed = nextUsePos[bestCode];
    return bestCode;
}

/*
 * Two intervals can coexist if any of the following conditions is met:
 *
 *   - The intervals do not intersect.
 *   - The intervals have different allocations.
 *   - The intervals have the same allocation, but the allocation may be
 *     shared.
 *
 * Intuitively, it is a bug if any allocated intervals exist which cannot
 * coexist.
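 *
 * For example, two intervals both assigned eax can coexist only if they
 * never intersect, whereas intervals with distinct allocations, or ones
 * sharing a spill slot (such as a virtual register's canonical spill
 * location), always can.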
 */
bool
LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
{
    LAllocation *aa = a->getAllocation();
    LAllocation *ba = b->getAllocation();
    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
        return a->intersect(b) == CodePosition::MIN;
    return true;
}

#ifdef DEBUG
/*
 * Ensure intervals appear in exactly the appropriate one of {active, inactive,
 * handled}, and that active and inactive intervals do not conflict. Handled
 * intervals are checked for conflicts in validateAllocations for performance
 * reasons.
 */
void
LinearScanAllocator::validateIntervals()
{
    if (!js_JitOptions.checkGraphConsistency)
        return;

    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
        JS_ASSERT(i->numRanges() > 0);
        JS_ASSERT(i->covers(current->start()));

        for (IntervalIterator j(active.begin()); j != i; j++)
            JS_ASSERT(canCoexist(*i, *j));
    }

    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
        JS_ASSERT(i->numRanges() > 0);
        JS_ASSERT(i->end() >= current->start());
        JS_ASSERT(!i->covers(current->start()));

        for (IntervalIterator j(active.begin()); j != active.end(); j++) {
            JS_ASSERT(*i != *j);
            JS_ASSERT(canCoexist(*i, *j));
        }
        for (IntervalIterator j(inactive.begin()); j != i; j++)
            JS_ASSERT(canCoexist(*i, *j));
    }

    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
        JS_ASSERT(!i->getAllocation()->isUse());
        JS_ASSERT(i->numRanges() > 0);
        if (i->getAllocation()->isRegister()) {
            JS_ASSERT(i->end() <= current->start());
            JS_ASSERT(!i->covers(current->start()));
        }

        for (IntervalIterator j(active.begin()); j != active.end(); j++)
            JS_ASSERT(*i != *j);
        for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
            JS_ASSERT(*i != *j);
    }
}

/*
 * This function performs a nice, expensive check that all intervals
 * in the function can coexist with every other interval.
 */
void
LinearScanAllocator::validateAllocations()
{
    if (!js_JitOptions.checkGraphConsistency)
        return;

    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
        for (IntervalIterator j(handled.begin()); j != i; j++) {
            JS_ASSERT(*i != *j);
            JS_ASSERT(canCoexist(*i, *j));
        }
        LinearScanVirtualRegister *reg = &vregs[i->vreg()];
        bool found = false;
        for (size_t j = 0; j < reg->numIntervals(); j++) {
            if (reg->getInterval(j) == *i) {
                JS_ASSERT(j == i->index());
                found = true;
                break;
            }
        }
        JS_ASSERT(found);
    }
}

#endif // DEBUG

bool
LinearScanAllocator::go()
{
    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");

    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
    if (!buildLivenessInfo())
        return false;
    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");

    if (mir->shouldCancel("LSRA Liveness"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning preliminary register allocation");
    if (!allocateRegisters())
        return false;
    IonSpew(IonSpew_RegAlloc, "Preliminary register allocation complete");

    if (mir->shouldCancel("LSRA Preliminary Regalloc"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning control flow resolution");
    if (!resolveControlFlow())
        return false;
    IonSpew(IonSpew_RegAlloc, "Control flow resolution complete");

    if (mir->shouldCancel("LSRA Control Flow"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning register allocation reification");
    if (!reifyAllocations())
        return false;
    IonSpew(IonSpew_RegAlloc, "Register allocation reification complete");

    if (mir->shouldCancel("LSRA Reification"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Beginning safepoint population.");
    if (!populateSafepoints())
        return false;
    IonSpew(IonSpew_RegAlloc, "Safepoint population complete.");

    if (mir->shouldCancel("LSRA Safepoints"))
        return false;

    IonSpew(IonSpew_RegAlloc, "Register allocation complete");

    return true;
}

void
LinearScanAllocator::setIntervalRequirement(LiveInterval *interval)
{
    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
    JS_ASSERT(interval->hint()->kind() == Requirement::NONE);

    // This function computes requirements by virtual register; other types
    // of intervals should have their requirements set manually.
    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];

    if (interval->index() == 0) {
        // The first interval is the definition, so deal with any definition
        // constraints/hints

        if (reg->def()->policy() == LDefinition::PRESET) {
            // Preset policies get a FIXED requirement or hint.
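            // A preset register becomes only a hint: the definition is
            // presumably funneled through the corresponding fixed interval
            // (see reifyAllocations()), so the vreg's own interval need not
            // demand the register. A preset memory location, by contrast,
            // is a hard requirement.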
            if (reg->def()->output()->isRegister())
                interval->setHint(Requirement(*reg->def()->output()));
            else
                interval->setRequirement(Requirement(*reg->def()->output()));
        } else if (reg->def()->policy() == LDefinition::MUST_REUSE_INPUT) {
            // Reuse policies get either a FIXED requirement or a SAME_AS hint.
            LUse *use = reg->ins()->getOperand(reg->def()->getReusedInput())->toUse();
            interval->setRequirement(Requirement(Requirement::REGISTER));
            interval->setHint(Requirement(use->virtualRegister(), interval->start().previous()));
        } else if (reg->ins()->isPhi()) {
            // Phis don't have any requirements, but they should prefer
            // their input allocations, so they get a SAME_AS hint of the
            // first input
            LUse *use = reg->ins()->getOperand(0)->toUse();
            LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
            CodePosition predEnd = outputOf(predecessor->lastId());
            interval->setHint(Requirement(use->virtualRegister(), predEnd));
        } else {
            // Non-phis get a REGISTER requirement
            interval->setRequirement(Requirement(Requirement::REGISTER));
        }
    }

    UsePosition *fixedOp = nullptr;
    UsePosition *registerOp = nullptr;

    // Search uses at the start of the interval for requirements.
    UsePositionIterator usePos(interval->usesBegin());
    for (; usePos != interval->usesEnd(); usePos++) {
        if (interval->start().next() < usePos->pos)
            break;

        LUse::Policy policy = usePos->use->policy();
        if (policy == LUse::FIXED) {
            fixedOp = *usePos;
            interval->setRequirement(Requirement(Requirement::REGISTER));
            break;
        } else if (policy == LUse::REGISTER) {
            // Register uses get a REGISTER requirement
            interval->setRequirement(Requirement(Requirement::REGISTER));
        }
    }

    // Search other uses for hints. If the virtual register already has a
    // canonical spill location, we will eagerly spill this interval, so we
    // don't have to search for hints.
    if (!fixedOp && !vregs[interval->vreg()].canonicalSpill()) {
        for (; usePos != interval->usesEnd(); usePos++) {
            LUse::Policy policy = usePos->use->policy();
            if (policy == LUse::FIXED) {
                fixedOp = *usePos;
                break;
            } else if (policy == LUse::REGISTER) {
                if (!registerOp)
                    registerOp = *usePos;
            }
        }
    }

    if (fixedOp) {
        // Intervals with a fixed use now get a FIXED hint.
        AnyRegister required = GetFixedRegister(reg->def(), fixedOp->use);
        interval->setHint(Requirement(LAllocation(required), fixedOp->pos));
    } else if (registerOp) {
        // Intervals with register uses get a REGISTER hint. We may have
        // already assigned a SAME_AS hint; make sure we don't overwrite it
        // with a weaker hint.
        if (interval->hint()->kind() == Requirement::NONE)
            interval->setHint(Requirement(Requirement::REGISTER, registerOp->pos));
    }
}

/*
 * Enqueue by iteration starting from the node with the lowest start position.
 *
 * If we assign registers to intervals in order of their start positions
 * without regard to their requirements, we can end up in situations where
 * intervals that do not require registers block intervals that must have
 * registers from getting one. To avoid this, we ensure that intervals are
 * ordered by position and priority so intervals with more stringent
 * requirements are handled first.
 */
void
LinearScanAllocator::UnhandledQueue::enqueueBackward(LiveInterval *interval)
{
    IntervalReverseIterator i(rbegin());

    for (; i != rend(); i++) {
        if (i->start() > interval->start())
            break;
        if (i->start() == interval->start() &&
            i->requirement()->priority() >= interval->requirement()->priority())
        {
            break;
        }
    }
    insertAfter(*i, interval);
}

/*
 * Enqueue by iteration from high start position to low start position,
 * after a provided node.
 */
void
LinearScanAllocator::UnhandledQueue::enqueueForward(LiveInterval *after, LiveInterval *interval)
{
    IntervalIterator i(begin(after));
    i++; // Skip the initial node.

    for (; i != end(); i++) {
        if (i->start() < interval->start())
            break;
        if (i->start() == interval->start() &&
            i->requirement()->priority() < interval->requirement()->priority())
        {
            break;
        }
    }
    insertBefore(*i, interval);
}

void
LinearScanAllocator::UnhandledQueue::assertSorted()
{
#ifdef DEBUG
    LiveInterval *prev = nullptr;
    for (IntervalIterator i(begin()); i != end(); i++) {
        if (prev) {
            JS_ASSERT(prev->start() >= i->start());
            JS_ASSERT_IF(prev->start() == i->start(),
                         prev->requirement()->priority() >= i->requirement()->priority());
        }
        prev = *i;
    }
#endif
}

LiveInterval *
LinearScanAllocator::UnhandledQueue::dequeue()
{
    if (rbegin() == rend())
        return nullptr;

    LiveInterval *result = *rbegin();
    remove(result);
    return result;
}