/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LiveRangeAllocator_h
#define jit_LiveRangeAllocator_h

#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"

#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"

// Common structures and functions used by register allocators that operate on
// virtual register live ranges.

namespace js {
namespace jit {

class Requirement
{
  public:
    enum Kind {
        NONE,
        REGISTER,
        FIXED,
        SAME_AS_OTHER
    };

    Requirement()
      : kind_(NONE)
    { }

    Requirement(Kind kind)
      : kind_(kind)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(Kind kind, CodePosition at)
      : kind_(kind),
        position_(at)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(LAllocation fixed)
      : kind_(FIXED),
        allocation_(fixed)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }
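    // Illustrative sketch (added commentary, not part of the original
    // source): requirements and hints like these are typically attached to
    // live intervals via addRequirement/setRequirement and setHint further
    // below; 'interval', 'otherVreg' and 'pos' are hypothetical names.
    //
    //   interval->addRequirement(Requirement(Requirement::REGISTER));
    //   interval->setHint(Requirement(otherVreg, pos));  // SAME_AS_OTHER hint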
    // Only useful as a hint, encodes where the fixed requirement is used to
    // avoid allocating a fixed register too early.
    Requirement(LAllocation fixed, CodePosition at)
      : kind_(FIXED),
        allocation_(fixed),
        position_(at)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }

    Requirement(uint32_t vreg, CodePosition at)
      : kind_(SAME_AS_OTHER),
        allocation_(LUse(vreg, LUse::ANY)),
        position_(at)
    { }

    Kind kind() const {
        return kind_;
    }

    LAllocation allocation() const {
        JS_ASSERT(!allocation_.isUse());
        return allocation_;
    }

    uint32_t virtualRegister() const {
        JS_ASSERT(allocation_.isUse());
        JS_ASSERT(kind() == SAME_AS_OTHER);
        return allocation_.toUse()->virtualRegister();
    }

    CodePosition pos() const {
        return position_;
    }

    int priority() const;

  private:
    Kind kind_;
    LAllocation allocation_;
    CodePosition position_;
};

struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition>
{
    LUse *use;
    CodePosition pos;

    UsePosition(LUse *use, CodePosition pos) :
        use(use),
        pos(pos)
    { }
};

typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
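// Illustrative sketch (added commentary, not part of the original source):
// the uses recorded on an interval are usually walked with
// UsePositionIterator and each use checked against a candidate allocation
// with UseCompatibleWith below; 'interval' and 'candidate' are hypothetical.
//
//   for (UsePositionIterator usePos(interval->usesBegin());
//        usePos != interval->usesEnd();
//        usePos++)
//   {
//       if (!UseCompatibleWith(usePos->use, candidate))
//           return usePos->pos;  // first use the allocation cannot satisfy
//   }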
static inline bool
UseCompatibleWith(const LUse *use, LAllocation alloc)
{
    switch (use->policy()) {
      case LUse::ANY:
      case LUse::KEEPALIVE:
        return alloc.isRegister() || alloc.isMemory();
      case LUse::REGISTER:
        return alloc.isRegister();
      case LUse::FIXED:
        // Fixed uses are handled using fixed intervals. The
        // UsePosition is only used as hint.
        return alloc.isRegister();
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown use policy");
    }
}

#ifdef DEBUG

static inline bool
DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
{
    if (ins->isPhi()) {
        if (def->isFloatReg())
            return alloc.isFloatReg() || alloc.isStackSlot();
        return alloc.isGeneralReg() || alloc.isStackSlot();
    }

    switch (def->policy()) {
      case LDefinition::DEFAULT:
        if (!alloc.isRegister())
            return false;
        return alloc.isFloatReg() == def->isFloatReg();
      case LDefinition::PRESET:
        return alloc == *def->output();
      case LDefinition::MUST_REUSE_INPUT:
        if (!alloc.isRegister() || !ins->numOperands())
            return false;
        return alloc == *ins->getOperand(def->getReusedInput());
      case LDefinition::PASSTHROUGH:
        return true;
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown definition policy");
    }
}

#endif // DEBUG

static inline LDefinition *
FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition *def = ins->getDef(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition *def = ins->getTemp(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    return nullptr;
}

/*
 * A live interval is a set of disjoint ranges of code positions where a
 * virtual register is live. Register allocation operates on these intervals,
 * splitting them as necessary and assigning allocations to them as it runs.
 */
class LiveInterval
  : public InlineListNode<LiveInterval>,
    public TempObject
{
  public:
    /*
     * A range is a contiguous sequence of CodePositions where the virtual
     * register associated with this interval is live.
     */
    struct Range {
        Range()
          : from(),
            to()
        { }
        Range(CodePosition f, CodePosition t)
          : from(f),
            to(t)
        {
            JS_ASSERT(from < to);
        }
        CodePosition from;

        // The end of this range, exclusive.
        CodePosition to;

        bool empty() const {
            return from >= to;
        }

        // Whether this range wholly contains other.
        bool contains(const Range *other) const;
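        // Illustrative note (added commentary, not part of the original
        // source): ranges are half-open, so two ranges that merely touch at
        // a boundary do not overlap. A caller splitting this range around
        // another might use intersect() below roughly like this, where
        // 'range' and 'other' are hypothetical:
        //
        //   LiveInterval::Range pre, inside, post;
        //   range.intersect(&other, &pre, &inside, &post);
        //   if (!inside.empty()) { /* the two ranges overlap */ }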
        // Intersect this range with other, returning the subranges of this
        // that are before, inside, or after other.
        void intersect(const Range *other, Range *pre, Range *inside, Range *post) const;
    };

  private:
    Vector<Range, 1, IonAllocPolicy> ranges_;
    LAllocation alloc_;
    LiveInterval *spillInterval_;
    uint32_t vreg_;
    uint32_t index_;
    Requirement requirement_;
    Requirement hint_;
    InlineForwardList<UsePosition> uses_;
    size_t lastProcessedRange_;

    LiveInterval(TempAllocator &alloc, uint32_t vreg, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(vreg),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

    LiveInterval(TempAllocator &alloc, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(UINT32_MAX),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

  public:
    static LiveInterval *New(TempAllocator &alloc, uint32_t vreg, uint32_t index) {
        return new(alloc) LiveInterval(alloc, vreg, index);
    }
    static LiveInterval *New(TempAllocator &alloc, uint32_t index) {
        return new(alloc) LiveInterval(alloc, index);
    }

    bool addRange(CodePosition from, CodePosition to);
    bool addRangeAtHead(CodePosition from, CodePosition to);
    void setFrom(CodePosition from);
    CodePosition intersect(LiveInterval *other);
    bool covers(CodePosition pos);
    CodePosition nextCoveredAfter(CodePosition pos);

    CodePosition start() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.back().from;
    }

    CodePosition end() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.begin()->to;
    }

    size_t numRanges() const {
        return ranges_.length();
    }
    const Range *getRange(size_t i) const {
        return &ranges_[i];
    }
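    // Illustrative note (added commentary, not part of the original source):
    // as start() and end() above suggest, ranges_ is kept sorted from the
    // latest range down to the earliest, so walking ranges in ascending code
    // order iterates indices from numRanges() - 1 down to 0, roughly:
    //
    //   for (size_t i = interval->numRanges(); i > 0; i--) {
    //       const LiveInterval::Range *range = interval->getRange(i - 1);
    //       // ... process [range->from, range->to) ...
    //   }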
    void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
        // If the range starts after pos, we may not be able to use
        // it in the next lastProcessedRangeIfValid call.
        JS_ASSERT(ranges_[range].from <= pos);
        lastProcessedRange_ = range;
    }
    size_t lastProcessedRangeIfValid(CodePosition pos) const {
        if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
            return lastProcessedRange_;
        return ranges_.length() - 1;
    }

    LAllocation *getAllocation() {
        return &alloc_;
    }
    void setAllocation(LAllocation alloc) {
        alloc_ = alloc;
    }
    void setSpillInterval(LiveInterval *spill) {
        spillInterval_ = spill;
    }
    LiveInterval *spillInterval() {
        return spillInterval_;
    }
    bool hasVreg() const {
        return vreg_ != UINT32_MAX;
    }
    uint32_t vreg() const {
        JS_ASSERT(hasVreg());
        return vreg_;
    }
    uint32_t index() const {
        return index_;
    }
    void setIndex(uint32_t index) {
        index_ = index;
    }
    const Requirement *requirement() const {
        return &requirement_;
    }
    void setRequirement(const Requirement &requirement) {
        // A SAME_AS_OTHER requirement complicates regalloc too much; it
        // should only be used as hint.
        JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
        requirement_ = requirement;
    }
    bool addRequirement(const Requirement &newRequirement) {
        // Merge newRequirement with any existing requirement, returning false
        // if the new and old requirements conflict.
        JS_ASSERT(newRequirement.kind() != Requirement::SAME_AS_OTHER);

        if (newRequirement.kind() == Requirement::FIXED) {
            if (requirement_.kind() == Requirement::FIXED)
                return newRequirement.allocation() == requirement_.allocation();
            requirement_ = newRequirement;
            return true;
        }

        JS_ASSERT(newRequirement.kind() == Requirement::REGISTER);
        if (requirement_.kind() == Requirement::FIXED)
            return requirement_.allocation().isRegister();

        requirement_ = newRequirement;
        return true;
    }
    const Requirement *hint() const {
        return &hint_;
    }
    void setHint(const Requirement &hint) {
        hint_ = hint;
    }
    bool isSpill() const {
        return alloc_.isStackSlot();
    }
    bool splitFrom(CodePosition pos, LiveInterval *after);

    void addUse(UsePosition *use);
    void addUseAtEnd(UsePosition *use);
    UsePosition *nextUseAfter(CodePosition pos);
    CodePosition nextUsePosAfter(CodePosition pos);
    CodePosition firstIncompatibleUse(LAllocation alloc);

    UsePositionIterator usesBegin() const {
        return uses_.begin();
    }

    UsePositionIterator usesEnd() const {
        return uses_.end();
    }

    bool usesEmpty() const {
        return uses_.empty();
    }

    UsePosition *usesBack() {
        return uses_.back();
    }

#ifdef DEBUG
    void validateRanges();
#endif

    // Return a string describing the ranges in this LiveInterval. This is
    // not re-entrant!
    const char *rangesToString() const;

    void dump();
};

/*
 * Represents all of the register allocation state associated with a virtual
 * register, including all associated intervals and pointers to relevant LIR
 * structures.
 */
class VirtualRegister
{
    LBlock *block_;
    LInstruction *ins_;
    LDefinition *def_;
    Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;

    // Whether def_ is a temp or an output.
    bool isTemp_ : 1;

    void operator=(const VirtualRegister &) MOZ_DELETE;
    VirtualRegister(const VirtualRegister &) MOZ_DELETE;

  protected:
    explicit VirtualRegister(TempAllocator &alloc)
      : intervals_(alloc)
    {}

  public:
    bool init(TempAllocator &alloc, LBlock *block, LInstruction *ins, LDefinition *def,
              bool isTemp)
    {
        JS_ASSERT(block && !block_);
        block_ = block;
        ins_ = ins;
        def_ = def;
        isTemp_ = isTemp;
        LiveInterval *initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
        if (!initial)
            return false;
        return intervals_.append(initial);
    }
    LBlock *block() {
        return block_;
    }
    LInstruction *ins() {
        return ins_;
    }
    LDefinition *def() const {
        return def_;
    }
    LDefinition::Type type() const {
        return def()->type();
    }
    bool isTemp() const {
        return isTemp_;
    }
    size_t numIntervals() const {
        return intervals_.length();
    }
    LiveInterval *getInterval(size_t i) const {
        return intervals_[i];
    }
    LiveInterval *lastInterval() const {
        JS_ASSERT(numIntervals() > 0);
        return getInterval(numIntervals() - 1);
    }
    void replaceInterval(LiveInterval *old, LiveInterval *interval) {
        JS_ASSERT(intervals_[old->index()] == old);
        interval->setIndex(old->index());
        intervals_[old->index()] = interval;
    }
    bool addInterval(LiveInterval *interval) {
        JS_ASSERT(interval->numRanges());

        // Preserve ascending order for faster lookups.
        LiveInterval **found = nullptr;
        LiveInterval **i;
        for (i = intervals_.begin(); i != intervals_.end(); i++) {
            if (!found && interval->start() < (*i)->start())
                found = i;
            if (found)
                (*i)->setIndex((*i)->index() + 1);
        }
        if (!found)
            found = intervals_.end();
        interval->setIndex(found - intervals_.begin());
        return intervals_.insert(found, interval);
    }
    bool isFloatReg() const {
        return def_->isFloatReg();
    }

    LiveInterval *intervalFor(CodePosition pos);
    LiveInterval *getFirstInterval();
};
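// Illustrative usage sketch (added commentary, not part of the original
// source): because addInterval() keeps intervals_ sorted by ascending start
// position, an allocator can map a code position back to the interval that
// covers it and read off the chosen allocation; 'vreg' and 'pos' are
// hypothetical.
//
//   if (LiveInterval *interval = vreg.intervalFor(pos)) {
//       LAllocation *a = interval->getAllocation();
//       // ... rewrite the use at 'pos' to refer to *a ...
//   }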
// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
  private:
    VREG *vregs_;
    uint32_t numVregs_;

    void operator=(const VirtualRegisterMap &) MOZ_DELETE;
    VirtualRegisterMap(const VirtualRegisterMap &) MOZ_DELETE;

  public:
    VirtualRegisterMap()
      : vregs_(nullptr),
        numVregs_(0)
    { }

    bool init(MIRGenerator *gen, uint32_t numVregs) {
        vregs_ = gen->allocate<VREG>(numVregs);
        numVregs_ = numVregs;
        if (!vregs_)
            return false;
        memset(vregs_, 0, sizeof(VREG) * numVregs);
        TempAllocator &alloc = gen->alloc();
        for (uint32_t i = 0; i < numVregs; i++)
            new(&vregs_[i]) VREG(alloc);
        return true;
    }
    VREG &operator[](unsigned int index) {
        JS_ASSERT(index < numVregs_);
        return vregs_[index];
    }
    VREG &operator[](const LAllocation *alloc) {
        JS_ASSERT(alloc->isUse());
        JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_);
        return vregs_[alloc->toUse()->virtualRegister()];
    }
    VREG &operator[](const LDefinition *def) {
        JS_ASSERT(def->virtualRegister() < numVregs_);
        return vregs_[def->virtualRegister()];
    }
    uint32_t numVirtualRegisters() const {
        return numVregs_;
    }
};

static inline bool
IsNunbox(VirtualRegister *vreg)
{
#ifdef JS_NUNBOX32
    return (vreg->type() == LDefinition::TYPE ||
            vreg->type() == LDefinition::PAYLOAD);
#else
    return false;
#endif
}

static inline bool
IsSlotsOrElements(VirtualRegister *vreg)
{
    return vreg->type() == LDefinition::SLOTS;
}

static inline bool
IsTraceable(VirtualRegister *reg)
{
    if (reg->type() == LDefinition::OBJECT)
        return true;
#ifdef JS_PUNBOX64
    if (reg->type() == LDefinition::BOX)
        return true;
#endif
    return false;
}

typedef InlineList<LiveInterval>::iterator IntervalIterator;
typedef InlineList<LiveInterval>::reverse_iterator IntervalReverseIterator;

// The forLSRA parameter indicates whether the underlying allocator is LSRA.
// This changes the generated live ranges in various ways: inserting additional
// fixed uses of registers, and shifting the boundaries of live ranges by small
// amounts. This exists because different allocators handle live ranges
// differently; ideally, they would all treat live ranges in the same way.
template <typename VREG, bool forLSRA>
class LiveRangeAllocator : protected RegisterAllocator
{
  protected:
    // Computed information
    BitSet **liveIn;
    VirtualRegisterMap<VREG> vregs;
    mozilla::Array<LiveInterval *, AnyRegister::Total> fixedIntervals;
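    // Illustrative note (added commentary, not part of the original source):
    // there is one fixed interval per physical register. Pinning a register
    // over a short window goes through addFixedRangeAtHead() below, which
    // records the range both per register and in the union interval, e.g.
    // (positions shown are hypothetical):
    //
    //   if (!addFixedRangeAtHead(reg, inputOf(ins), inputOf(ins).next()))
    //       return false;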
    // Union of all ranges in fixedIntervals, used to quickly determine
    // whether an interval intersects with a fixed register.
    LiveInterval *fixedIntervalsUnion;

    // Allocation state
    StackSlotAllocator stackSlotAllocator;

    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : RegisterAllocator(mir, lir, graph),
        liveIn(nullptr),
        fixedIntervalsUnion(nullptr)
    {
    }

    bool buildLivenessInfo();

    bool init();

    bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
        if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
            return false;
        return fixedIntervalsUnion->addRangeAtHead(from, to);
    }

    void validateVirtualRegisters()
    {
#ifdef DEBUG
        if (!js_JitOptions.checkGraphConsistency)
            return;

        for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
            VirtualRegister *reg = &vregs[i];

            LiveInterval *prev = nullptr;
            for (size_t j = 0; j < reg->numIntervals(); j++) {
                LiveInterval *interval = reg->getInterval(j);
                JS_ASSERT(interval->vreg() == i);
                JS_ASSERT(interval->index() == j);

                if (interval->numRanges() == 0)
                    continue;

                JS_ASSERT_IF(prev, prev->end() <= interval->start());
                interval->validateRanges();

                prev = interval;
            }
        }
#endif
    }

#ifdef JS_NUNBOX32
    VREG *otherHalfOfNunbox(VirtualRegister *vreg) {
        signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
        VREG *other = &vregs[vreg->def()->virtualRegister() + offset];
        AssertTypesFormANunbox(vreg->type(), other->type());
        return other;
    }
#endif

    bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        JS_ASSERT(*from->getAllocation() != *to->getAllocation());
        return moves->add(from->getAllocation(), to->getAllocation(), type);
    }

    bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getInputMoveGroup(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getMoveGroupAfter(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAtExit(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getExitMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }

    bool moveAtEntry(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getEntryMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }
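    // Illustrative sketch (added commentary, not part of the original
    // source): the move helpers above are the typical way an allocator
    // reconciles two intervals of the same vreg that received different
    // allocations across a split point or block edge; 'predecessor', 'from'
    // and 'to' are hypothetical.
    //
    //   if (!moveAtExit(predecessor, from, to, vreg->type()))
    //       return false;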
    size_t findFirstNonCallSafepoint(CodePosition from) const
    {
        size_t i = 0;
        for (; i < graph.numNonCallSafepoints(); i++) {
            const LInstruction *ins = graph.getNonCallSafepoint(i);
            if (from <= inputOf(ins))
                break;
        }
        return i;
    }

    void addLiveRegistersForInterval(VirtualRegister *reg, LiveInterval *interval)
    {
        // Fill in the live register sets for all non-call safepoints.
        LAllocation *a = interval->getAllocation();
        if (!a->isRegister())
            return;

        // Don't add output registers to the safepoint.
        CodePosition start = interval->start();
        if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
            // We don't add the output register to the safepoint,
            // but it still might get added as one of the inputs.
            // So eagerly add this reg to the safepoint clobbered registers.
            if (LSafepoint *safepoint = reg->ins()->safepoint())
                safepoint->addClobberedRegister(a->toRegister());
#endif
            start = start.next();
        }

        size_t i = findFirstNonCallSafepoint(start);
        for (; i < graph.numNonCallSafepoints(); i++) {
            LInstruction *ins = graph.getNonCallSafepoint(i);
            CodePosition pos = inputOf(ins);

            // Safepoints are sorted, so we can shortcut out of this loop
            // if we go out of range.
            if (interval->end() <= pos)
                break;

            if (!interval->covers(pos))
                continue;

            LSafepoint *safepoint = ins->safepoint();
            safepoint->addLiveRegister(a->toRegister());

#ifdef CHECK_OSIPOINT_REGISTERS
            if (reg->isTemp())
                safepoint->addClobberedRegister(a->toRegister());
#endif
        }
    }

    // Finds the first safepoint that is within range of an interval.
    size_t findFirstSafepoint(const LiveInterval *interval, size_t startFrom) const
    {
        size_t i = startFrom;
        for (; i < graph.numSafepoints(); i++) {
            LInstruction *ins = graph.getSafepoint(i);
            if (interval->start() <= inputOf(ins))
                break;
        }
        return i;
    }
};

} // namespace jit
} // namespace js

#endif /* jit_LiveRangeAllocator_h */