js/src/jit/LiveRangeAllocator.h

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset 0 6474c204b198
permissions -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_LiveRangeAllocator_h
#define jit_LiveRangeAllocator_h

#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"

#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"

// Common structures and functions used by register allocators that operate on
// virtual register live ranges.

namespace js {
namespace jit {
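
// A Requirement encodes a constraint the allocator must satisfy (or, for
// hints, would prefer to satisfy) when assigning an allocation to a live
// interval: NONE places no constraint, REGISTER demands some register, FIXED
// demands one specific LAllocation, and SAME_AS_OTHER ties the interval to
// another virtual register's allocation (only ever used as a hint; see
// LiveInterval::setRequirement below).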
class Requirement
{
  public:
    enum Kind {
        NONE,
        REGISTER,
        FIXED,
        SAME_AS_OTHER
    };

    Requirement()
      : kind_(NONE)
    { }

    Requirement(Kind kind)
      : kind_(kind)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(Kind kind, CodePosition at)
      : kind_(kind),
        position_(at)
    {
        // These have dedicated constructors.
        JS_ASSERT(kind != FIXED && kind != SAME_AS_OTHER);
    }

    Requirement(LAllocation fixed)
      : kind_(FIXED),
        allocation_(fixed)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }

    // Only useful as a hint, encodes where the fixed requirement is used to
    // avoid allocating a fixed register too early.
    Requirement(LAllocation fixed, CodePosition at)
      : kind_(FIXED),
        allocation_(fixed),
        position_(at)
    {
        JS_ASSERT(fixed == LAllocation() || !fixed.isUse());
    }

    Requirement(uint32_t vreg, CodePosition at)
      : kind_(SAME_AS_OTHER),
        allocation_(LUse(vreg, LUse::ANY)),
        position_(at)
    { }

    Kind kind() const {
        return kind_;
    }

    LAllocation allocation() const {
        JS_ASSERT(!allocation_.isUse());
        return allocation_;
    }

    uint32_t virtualRegister() const {
        JS_ASSERT(allocation_.isUse());
        JS_ASSERT(kind() == SAME_AS_OTHER);
        return allocation_.toUse()->virtualRegister();
    }

    CodePosition pos() const {
        return position_;
    }

    int priority() const;

  private:
    Kind kind_;
    LAllocation allocation_;
    CodePosition position_;
};
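
// A UsePosition pairs a single LUse of a virtual register with the
// CodePosition at which that use occurs; each LiveInterval keeps a forward
// list of these, ordered by position.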
struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition>
{
    LUse *use;
    CodePosition pos;

    UsePosition(LUse *use, CodePosition pos) :
        use(use),
        pos(pos)
    { }
};

typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
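
// Returns whether the given allocation can satisfy the policy of a use. For
// example, an LUse::ANY use is satisfied by either a register or a memory
// location, while an LUse::REGISTER use requires a register.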
static inline bool
UseCompatibleWith(const LUse *use, LAllocation alloc)
{
    switch (use->policy()) {
      case LUse::ANY:
      case LUse::KEEPALIVE:
        return alloc.isRegister() || alloc.isMemory();
      case LUse::REGISTER:
        return alloc.isRegister();
      case LUse::FIXED:
        // Fixed uses are handled using fixed intervals. The
        // UsePosition is only used as a hint.
        return alloc.isRegister();
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown use policy");
    }
}

#ifdef DEBUG

static inline bool
DefinitionCompatibleWith(LInstruction *ins, const LDefinition *def, LAllocation alloc)
{
    if (ins->isPhi()) {
        if (def->isFloatReg())
            return alloc.isFloatReg() || alloc.isStackSlot();
        return alloc.isGeneralReg() || alloc.isStackSlot();
    }

    switch (def->policy()) {
      case LDefinition::DEFAULT:
        if (!alloc.isRegister())
            return false;
        return alloc.isFloatReg() == def->isFloatReg();
      case LDefinition::PRESET:
        return alloc == *def->output();
      case LDefinition::MUST_REUSE_INPUT:
        if (!alloc.isRegister() || !ins->numOperands())
            return false;
        return alloc == *ins->getOperand(def->getReusedInput());
      case LDefinition::PASSTHROUGH:
        return true;
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown definition policy");
    }
}

#endif // DEBUG
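
// Scans the definitions and temps of an instruction for one with the
// MUST_REUSE_INPUT policy whose reused operand is the given allocation,
// returning it if found and nullptr otherwise.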
static inline LDefinition *
FindReusingDefinition(LInstruction *ins, LAllocation *alloc)
{
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition *def = ins->getDef(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition *def = ins->getTemp(i);
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput()) == alloc)
            return def;
    }
    return nullptr;
}

/*
 * A live interval is a set of disjoint ranges of code positions where a
 * virtual register is live. Register allocation operates on these intervals,
 * splitting them as necessary and assigning allocations to them as it runs.
 */
class LiveInterval
  : public InlineListNode<LiveInterval>,
    public TempObject
{
  public:
    /*
     * A range is a contiguous sequence of CodePositions where the virtual
     * register associated with this interval is live.
     */
    struct Range {
        Range()
          : from(),
            to()
        { }
        Range(CodePosition f, CodePosition t)
          : from(f),
            to(t)
        {
            JS_ASSERT(from < to);
        }
        CodePosition from;

        // The end of this range, exclusive.
        CodePosition to;

        bool empty() const {
            return from >= to;
        }

        // Whether this range wholly contains other.
        bool contains(const Range *other) const;

        // Intersect this range with other, returning the subranges of this
        // that are before, inside, or after other.
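        // For example, intersecting [0,10) with [3,6) yields pre = [0,3),
        // inside = [3,6), and post = [6,10); pieces that do not exist come
        // back as empty ranges.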
        void intersect(const Range *other, Range *pre, Range *inside, Range *post) const;
    };

  private:
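    // The disjoint ranges of this interval. Note that they are stored in
    // reverse code order: the vector's last element is the earliest range
    // (see start()) and its first element the latest (see end()).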
    Vector<Range, 1, IonAllocPolicy> ranges_;
    LAllocation alloc_;
    LiveInterval *spillInterval_;
    uint32_t vreg_;
    uint32_t index_;
    Requirement requirement_;
    Requirement hint_;
    InlineForwardList<UsePosition> uses_;
    size_t lastProcessedRange_;

    LiveInterval(TempAllocator &alloc, uint32_t vreg, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(vreg),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

    LiveInterval(TempAllocator &alloc, uint32_t index)
      : ranges_(alloc),
        spillInterval_(nullptr),
        vreg_(UINT32_MAX),
        index_(index),
        lastProcessedRange_(size_t(-1))
    { }

  public:
    static LiveInterval *New(TempAllocator &alloc, uint32_t vreg, uint32_t index) {
        return new(alloc) LiveInterval(alloc, vreg, index);
    }
    static LiveInterval *New(TempAllocator &alloc, uint32_t index) {
        return new(alloc) LiveInterval(alloc, index);
    }

    bool addRange(CodePosition from, CodePosition to);
    bool addRangeAtHead(CodePosition from, CodePosition to);
    void setFrom(CodePosition from);
    CodePosition intersect(LiveInterval *other);
    bool covers(CodePosition pos);
    CodePosition nextCoveredAfter(CodePosition pos);

    CodePosition start() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.back().from;
    }

    CodePosition end() const {
        JS_ASSERT(!ranges_.empty());
        return ranges_.begin()->to;
    }

    size_t numRanges() const {
        return ranges_.length();
    }
    const Range *getRange(size_t i) const {
        return &ranges_[i];
    }
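
    // lastProcessedRange_ caches the index of the range most recently
    // examined by the allocator, so that repeated queries walking forward
    // through an interval can resume from that range instead of scanning
    // all ranges from scratch.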
    void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
        // If the range starts after pos, we may not be able to use
        // it in the next lastProcessedRangeIfValid call.
        JS_ASSERT(ranges_[range].from <= pos);
        lastProcessedRange_ = range;
    }
    size_t lastProcessedRangeIfValid(CodePosition pos) const {
        if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
            return lastProcessedRange_;
        return ranges_.length() - 1;
    }

    LAllocation *getAllocation() {
        return &alloc_;
    }
    void setAllocation(LAllocation alloc) {
        alloc_ = alloc;
    }
    void setSpillInterval(LiveInterval *spill) {
        spillInterval_ = spill;
    }
    LiveInterval *spillInterval() {
        return spillInterval_;
    }
    bool hasVreg() const {
        return vreg_ != UINT32_MAX;
    }
    uint32_t vreg() const {
        JS_ASSERT(hasVreg());
        return vreg_;
    }
    uint32_t index() const {
        return index_;
    }
    void setIndex(uint32_t index) {
        index_ = index;
    }
    const Requirement *requirement() const {
        return &requirement_;
    }
    void setRequirement(const Requirement &requirement) {
        // A SAME_AS_OTHER requirement complicates regalloc too much; it
        // should only be used as a hint.
        JS_ASSERT(requirement.kind() != Requirement::SAME_AS_OTHER);
        requirement_ = requirement;
    }
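    // Requirement merging rules, as implemented below: two FIXED
    // requirements are compatible only if they name the same allocation; a
    // REGISTER requirement is compatible with an existing FIXED requirement
    // only if that fixed allocation is a register; in all other cases the
    // new requirement simply replaces a NONE or REGISTER requirement.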
    bool addRequirement(const Requirement &newRequirement) {
        // Merge newRequirement with any existing requirement, returning false
        // if the new and old requirements conflict.
        JS_ASSERT(newRequirement.kind() != Requirement::SAME_AS_OTHER);

        if (newRequirement.kind() == Requirement::FIXED) {
            if (requirement_.kind() == Requirement::FIXED)
                return newRequirement.allocation() == requirement_.allocation();
            requirement_ = newRequirement;
            return true;
        }

        JS_ASSERT(newRequirement.kind() == Requirement::REGISTER);
        if (requirement_.kind() == Requirement::FIXED)
            return requirement_.allocation().isRegister();

        requirement_ = newRequirement;
        return true;
    }
    const Requirement *hint() const {
        return &hint_;
    }
    void setHint(const Requirement &hint) {
        hint_ = hint;
    }
    bool isSpill() const {
        return alloc_.isStackSlot();
    }
    bool splitFrom(CodePosition pos, LiveInterval *after);

    void addUse(UsePosition *use);
    void addUseAtEnd(UsePosition *use);
    UsePosition *nextUseAfter(CodePosition pos);
    CodePosition nextUsePosAfter(CodePosition pos);
    CodePosition firstIncompatibleUse(LAllocation alloc);

    UsePositionIterator usesBegin() const {
        return uses_.begin();
    }

    UsePositionIterator usesEnd() const {
        return uses_.end();
    }

    bool usesEmpty() const {
        return uses_.empty();
    }

    UsePosition *usesBack() {
        return uses_.back();
    }

#ifdef DEBUG
    void validateRanges();
#endif

    // Return a string describing the ranges in this LiveInterval. This is
    // not re-entrant!
    const char *rangesToString() const;

    void dump();
};
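
// Illustrative use of the interface above (with hypothetical positions): an
// interval for vreg 3 that is live over two disjoint ranges would be built
// with LiveInterval::New(alloc, 3, 0) followed by one addRange() call per
// range; covers() then answers liveness queries for any position, and
// splitFrom() carves off the tail when the allocator must split.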

/*
 * Represents all of the register allocation state associated with a virtual
 * register, including all associated intervals and pointers to relevant LIR
 * structures.
 */
class VirtualRegister
{
    LBlock *block_;
    LInstruction *ins_;
    LDefinition *def_;
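
    // All intervals for this virtual register, kept sorted by ascending
    // start position (see addInterval below).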
    Vector<LiveInterval *, 1, IonAllocPolicy> intervals_;

    // Whether def_ is a temp or an output.
    bool isTemp_ : 1;

    void operator=(const VirtualRegister &) MOZ_DELETE;
    VirtualRegister(const VirtualRegister &) MOZ_DELETE;

  protected:
    explicit VirtualRegister(TempAllocator &alloc)
      : intervals_(alloc)
    {}

  public:
    bool init(TempAllocator &alloc, LBlock *block, LInstruction *ins, LDefinition *def,
              bool isTemp)
    {
        JS_ASSERT(block && !block_);
        block_ = block;
        ins_ = ins;
        def_ = def;
        isTemp_ = isTemp;
        LiveInterval *initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
        if (!initial)
            return false;
        return intervals_.append(initial);
    }
    LBlock *block() {
        return block_;
    }
    LInstruction *ins() {
        return ins_;
    }
    LDefinition *def() const {
        return def_;
    }
    LDefinition::Type type() const {
        return def()->type();
    }
    bool isTemp() const {
        return isTemp_;
    }
    size_t numIntervals() const {
        return intervals_.length();
    }
    LiveInterval *getInterval(size_t i) const {
        return intervals_[i];
    }
    LiveInterval *lastInterval() const {
        JS_ASSERT(numIntervals() > 0);
        return getInterval(numIntervals() - 1);
    }
    void replaceInterval(LiveInterval *old, LiveInterval *interval) {
        JS_ASSERT(intervals_[old->index()] == old);
        interval->setIndex(old->index());
        intervals_[old->index()] = interval;
    }
    bool addInterval(LiveInterval *interval) {
        JS_ASSERT(interval->numRanges());

        // Preserve ascending order for faster lookups.
        LiveInterval **found = nullptr;
        LiveInterval **i;
        for (i = intervals_.begin(); i != intervals_.end(); i++) {
            if (!found && interval->start() < (*i)->start())
                found = i;
            if (found)
                (*i)->setIndex((*i)->index() + 1);
        }
        if (!found)
            found = intervals_.end();
        interval->setIndex(found - intervals_.begin());
        return intervals_.insert(found, interval);
    }
    bool isFloatReg() const {
        return def_->isFloatReg();
    }

    LiveInterval *intervalFor(CodePosition pos);
    LiveInterval *getFirstInterval();
};

// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
  private:
    VREG *vregs_;
    uint32_t numVregs_;

    void operator=(const VirtualRegisterMap &) MOZ_DELETE;
    VirtualRegisterMap(const VirtualRegisterMap &) MOZ_DELETE;

  public:
    VirtualRegisterMap()
      : vregs_(nullptr),
        numVregs_(0)
    { }

    bool init(MIRGenerator *gen, uint32_t numVregs) {
        vregs_ = gen->allocate<VREG>(numVregs);
        numVregs_ = numVregs;
        if (!vregs_)
            return false;
        memset(vregs_, 0, sizeof(VREG) * numVregs);
        TempAllocator &alloc = gen->alloc();
        for (uint32_t i = 0; i < numVregs; i++)
            new(&vregs_[i]) VREG(alloc);
        return true;
    }
    VREG &operator[](unsigned int index) {
        JS_ASSERT(index < numVregs_);
        return vregs_[index];
    }
    VREG &operator[](const LAllocation *alloc) {
        JS_ASSERT(alloc->isUse());
        JS_ASSERT(alloc->toUse()->virtualRegister() < numVregs_);
        return vregs_[alloc->toUse()->virtualRegister()];
    }
    VREG &operator[](const LDefinition *def) {
        JS_ASSERT(def->virtualRegister() < numVregs_);
        return vregs_[def->virtualRegister()];
    }
    uint32_t numVirtualRegisters() const {
        return numVregs_;
    }
};
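
// The overloaded operator[] above lets an allocator index the map directly
// by vreg number, by a use, or by a definition. For instance, given a map
// 'vregs' and a use allocation 'a' (hypothetical names), vregs[a] and
// vregs[a->toUse()->virtualRegister()] denote the same VREG entry.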

static inline bool
IsNunbox(VirtualRegister *vreg)
{
#ifdef JS_NUNBOX32
    return (vreg->type() == LDefinition::TYPE ||
            vreg->type() == LDefinition::PAYLOAD);
#else
    return false;
#endif
}

static inline bool
IsSlotsOrElements(VirtualRegister *vreg)
{
    return vreg->type() == LDefinition::SLOTS;
}

static inline bool
IsTraceable(VirtualRegister *reg)
{
    if (reg->type() == LDefinition::OBJECT)
        return true;
#ifdef JS_PUNBOX64
    if (reg->type() == LDefinition::BOX)
        return true;
#endif
    return false;
}

typedef InlineList<LiveInterval>::iterator IntervalIterator;
typedef InlineList<LiveInterval>::reverse_iterator IntervalReverseIterator;

// The forLSRA parameter indicates whether the underlying allocator is LSRA.
// This changes the generated live ranges in various ways: inserting additional
// fixed uses of registers, and shifting the boundaries of live ranges by small
// amounts. This exists because different allocators handle live ranges
// differently; ideally, they would all treat live ranges in the same way.
template <typename VREG, bool forLSRA>
class LiveRangeAllocator : protected RegisterAllocator
{
  protected:
    // Computed information
    BitSet **liveIn;
    VirtualRegisterMap<VREG> vregs;
    mozilla::Array<LiveInterval *, AnyRegister::Total> fixedIntervals;

    // Union of all ranges in fixedIntervals, used to quickly determine
    // whether an interval intersects with a fixed register.
    LiveInterval *fixedIntervalsUnion;

    // Allocation state
    StackSlotAllocator stackSlotAllocator;

    LiveRangeAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : RegisterAllocator(mir, lir, graph),
        liveIn(nullptr),
        fixedIntervalsUnion(nullptr)
    {
    }

    bool buildLivenessInfo();

    bool init();
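
    // Records that the physical register reg is in use over [from, to), both
    // in that register's own fixed interval and in the union of all fixed
    // intervals kept for fast intersection tests.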
    bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
        if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
            return false;
        return fixedIntervalsUnion->addRangeAtHead(from, to);
    }

    void validateVirtualRegisters()
    {
#ifdef DEBUG
        if (!js_JitOptions.checkGraphConsistency)
            return;

        for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
            VirtualRegister *reg = &vregs[i];

            LiveInterval *prev = nullptr;
            for (size_t j = 0; j < reg->numIntervals(); j++) {
                LiveInterval *interval = reg->getInterval(j);
                JS_ASSERT(interval->vreg() == i);
                JS_ASSERT(interval->index() == j);

                if (interval->numRanges() == 0)
                    continue;

                JS_ASSERT_IF(prev, prev->end() <= interval->start());
                interval->validateRanges();

                prev = interval;
            }
        }
#endif
    }

#ifdef JS_NUNBOX32
    VREG *otherHalfOfNunbox(VirtualRegister *vreg) {
        signed offset = OffsetToOtherHalfOfNunbox(vreg->type());
        VREG *other = &vregs[vreg->def()->virtualRegister() + offset];
        AssertTypesFormANunbox(vreg->type(), other->type());
        return other;
    }
#endif

    bool addMove(LMoveGroup *moves, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        JS_ASSERT(*from->getAllocation() != *to->getAllocation());
        return moves->add(from->getAllocation(), to->getAllocation(), type);
    }

    bool moveInput(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getInputMoveGroup(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAfter(CodePosition pos, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = getMoveGroupAfter(pos);
        return addMove(moves, from, to, type);
    }

    bool moveAtExit(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getExitMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }

    bool moveAtEntry(LBlock *block, LiveInterval *from, LiveInterval *to, LDefinition::Type type) {
        if (*from->getAllocation() == *to->getAllocation())
            return true;
        LMoveGroup *moves = block->getEntryMoveGroup(alloc());
        return addMove(moves, from, to, type);
    }
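
    // Returns the index of the first non-call safepoint whose input position
    // is at or after from; this relies on the graph storing safepoints in
    // ascending code order.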
    size_t findFirstNonCallSafepoint(CodePosition from) const
    {
        size_t i = 0;
        for (; i < graph.numNonCallSafepoints(); i++) {
            const LInstruction *ins = graph.getNonCallSafepoint(i);
            if (from <= inputOf(ins))
                break;
        }
        return i;
    }

    void addLiveRegistersForInterval(VirtualRegister *reg, LiveInterval *interval)
    {
        // Fill in the live register sets for all non-call safepoints.
        LAllocation *a = interval->getAllocation();
        if (!a->isRegister())
            return;

        // Don't add output registers to the safepoint.
        CodePosition start = interval->start();
        if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
            // We don't add the output register to the safepoint,
            // but it still might get added as one of the inputs.
            // So eagerly add this reg to the safepoint clobbered registers.
            if (LSafepoint *safepoint = reg->ins()->safepoint())
                safepoint->addClobberedRegister(a->toRegister());
#endif
            start = start.next();
        }

        size_t i = findFirstNonCallSafepoint(start);
        for (; i < graph.numNonCallSafepoints(); i++) {
            LInstruction *ins = graph.getNonCallSafepoint(i);
            CodePosition pos = inputOf(ins);

            // Safepoints are sorted, so we can shortcut out of this loop
            // if we go out of range.
            if (interval->end() <= pos)
                break;

            if (!interval->covers(pos))
                continue;

            LSafepoint *safepoint = ins->safepoint();
            safepoint->addLiveRegister(a->toRegister());

#ifdef CHECK_OSIPOINT_REGISTERS
            if (reg->isTemp())
                safepoint->addClobberedRegister(a->toRegister());
#endif
        }
    }

    // Finds the first safepoint that is within range of an interval.
    size_t findFirstSafepoint(const LiveInterval *interval, size_t startFrom) const
    {
        size_t i = startFrom;
        for (; i < graph.numSafepoints(); i++) {
            LInstruction *ins = graph.getSafepoint(i);
            if (interval->start() <= inputOf(ins))
                break;
        }
        return i;
    }
};

} // namespace jit
} // namespace js

#endif /* jit_LiveRangeAllocator_h */
