js/src/jit/LinearScan.cpp

changeset 6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/LinearScan.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1444 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "jit/LinearScan.h"
    1.11 +
    1.12 +#include "mozilla/DebugOnly.h"
    1.13 +
    1.14 +#include "jit/BitSet.h"
    1.15 +#include "jit/IonSpewer.h"
    1.16 +
    1.17 +using namespace js;
    1.18 +using namespace js::jit;
    1.19 +
    1.20 +using mozilla::DebugOnly;
    1.21 +
    1.22 +/*
    1.23 + * Merge virtual register intervals into the UnhandledQueue, taking advantage
    1.24 + * of their nearly-sorted ordering.
    1.25 + */
    1.26 +void
    1.27 +LinearScanAllocator::enqueueVirtualRegisterIntervals()
    1.28 +{
    1.29 +    // Cursor into the unhandled queue, iterating through start positions.
    1.30 +    IntervalReverseIterator curr = unhandled.rbegin();
    1.31 +
     1.32 +    // Start positions increase roughly (not strictly) with virtual register number.
    1.33 +    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1.34 +        LiveInterval *live = vregs[i].getInterval(0);
    1.35 +        if (live->numRanges() > 0) {
    1.36 +            setIntervalRequirement(live);
    1.37 +
    1.38 +            // Iterate backward until the next highest class of start position.
    1.39 +            for (; curr != unhandled.rend(); curr++) {
    1.40 +                if (curr->start() > live->start())
    1.41 +                    break;
    1.42 +            }
    1.43 +
    1.44 +            // Insert forward from the current position, thereby
    1.45 +            // sorting by priority within the start class.
    1.46 +            unhandled.enqueueForward(*curr, live);
    1.47 +        }
    1.48 +    }
    1.49 +}
    1.50 +
    1.51 +/*
    1.52 + * This function performs a preliminary allocation on the already-computed
    1.53 + * live intervals, storing the result in the allocation field of the intervals
    1.54 + * themselves.
    1.55 + *
    1.56 + * The algorithm is based on the one published in:
    1.57 + *
    1.58 + * Wimmer, Christian, and Hanspeter Mössenböck. "Optimized Interval Splitting
    1.59 + *     in a Linear Scan Register Allocator." Proceedings of the First
    1.60 + *     ACM/USENIX Conference on Virtual Execution Environments. Chicago, IL,
    1.61 + *     USA, ACM. 2005. PDF.
    1.62 + *
    1.63 + * The algorithm proceeds over each interval in order of their start position.
    1.64 + * If a free register is available, the register that is free for the largest
    1.65 + * portion of the current interval is allocated. Otherwise, the interval
    1.66 + * with the farthest-away next use is spilled to make room for this one. In all
    1.67 + * cases, intervals which conflict either with other intervals or with
    1.68 + * use or definition constraints are split at the point of conflict to be
    1.69 + * assigned a compatible allocation later.
    1.70 + */
    1.71 +bool
    1.72 +LinearScanAllocator::allocateRegisters()
    1.73 +{
    1.74 +    // The unhandled queue currently contains only spill intervals, in sorted
    1.75 +    // order. Intervals for virtual registers exist in sorted order based on
    1.76 +    // start position by vreg ID, but may have priorities that require them to
    1.77 +    // be reordered when adding to the unhandled queue.
    1.78 +    enqueueVirtualRegisterIntervals();
    1.79 +    unhandled.assertSorted();
    1.80 +
    1.81 +    // Add fixed intervals with ranges to fixed.
    1.82 +    for (size_t i = 0; i < AnyRegister::Total; i++) {
    1.83 +        if (fixedIntervals[i]->numRanges() > 0)
    1.84 +            fixed.pushBack(fixedIntervals[i]);
    1.85 +    }
    1.86 +
    1.87 +    // Iterate through all intervals in ascending start order.
    1.88 +    CodePosition prevPosition = CodePosition::MIN;
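          +    // Track the previously processed start position so that the active and
          +    // inactive sets are only refreshed when the scan position advances.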
    1.89 +    while ((current = unhandled.dequeue()) != nullptr) {
    1.90 +        JS_ASSERT(current->getAllocation()->isUse());
    1.91 +        JS_ASSERT(current->numRanges() > 0);
    1.92 +
    1.93 +        if (mir->shouldCancel("LSRA Allocate Registers (main loop)"))
    1.94 +            return false;
    1.95 +
    1.96 +        CodePosition position = current->start();
    1.97 +        const Requirement *req = current->requirement();
    1.98 +        const Requirement *hint = current->hint();
    1.99 +
   1.100 +        IonSpew(IonSpew_RegAlloc, "Processing %d = [%u, %u] (pri=%d)",
   1.101 +                current->hasVreg() ? current->vreg() : 0, current->start().pos(),
   1.102 +                current->end().pos(), current->requirement()->priority());
   1.103 +
   1.104 +        // Shift active intervals to the inactive or handled sets as appropriate
   1.105 +        if (position != prevPosition) {
   1.106 +            JS_ASSERT(position > prevPosition);
   1.107 +            prevPosition = position;
   1.108 +
   1.109 +            for (IntervalIterator i(active.begin()); i != active.end(); ) {
   1.110 +                LiveInterval *it = *i;
   1.111 +                JS_ASSERT(it->numRanges() > 0);
   1.112 +
   1.113 +                if (it->end() <= position) {
   1.114 +                    i = active.removeAt(i);
   1.115 +                    finishInterval(it);
   1.116 +                } else if (!it->covers(position)) {
   1.117 +                    i = active.removeAt(i);
   1.118 +                    inactive.pushBack(it);
   1.119 +                } else {
   1.120 +                    i++;
   1.121 +                }
   1.122 +            }
   1.123 +
   1.124 +            // Shift inactive intervals to the active or handled sets as appropriate
   1.125 +            for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
   1.126 +                LiveInterval *it = *i;
   1.127 +                JS_ASSERT(it->numRanges() > 0);
   1.128 +
   1.129 +                if (it->end() <= position) {
   1.130 +                    i = inactive.removeAt(i);
   1.131 +                    finishInterval(it);
   1.132 +                } else if (it->covers(position)) {
   1.133 +                    i = inactive.removeAt(i);
   1.134 +                    active.pushBack(it);
   1.135 +                } else {
   1.136 +                    i++;
   1.137 +                }
   1.138 +            }
   1.139 +        }
   1.140 +
   1.141 +        // Sanity check all intervals in all sets
   1.142 +        validateIntervals();
   1.143 +
   1.144 +        // If the interval has a hard requirement, grant it.
   1.145 +        if (req->kind() == Requirement::FIXED) {
   1.146 +            JS_ASSERT(!req->allocation().isRegister());
   1.147 +            if (!assign(req->allocation()))
   1.148 +                return false;
   1.149 +            continue;
   1.150 +        }
   1.151 +
   1.152 +        // If we don't really need this in a register, don't allocate one
   1.153 +        if (req->kind() != Requirement::REGISTER && hint->kind() == Requirement::NONE) {
   1.154 +            IonSpew(IonSpew_RegAlloc, "  Eagerly spilling virtual register %d",
   1.155 +                    current->hasVreg() ? current->vreg() : 0);
   1.156 +            if (!spill())
   1.157 +                return false;
   1.158 +            continue;
   1.159 +        }
   1.160 +
   1.161 +        // Try to allocate a free register
   1.162 +        IonSpew(IonSpew_RegAlloc, " Attempting free register allocation");
   1.163 +        CodePosition bestFreeUntil;
   1.164 +        AnyRegister::Code bestCode = findBestFreeRegister(&bestFreeUntil);
   1.165 +        if (bestCode != AnyRegister::Invalid) {
   1.166 +            AnyRegister best = AnyRegister::FromCode(bestCode);
   1.167 +            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());
   1.168 +
   1.169 +            // Split when the register is next needed if necessary
   1.170 +            if (bestFreeUntil < current->end()) {
   1.171 +                if (!splitInterval(current, bestFreeUntil))
   1.172 +                    return false;
   1.173 +            }
   1.174 +            if (!assign(LAllocation(best)))
   1.175 +                return false;
   1.176 +
   1.177 +            continue;
   1.178 +        }
   1.179 +
   1.180 +        IonSpew(IonSpew_RegAlloc, " Attempting blocked register allocation");
   1.181 +
   1.182 +        // If we absolutely need a register or our next use is closer than the
   1.183 +        // selected blocking register then we spill the blocker. Otherwise, we
   1.184 +        // spill the current interval.
   1.185 +        CodePosition bestNextUsed;
   1.186 +        bestCode = findBestBlockedRegister(&bestNextUsed);
   1.187 +        if (bestCode != AnyRegister::Invalid &&
   1.188 +            (req->kind() == Requirement::REGISTER || hint->pos() < bestNextUsed))
   1.189 +        {
   1.190 +            AnyRegister best = AnyRegister::FromCode(bestCode);
   1.191 +            IonSpew(IonSpew_RegAlloc, "  Decided best register was %s", best.name());
   1.192 +
   1.193 +            if (!splitBlockingIntervals(LAllocation(best)))
   1.194 +                return false;
   1.195 +            if (!assign(LAllocation(best)))
   1.196 +                return false;
   1.197 +
   1.198 +            continue;
   1.199 +        }
   1.200 +
   1.201 +        IonSpew(IonSpew_RegAlloc, "  No registers available to spill");
   1.202 +        JS_ASSERT(req->kind() == Requirement::NONE);
   1.203 +
   1.204 +        if (!spill())
   1.205 +            return false;
   1.206 +    }
   1.207 +
   1.208 +    validateAllocations();
   1.209 +    validateVirtualRegisters();
   1.210 +
   1.211 +    return true;
   1.212 +}
   1.213 +
   1.214 +/*
   1.215 + * This function iterates over control flow edges in the function and resolves
   1.216 + * conflicts wherein two predecessors of a block have different allocations
   1.217 + * for a virtual register than the block itself. It also turns phis into moves.
   1.218 + *
    1.219 + * The algorithm is based on the one published in "Linear Scan Register
    1.220 + * Allocation on SSA Form" by C. Wimmer et al., a successor to the paper
    1.221 + * cited in full above.
    1.222 + */
   1.223 +bool
   1.224 +LinearScanAllocator::resolveControlFlow()
   1.225 +{
   1.226 +    for (size_t i = 0; i < graph.numBlocks(); i++) {
   1.227 +        if (mir->shouldCancel("LSRA Resolve Control Flow (main loop)"))
   1.228 +            return false;
   1.229 +
   1.230 +        LBlock *successor = graph.getBlock(i);
   1.231 +        MBasicBlock *mSuccessor = successor->mir();
   1.232 +        if (mSuccessor->numPredecessors() < 1)
   1.233 +            continue;
   1.234 +
   1.235 +        // Resolve phis to moves
   1.236 +        for (size_t j = 0; j < successor->numPhis(); j++) {
   1.237 +            LPhi *phi = successor->getPhi(j);
   1.238 +            JS_ASSERT(phi->numDefs() == 1);
   1.239 +            LDefinition *def = phi->getDef(0);
   1.240 +            LinearScanVirtualRegister *vreg = &vregs[def];
   1.241 +            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
   1.242 +            JS_ASSERT(to);
   1.243 +
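          +            // Each phi operand is read from the interval live at the end of the
          +            // corresponding predecessor; the move into the phi's interval is
          +            // inserted at that predecessor's exit.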
   1.244 +            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
   1.245 +                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
   1.246 +                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   1.247 +
   1.248 +                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
   1.249 +                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
   1.250 +                JS_ASSERT(from);
   1.251 +
   1.252 +                if (!moveAtExit(predecessor, from, to, def->type()))
   1.253 +                    return false;
   1.254 +            }
   1.255 +
   1.256 +            if (vreg->mustSpillAtDefinition() && !to->isSpill()) {
   1.257 +                // Make sure this phi is spilled at the loop header.
   1.258 +                LMoveGroup *moves = successor->getEntryMoveGroup(alloc());
   1.259 +                if (!moves->add(to->getAllocation(), vregs[to->vreg()].canonicalSpill(),
   1.260 +                                def->type()))
   1.261 +                    return false;
   1.262 +            }
   1.263 +        }
   1.264 +
   1.265 +        // Resolve split intervals with moves
   1.266 +        BitSet *live = liveIn[mSuccessor->id()];
   1.267 +
   1.268 +        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
   1.269 +            LinearScanVirtualRegister *vreg = &vregs[*liveRegId];
   1.270 +            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
   1.271 +            JS_ASSERT(to);
   1.272 +
   1.273 +            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
   1.274 +                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
   1.275 +                LiveInterval *from = vregs[*liveRegId].intervalFor(outputOf(predecessor->lastId()));
   1.276 +                JS_ASSERT(from);
   1.277 +
   1.278 +                if (*from->getAllocation() == *to->getAllocation())
   1.279 +                    continue;
   1.280 +
   1.281 +                // If this value is spilled at its definition, other stores
   1.282 +                // are redundant.
   1.283 +                if (vreg->mustSpillAtDefinition() && to->getAllocation()->isStackSlot()) {
   1.284 +                    JS_ASSERT(vreg->canonicalSpill());
   1.285 +                    JS_ASSERT(*vreg->canonicalSpill() == *to->getAllocation());
   1.286 +                    continue;
   1.287 +                }
   1.288 +
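          +                // With multiple predecessors the move must go at the end of the
          +                // (single-successor) predecessor so that other paths into this
          +                // block are unaffected; with a single predecessor it can simply
          +                // go at the successor's entry.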
   1.289 +                if (mSuccessor->numPredecessors() > 1) {
   1.290 +                    JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
   1.291 +                    if (!moveAtExit(predecessor, from, to, vreg->type()))
   1.292 +                        return false;
   1.293 +                } else {
   1.294 +                    if (!moveAtEntry(successor, from, to, vreg->type()))
   1.295 +                        return false;
   1.296 +                }
   1.297 +            }
   1.298 +        }
   1.299 +    }
   1.300 +
   1.301 +    return true;
   1.302 +}
   1.303 +
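          +// Record a move from |from| to |to| in the move group that runs just before
          +// the instruction at |pos|. No-op moves are skipped.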
   1.304 +bool
   1.305 +LinearScanAllocator::moveInputAlloc(CodePosition pos, LAllocation *from, LAllocation *to,
   1.306 +                                    LDefinition::Type type)
   1.307 +{
   1.308 +    if (*from == *to)
   1.309 +        return true;
   1.310 +    LMoveGroup *moves = getInputMoveGroup(pos);
   1.311 +    return moves->add(from, to, type);
   1.312 +}
   1.313 +
   1.314 +static inline void
   1.315 +SetOsiPointUses(LiveInterval *interval, CodePosition defEnd, const LAllocation &allocation)
   1.316 +{
   1.317 +    // Moves are inserted after OsiPoint instructions. This function sets
   1.318 +    // any OsiPoint uses of this interval to the allocation of the value
   1.319 +    // before the move.
   1.320 +
   1.321 +    JS_ASSERT(interval->index() == 0);
   1.322 +
   1.323 +    for (UsePositionIterator usePos(interval->usesBegin());
   1.324 +         usePos != interval->usesEnd();
   1.325 +         usePos++)
   1.326 +    {
   1.327 +        if (usePos->pos > defEnd)
   1.328 +            break;
   1.329 +        *static_cast<LAllocation *>(usePos->use) = allocation;
   1.330 +    }
   1.331 +}
   1.332 +
    1.333 +/*
    1.334 + * This function takes the allocations assigned to the live intervals and
    1.335 + * rewrites every virtual register use and definition in the function with
    1.336 + * the allocation of the interval that covers it.
    1.337 + */
   1.338 +bool
   1.339 +LinearScanAllocator::reifyAllocations()
   1.340 +{
   1.341 +    // Iterate over each interval, ensuring that definitions are visited before uses.
   1.342 +    for (size_t j = 1; j < graph.numVirtualRegisters(); j++) {
   1.343 +        LinearScanVirtualRegister *reg = &vregs[j];
   1.344 +        if (mir->shouldCancel("LSRA Reification (main loop)"))
   1.345 +            return false;
   1.346 +
   1.347 +    for (size_t k = 0; k < reg->numIntervals(); k++) {
   1.348 +        LiveInterval *interval = reg->getInterval(k);
   1.349 +        JS_ASSERT(reg == &vregs[interval->vreg()]);
   1.350 +        if (!interval->numRanges())
   1.351 +            continue;
   1.352 +
   1.353 +        UsePositionIterator usePos(interval->usesBegin());
   1.354 +        for (; usePos != interval->usesEnd(); usePos++) {
   1.355 +            if (usePos->use->isFixedRegister()) {
   1.356 +                LiveInterval *to = fixedIntervals[GetFixedRegister(reg->def(), usePos->use).code()];
   1.357 +
   1.358 +                *static_cast<LAllocation *>(usePos->use) = *to->getAllocation();
   1.359 +                if (!moveInput(usePos->pos, interval, to, reg->type()))
   1.360 +                    return false;
   1.361 +            } else {
   1.362 +                JS_ASSERT(UseCompatibleWith(usePos->use, *interval->getAllocation()));
   1.363 +                *static_cast<LAllocation *>(usePos->use) = *interval->getAllocation();
   1.364 +            }
   1.365 +        }
   1.366 +
    1.367 +        // Rewrite the def of this interval if it's the register's first one.
    1.368 +        if (interval->index() == 0) {
   1.370 +            LDefinition *def = reg->def();
   1.371 +            LAllocation *spillFrom;
   1.372 +
   1.373 +            // Insert the moves after any OsiPoint or Nop instructions
   1.374 +            // following this one. See minimalDefEnd for more info.
   1.375 +            CodePosition defEnd = minimalDefEnd(reg->ins());
   1.376 +
   1.377 +            if (def->policy() == LDefinition::PRESET && def->output()->isRegister()) {
   1.378 +                AnyRegister fixedReg = def->output()->toRegister();
   1.379 +                LiveInterval *from = fixedIntervals[fixedReg.code()];
   1.380 +
   1.381 +                // If we insert the move after an OsiPoint that uses this vreg,
   1.382 +                // it should use the fixed register instead.
   1.383 +                SetOsiPointUses(interval, defEnd, LAllocation(fixedReg));
   1.384 +
   1.385 +                if (!moveAfter(defEnd, from, interval, def->type()))
   1.386 +                    return false;
   1.387 +                spillFrom = from->getAllocation();
   1.388 +            } else {
   1.389 +                if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
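          +                    // The reused input operand is rewritten to the definition's
          +                    // allocation, and a move from the operand's original location
          +                    // into that allocation is emitted just before the instruction,
          +                    // so the instruction still sees the input value.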
   1.390 +                    LAllocation *inputAlloc = reg->ins()->getOperand(def->getReusedInput());
   1.391 +                    LAllocation *origAlloc = LAllocation::New(alloc(), *inputAlloc);
   1.392 +
   1.393 +                    JS_ASSERT(!inputAlloc->isUse());
   1.394 +
   1.395 +                    *inputAlloc = *interval->getAllocation();
   1.396 +                    if (!moveInputAlloc(inputOf(reg->ins()), origAlloc, inputAlloc, def->type()))
   1.397 +                        return false;
   1.398 +                }
   1.399 +
   1.400 +                JS_ASSERT(DefinitionCompatibleWith(reg->ins(), def, *interval->getAllocation()));
   1.401 +                def->setOutput(*interval->getAllocation());
   1.402 +
   1.403 +                spillFrom = interval->getAllocation();
   1.404 +            }
   1.405 +
   1.406 +            if (reg->ins()->recoversInput()) {
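          +                // Snapshot entries flagged RECOVERED_INPUT are patched to read
          +                // the value from the definition's reified output allocation.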
   1.407 +                LSnapshot *snapshot = reg->ins()->snapshot();
   1.408 +                for (size_t i = 0; i < snapshot->numEntries(); i++) {
   1.409 +                    LAllocation *entry = snapshot->getEntry(i);
   1.410 +                    if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
   1.411 +                        *entry = *def->output();
   1.412 +                }
   1.413 +            }
   1.414 +
   1.415 +            if (reg->mustSpillAtDefinition() && !reg->ins()->isPhi() &&
   1.416 +                (*reg->canonicalSpill() != *spillFrom))
   1.417 +            {
   1.418 +                // If we move the spill after an OsiPoint, the OsiPoint should
   1.419 +                // use the original location instead.
   1.420 +                SetOsiPointUses(interval, defEnd, *spillFrom);
   1.421 +
   1.422 +                // Insert a spill after this instruction (or after any OsiPoint
   1.423 +                // or Nop instructions). Note that we explicitly ignore phis,
   1.424 +                // which should have been handled in resolveControlFlow().
   1.425 +                LMoveGroup *moves = getMoveGroupAfter(defEnd);
   1.426 +                if (!moves->add(spillFrom, reg->canonicalSpill(), def->type()))
   1.427 +                    return false;
   1.428 +            }
   1.429 +        }
   1.430 +        else if (interval->start() > inputOf(insData[interval->start()].block()->firstId()) &&
   1.431 +                 (!reg->canonicalSpill() ||
   1.432 +                  (reg->canonicalSpill() == interval->getAllocation() &&
   1.433 +                   !reg->mustSpillAtDefinition()) ||
   1.434 +                  *reg->canonicalSpill() != *interval->getAllocation()))
   1.435 +        {
   1.436 +            // If this virtual register has no canonical spill location, this
   1.437 +            // is the first spill to that location, or this is a move to somewhere
   1.438 +            // completely different, we have to emit a move for this interval.
   1.439 +            // Don't do this if the interval starts at the first instruction of the
   1.440 +            // block; this case should have been handled by resolveControlFlow().
   1.441 +            //
   1.442 +            // If the interval starts at the output half of an instruction, we have to
   1.443 +            // emit the move *after* this instruction, to prevent clobbering an input
   1.444 +            // register.
   1.445 +            LiveInterval *prevInterval = reg->getInterval(interval->index() - 1);
   1.446 +            CodePosition start = interval->start();
   1.447 +            InstructionData *data = &insData[start];
   1.448 +
   1.449 +            JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
   1.450 +
   1.451 +            if (start.subpos() == CodePosition::INPUT) {
   1.452 +                if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
   1.453 +                    return false;
   1.454 +            } else {
   1.455 +                if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
   1.456 +                    return false;
   1.457 +            }
   1.458 +
   1.459 +            // Mark this interval's spill position, if needed.
   1.460 +            if (reg->canonicalSpill() == interval->getAllocation() &&
   1.461 +                !reg->mustSpillAtDefinition())
   1.462 +            {
   1.463 +                reg->setSpillPosition(interval->start());
   1.464 +            }
   1.465 +        }
   1.466 +
   1.467 +        addLiveRegistersForInterval(reg, interval);
   1.468 +    }} // Iteration over virtual register intervals.
   1.469 +
   1.470 +    // Set the graph overall stack height
   1.471 +    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
   1.472 +
   1.473 +    return true;
   1.474 +}
   1.475 +
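          +// Returns whether |interval|'s value is known to be held in its virtual
          +// register's canonical stack spill slot at |pos|.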
   1.476 +inline bool
   1.477 +LinearScanAllocator::isSpilledAt(LiveInterval *interval, CodePosition pos)
   1.478 +{
   1.479 +    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   1.480 +    if (!reg->canonicalSpill() || !reg->canonicalSpill()->isStackSlot())
   1.481 +        return false;
   1.482 +
   1.483 +    if (reg->mustSpillAtDefinition()) {
   1.484 +        JS_ASSERT(reg->spillPosition() <= pos);
   1.485 +        return true;
   1.486 +    }
   1.487 +
   1.488 +    return interval->getAllocation() == reg->canonicalSpill();
   1.489 +}
   1.490 +
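          +// For every GC-visible virtual register (traceable values, slots/elements
          +// pointers and nunbox halves), record in each safepoint within its live range
          +// where the value can be found: a register, a spill slot, or both.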
   1.491 +bool
   1.492 +LinearScanAllocator::populateSafepoints()
   1.493 +{
   1.494 +    size_t firstSafepoint = 0;
   1.495 +
   1.496 +    for (uint32_t i = 0; i < vregs.numVirtualRegisters(); i++) {
   1.497 +        LinearScanVirtualRegister *reg = &vregs[i];
   1.498 +
   1.499 +        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
   1.500 +            continue;
   1.501 +
   1.502 +        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
   1.503 +        if (firstSafepoint >= graph.numSafepoints())
   1.504 +            break;
   1.505 +
   1.506 +        // Find the furthest endpoint.
   1.507 +        size_t lastInterval = reg->numIntervals() - 1;
   1.508 +        CodePosition end = reg->getInterval(lastInterval)->end();
   1.509 +
   1.510 +        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
   1.511 +            LInstruction *ins = graph.getSafepoint(j);
   1.512 +
   1.513 +            // Stop processing safepoints if we know we're out of this virtual
   1.514 +            // register's range.
   1.515 +            if (end < inputOf(ins))
   1.516 +                break;
   1.517 +
   1.518 +            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
   1.519 +            // is not used with gcthings or nunboxes, or we would have to add the input reg
   1.520 +            // to this safepoint.
   1.521 +            if (ins == reg->ins() && !reg->isTemp()) {
   1.522 +                DebugOnly<LDefinition*> def = reg->def();
   1.523 +                JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
   1.524 +                             def->type() == LDefinition::GENERAL ||
   1.525 +                             def->type() == LDefinition::INT32 ||
   1.526 +                             def->type() == LDefinition::FLOAT32 ||
   1.527 +                             def->type() == LDefinition::DOUBLE);
   1.528 +                continue;
   1.529 +            }
   1.530 +
   1.531 +            LSafepoint *safepoint = ins->safepoint();
   1.532 +
   1.533 +            if (IsSlotsOrElements(reg)) {
   1.534 +                LiveInterval *interval = reg->intervalFor(inputOf(ins));
   1.535 +                if (!interval)
   1.536 +                    continue;
   1.537 +
   1.538 +                LAllocation *a = interval->getAllocation();
   1.539 +                if (a->isGeneralReg() && !ins->isCall())
   1.540 +                    safepoint->addSlotsOrElementsRegister(a->toGeneralReg()->reg());
   1.541 +
   1.542 +                if (isSpilledAt(interval, inputOf(ins))) {
   1.543 +                    if (!safepoint->addSlotsOrElementsSlot(reg->canonicalSpillSlot()))
   1.544 +                        return false;
   1.545 +                }
   1.546 +            } else if (!IsNunbox(reg)) {
   1.547 +                JS_ASSERT(IsTraceable(reg));
   1.548 +
   1.549 +                LiveInterval *interval = reg->intervalFor(inputOf(ins));
   1.550 +                if (!interval)
   1.551 +                    continue;
   1.552 +
   1.553 +                LAllocation *a = interval->getAllocation();
   1.554 +                if (a->isGeneralReg() && !ins->isCall()) {
   1.555 +#ifdef JS_PUNBOX64
   1.556 +                    if (reg->type() == LDefinition::BOX) {
   1.557 +                        safepoint->addValueRegister(a->toGeneralReg()->reg());
   1.558 +                    } else
   1.559 +#endif
   1.560 +                    {
   1.561 +                        safepoint->addGcRegister(a->toGeneralReg()->reg());
   1.562 +                    }
   1.563 +                }
   1.564 +
   1.565 +                if (isSpilledAt(interval, inputOf(ins))) {
   1.566 +#ifdef JS_PUNBOX64
   1.567 +                    if (reg->type() == LDefinition::BOX) {
   1.568 +                        if (!safepoint->addValueSlot(reg->canonicalSpillSlot()))
   1.569 +                            return false;
   1.570 +                    } else
   1.571 +#endif
   1.572 +                    {
   1.573 +                        if (!safepoint->addGcSlot(reg->canonicalSpillSlot()))
   1.574 +                            return false;
   1.575 +                    }
   1.576 +                }
   1.577 +#ifdef JS_NUNBOX32
   1.578 +            } else {
   1.579 +                LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
   1.580 +                LinearScanVirtualRegister *type = (reg->type() == LDefinition::TYPE) ? reg : other;
   1.581 +                LinearScanVirtualRegister *payload = (reg->type() == LDefinition::PAYLOAD) ? reg : other;
   1.582 +                LiveInterval *typeInterval = type->intervalFor(inputOf(ins));
   1.583 +                LiveInterval *payloadInterval = payload->intervalFor(inputOf(ins));
   1.584 +
   1.585 +                if (!typeInterval && !payloadInterval)
   1.586 +                    continue;
   1.587 +
   1.588 +                LAllocation *typeAlloc = typeInterval->getAllocation();
   1.589 +                LAllocation *payloadAlloc = payloadInterval->getAllocation();
   1.590 +
   1.591 +                // If the payload is an argument, we'll scan that explicitly as
   1.592 +                // part of the frame. It is therefore safe to not add any
   1.593 +                // safepoint entry, as long as the vreg does not have a stack
   1.594 +                // slot as canonical spill slot.
   1.595 +                if (payloadAlloc->isArgument() &&
   1.596 +                    (!payload->canonicalSpill() || payload->canonicalSpill() == payloadAlloc))
   1.597 +                {
   1.598 +                    continue;
   1.599 +                }
   1.600 +
   1.601 +                if (isSpilledAt(typeInterval, inputOf(ins)) &&
   1.602 +                    isSpilledAt(payloadInterval, inputOf(ins)))
   1.603 +                {
   1.604 +                    // These two components of the Value are spilled
   1.605 +                    // contiguously, so simply keep track of the base slot.
   1.606 +                    uint32_t payloadSlot = payload->canonicalSpillSlot();
   1.607 +                    uint32_t slot = BaseOfNunboxSlot(LDefinition::PAYLOAD, payloadSlot);
   1.608 +                    if (!safepoint->addValueSlot(slot))
   1.609 +                        return false;
   1.610 +                }
   1.611 +
   1.612 +                if (!ins->isCall() &&
   1.613 +                    (!isSpilledAt(typeInterval, inputOf(ins)) || payloadAlloc->isGeneralReg()))
   1.614 +                {
   1.615 +                    // Either the payload is on the stack but the type is
   1.616 +                    // in a register, or the payload is in a register. In
   1.617 +                    // both cases, we don't have a contiguous spill so we
   1.618 +                    // add a torn entry.
   1.619 +                    if (!safepoint->addNunboxParts(*typeAlloc, *payloadAlloc))
   1.620 +                        return false;
   1.621 +
   1.622 +                    // If the nunbox is stored in multiple places, we need to
   1.623 +                    // trace all of them to allow the GC to relocate objects.
   1.624 +                    if (payloadAlloc->isGeneralReg() && isSpilledAt(payloadInterval, inputOf(ins))) {
   1.625 +                        if (!safepoint->addNunboxParts(*typeAlloc, *payload->canonicalSpill()))
   1.626 +                            return false;
   1.627 +                    }
   1.628 +                }
   1.629 +#endif
   1.630 +            }
   1.631 +        }
   1.632 +
   1.633 +#ifdef JS_NUNBOX32
   1.634 +        if (IsNunbox(reg)) {
   1.635 +            // Skip past the next half of this nunbox so we don't track the
   1.636 +            // same slot twice.
   1.637 +            JS_ASSERT(&vregs[reg->def()->virtualRegister() + 1] == otherHalfOfNunbox(reg));
   1.638 +            i++;
   1.639 +        }
   1.640 +#endif
   1.641 +    }
   1.642 +
   1.643 +    return true;
   1.644 +}
   1.645 +
   1.646 +/*
   1.647 + * Split the given interval at the given position, and add the created
   1.648 + * interval to the unhandled queue.
   1.649 + */
   1.650 +bool
   1.651 +LinearScanAllocator::splitInterval(LiveInterval *interval, CodePosition pos)
   1.652 +{
   1.653 +    // Make sure we're actually splitting this interval, not some other
   1.654 +    // interval in the same virtual register.
   1.655 +    JS_ASSERT(interval->start() < pos && pos < interval->end());
   1.656 +
   1.657 +    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   1.658 +
   1.659 +    // "Bogus" intervals cannot be split.
   1.660 +    JS_ASSERT(reg);
   1.661 +
   1.662 +    // Do the split.
   1.663 +    LiveInterval *newInterval = LiveInterval::New(alloc(), interval->vreg(), interval->index() + 1);
   1.664 +    if (!interval->splitFrom(pos, newInterval))
   1.665 +        return false;
   1.666 +
   1.667 +    JS_ASSERT(interval->numRanges() > 0);
   1.668 +    JS_ASSERT(newInterval->numRanges() > 0);
   1.669 +
   1.670 +    if (!reg->addInterval(newInterval))
   1.671 +        return false;
   1.672 +
   1.673 +    IonSpew(IonSpew_RegAlloc, "  Split interval to %u = [%u, %u]/[%u, %u]",
   1.674 +            interval->vreg(), interval->start().pos(),
   1.675 +            interval->end().pos(), newInterval->start().pos(),
   1.676 +            newInterval->end().pos());
   1.677 +
    1.678 +    // We always want to enqueue the resulting split: we split forward, so
    1.679 +    // the new interval begins at or after the current position and must be
    1.680 +    // handled later from the unhandled queue.
   1.681 +    setIntervalRequirement(newInterval);
   1.682 +
   1.683 +    // splitInterval() is usually called to split the node that has just been
   1.684 +    // popped from the unhandled queue. Therefore the split will likely be
   1.685 +    // closer to the lower start positions in the queue.
   1.686 +    unhandled.enqueueBackward(newInterval);
   1.687 +
   1.688 +    return true;
   1.689 +}
   1.690 +
   1.691 +bool
   1.692 +LinearScanAllocator::splitBlockingIntervals(LAllocation allocation)
   1.693 +{
   1.694 +    JS_ASSERT(allocation.isRegister());
   1.695 +
   1.696 +    // Split current before the next fixed use.
   1.697 +    LiveInterval *fixed = fixedIntervals[allocation.toRegister().code()];
   1.698 +    if (fixed->numRanges() > 0) {
   1.699 +        CodePosition fixedPos = current->intersect(fixed);
   1.700 +        if (fixedPos != CodePosition::MIN) {
   1.701 +            JS_ASSERT(fixedPos > current->start());
   1.702 +            JS_ASSERT(fixedPos < current->end());
   1.703 +            if (!splitInterval(current, fixedPos))
   1.704 +                return false;
   1.705 +        }
   1.706 +    }
   1.707 +
   1.708 +    // Split the blocking interval if it exists.
   1.709 +    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
   1.710 +        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
   1.711 +            IonSpew(IonSpew_RegAlloc, " Splitting active interval %u = [%u, %u]",
   1.712 +                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());
   1.713 +
   1.714 +            JS_ASSERT(i->start() != current->start());
   1.715 +            JS_ASSERT(i->covers(current->start()));
   1.717 +
   1.718 +            if (!splitInterval(*i, current->start()))
   1.719 +                return false;
   1.720 +
   1.721 +            LiveInterval *it = *i;
   1.722 +            active.removeAt(i);
   1.723 +            finishInterval(it);
   1.724 +            break;
   1.725 +        }
   1.726 +    }
   1.727 +
   1.728 +    // Split any inactive intervals at the next live point.
   1.729 +    for (IntervalIterator i(inactive.begin()); i != inactive.end(); ) {
   1.730 +        if (i->getAllocation()->isRegister() && *i->getAllocation() == allocation) {
   1.731 +            IonSpew(IonSpew_RegAlloc, " Splitting inactive interval %u = [%u, %u]",
   1.732 +                    vregs[i->vreg()].ins()->id(), i->start().pos(), i->end().pos());
   1.733 +
   1.734 +            LiveInterval *it = *i;
   1.735 +            CodePosition nextActive = it->nextCoveredAfter(current->start());
   1.736 +            JS_ASSERT(nextActive != CodePosition::MIN);
   1.737 +
   1.738 +            if (!splitInterval(it, nextActive))
   1.739 +                return false;
   1.740 +
   1.741 +            i = inactive.removeAt(i);
   1.742 +            finishInterval(it);
   1.743 +        } else {
   1.744 +            i++;
   1.745 +        }
   1.746 +    }
   1.747 +
   1.748 +    return true;
   1.749 +}
   1.750 +
   1.751 +/*
   1.752 + * Assign the current interval the given allocation, splitting conflicting
   1.753 + * intervals as necessary to make the allocation stick.
   1.754 + */
   1.755 +bool
   1.756 +LinearScanAllocator::assign(LAllocation allocation)
   1.757 +{
   1.758 +    if (allocation.isRegister())
   1.759 +        IonSpew(IonSpew_RegAlloc, "Assigning register %s", allocation.toRegister().name());
   1.760 +    current->setAllocation(allocation);
   1.761 +
    1.762 +    // Split this interval at its first use that is incompatible with the allocation.
   1.763 +    LinearScanVirtualRegister *reg = &vregs[current->vreg()];
   1.764 +    if (reg) {
   1.765 +        CodePosition splitPos = current->firstIncompatibleUse(allocation);
   1.766 +        if (splitPos != CodePosition::MAX) {
   1.767 +            // Split before the incompatible use. This ensures the use position is
   1.768 +            // part of the second half of the interval and guarantees we never split
   1.769 +            // at the end (zero-length intervals are invalid).
   1.770 +            splitPos = splitPos.previous();
   1.771 +            JS_ASSERT (splitPos < current->end());
   1.772 +            if (!splitInterval(current, splitPos))
   1.773 +                return false;
   1.774 +        }
   1.775 +    }
   1.776 +
   1.777 +    bool useAsCanonicalSpillSlot = allocation.isMemory();
   1.778 +    // Only canonically spill argument values when frame arguments are not
   1.779 +    // modified in the body.
   1.780 +    if (mir->modifiesFrameArguments())
   1.781 +        useAsCanonicalSpillSlot = allocation.isStackSlot();
   1.782 +
   1.783 +    if (reg && useAsCanonicalSpillSlot) {
   1.784 +        if (reg->canonicalSpill()) {
   1.785 +            JS_ASSERT(allocation == *reg->canonicalSpill());
   1.786 +
   1.787 +            // This interval is spilled more than once, so just always spill
   1.788 +            // it at its definition.
   1.789 +            reg->setSpillAtDefinition(outputOf(reg->ins()));
   1.790 +        } else {
   1.791 +            reg->setCanonicalSpill(current->getAllocation());
   1.792 +
   1.793 +            // If this spill is inside a loop, and the definition is outside
   1.794 +            // the loop, instead move the spill to outside the loop.
   1.795 +            InstructionData *other = &insData[current->start()];
   1.796 +            uint32_t loopDepthAtDef = reg->block()->mir()->loopDepth();
   1.797 +            uint32_t loopDepthAtSpill = other->block()->mir()->loopDepth();
   1.798 +            if (loopDepthAtSpill > loopDepthAtDef)
   1.799 +                reg->setSpillAtDefinition(outputOf(reg->ins()));
   1.800 +        }
   1.801 +    }
   1.802 +
   1.803 +    active.pushBack(current);
   1.804 +
   1.805 +    return true;
   1.806 +}
   1.807 +
   1.808 +uint32_t
   1.809 +LinearScanAllocator::allocateSlotFor(const LiveInterval *interval)
   1.810 +{
   1.811 +    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   1.812 +
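          +    // Pick the free-slot pool matching the width of this value so that a
          +    // reused slot is guaranteed to be large enough.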
   1.813 +    SlotList *freed;
   1.814 +    if (reg->type() == LDefinition::DOUBLE)
   1.815 +        freed = &finishedDoubleSlots_;
   1.816 +#if JS_BITS_PER_WORD == 64
   1.817 +    else if (reg->type() == LDefinition::GENERAL ||
   1.818 +             reg->type() == LDefinition::OBJECT ||
   1.819 +             reg->type() == LDefinition::SLOTS)
   1.820 +        freed = &finishedDoubleSlots_;
   1.821 +#endif
   1.822 +#ifdef JS_PUNBOX64
   1.823 +    else if (reg->type() == LDefinition::BOX)
   1.824 +        freed = &finishedDoubleSlots_;
   1.825 +#endif
   1.826 +#ifdef JS_NUNBOX32
   1.827 +    else if (IsNunbox(reg))
   1.828 +        freed = &finishedNunboxSlots_;
   1.829 +#endif
   1.830 +    else
   1.831 +        freed = &finishedSlots_;
   1.832 +
   1.833 +    if (!freed->empty()) {
   1.834 +        LiveInterval *maybeDead = freed->back();
   1.835 +        if (maybeDead->end() < reg->getInterval(0)->start()) {
   1.836 +            // This spill slot is dead before the start of the interval trying
   1.837 +            // to reuse the slot, so reuse is safe. Otherwise, we could
   1.838 +            // encounter a situation where a stack slot is allocated and freed
   1.839 +            // inside a loop, but the same allocation is then used to hold a
   1.840 +            // loop-carried value.
   1.841 +            //
   1.842 +            // Note that we don't reuse the dead slot if its interval ends right
   1.843 +            // before the current interval, to avoid conflicting slot -> reg and
   1.844 +            // reg -> slot moves in the same movegroup.
   1.845 +            freed->popBack();
   1.846 +            LinearScanVirtualRegister *dead = &vregs[maybeDead->vreg()];
   1.847 +#ifdef JS_NUNBOX32
   1.848 +            if (IsNunbox(dead))
   1.849 +                return BaseOfNunboxSlot(dead->type(), dead->canonicalSpillSlot());
   1.850 +#endif
   1.851 +            return dead->canonicalSpillSlot();
   1.852 +        }
   1.853 +    }
   1.854 +
   1.855 +    return stackSlotAllocator.allocateSlot(reg->type());
   1.856 +}
   1.857 +
   1.858 +bool
   1.859 +LinearScanAllocator::spill()
   1.860 +{
   1.861 +    IonSpew(IonSpew_RegAlloc, "  Decided to spill current interval");
   1.862 +
   1.863 +    // We can't spill bogus intervals
   1.864 +    JS_ASSERT(current->hasVreg());
   1.865 +
   1.866 +    LinearScanVirtualRegister *reg = &vregs[current->vreg()];
   1.867 +
   1.868 +    if (reg->canonicalSpill()) {
   1.869 +        IonSpew(IonSpew_RegAlloc, "  Allocating canonical spill location");
   1.870 +
   1.871 +        return assign(*reg->canonicalSpill());
   1.872 +    }
   1.873 +
   1.874 +    uint32_t stackSlot;
   1.875 +#if defined JS_NUNBOX32
   1.876 +    if (IsNunbox(reg)) {
   1.877 +        LinearScanVirtualRegister *other = otherHalfOfNunbox(reg);
   1.878 +
   1.879 +        if (other->canonicalSpill()) {
   1.880 +            // The other half of this nunbox already has a spill slot. To
   1.881 +            // ensure the Value is spilled contiguously, use the other half (it
   1.882 +            // was allocated double-wide).
   1.883 +            JS_ASSERT(other->canonicalSpill()->isStackSlot());
   1.884 +            stackSlot = BaseOfNunboxSlot(other->type(), other->canonicalSpillSlot());
   1.885 +        } else {
   1.886 +            // No canonical spill location exists for this nunbox yet. Allocate
   1.887 +            // one.
   1.888 +            stackSlot = allocateSlotFor(current);
   1.889 +        }
   1.890 +        stackSlot -= OffsetOfNunboxSlot(reg->type());
   1.891 +    } else
   1.892 +#endif
   1.893 +    {
   1.894 +        stackSlot = allocateSlotFor(current);
   1.895 +    }
   1.896 +    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
   1.897 +
   1.898 +    return assign(LStackSlot(stackSlot));
   1.899 +}
   1.900 +
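          +// Return a finished interval's stack slot (if any) to the free-slot pool of
          +// the matching width. Nunbox halves share a slot, which is only freed once
          +// both halves have finished.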
   1.901 +void
   1.902 +LinearScanAllocator::freeAllocation(LiveInterval *interval, LAllocation *alloc)
   1.903 +{
   1.904 +    LinearScanVirtualRegister *mine = &vregs[interval->vreg()];
   1.905 +    if (!IsNunbox(mine)) {
   1.906 +        if (alloc->isStackSlot()) {
   1.907 +            if (mine->type() == LDefinition::DOUBLE)
   1.908 +                finishedDoubleSlots_.append(interval);
   1.909 +#if JS_BITS_PER_WORD == 64
   1.910 +            else if (mine->type() == LDefinition::GENERAL ||
   1.911 +                     mine->type() == LDefinition::OBJECT ||
   1.912 +                     mine->type() == LDefinition::SLOTS)
   1.913 +                finishedDoubleSlots_.append(interval);
   1.914 +#endif
   1.915 +#ifdef JS_PUNBOX64
   1.916 +            else if (mine->type() == LDefinition::BOX)
   1.917 +                finishedDoubleSlots_.append(interval);
   1.918 +#endif
   1.919 +            else
   1.920 +                finishedSlots_.append(interval);
   1.921 +        }
   1.922 +        return;
   1.923 +    }
   1.924 +
   1.925 +#ifdef JS_NUNBOX32
   1.926 +    // Special handling for nunboxes. We can only free the stack slot once we
   1.927 +    // know both intervals have been finished.
   1.928 +    LinearScanVirtualRegister *other = otherHalfOfNunbox(mine);
   1.929 +    if (other->finished()) {
   1.930 +        if (!mine->canonicalSpill() && !other->canonicalSpill())
   1.931 +            return;
   1.932 +
   1.933 +        JS_ASSERT_IF(mine->canonicalSpill() && other->canonicalSpill(),
   1.934 +                     mine->canonicalSpill()->isStackSlot() == other->canonicalSpill()->isStackSlot());
   1.935 +
   1.936 +        LinearScanVirtualRegister *candidate = mine->canonicalSpill() ? mine : other;
   1.937 +        if (!candidate->canonicalSpill()->isStackSlot())
   1.938 +            return;
   1.939 +
   1.940 +        finishedNunboxSlots_.append(candidate->lastInterval());
   1.941 +    }
   1.942 +#endif
   1.943 +}
   1.944 +
   1.945 +void
   1.946 +LinearScanAllocator::finishInterval(LiveInterval *interval)
   1.947 +{
   1.948 +    LAllocation *alloc = interval->getAllocation();
   1.949 +    JS_ASSERT(!alloc->isUse());
   1.950 +
   1.951 +    // Toss out the bogus interval now that it's run its course
   1.952 +    if (!interval->hasVreg())
   1.953 +        return;
   1.954 +
   1.955 +    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
   1.956 +
   1.957 +    // All spills should be equal to the canonical spill location.
   1.958 +    JS_ASSERT_IF(alloc->isStackSlot(), *alloc == *reg->canonicalSpill());
   1.959 +
   1.960 +    bool lastInterval = interval->index() == (reg->numIntervals() - 1);
   1.961 +    if (lastInterval) {
   1.962 +        freeAllocation(interval, alloc);
   1.963 +        reg->setFinished();
   1.964 +    }
   1.965 +
   1.966 +    handled.pushBack(interval);
   1.967 +}
   1.968 +
   1.969 +/*
    1.970 + * This function locates a register which may be assigned by the register
    1.971 + * allocator and is not assigned to any active interval. The register which
    1.972 + * is free for the longest period of time is then returned.
   1.973 + */
   1.974 +AnyRegister::Code
   1.975 +LinearScanAllocator::findBestFreeRegister(CodePosition *freeUntil)
   1.976 +{
   1.977 +    IonSpew(IonSpew_RegAlloc, "  Computing freeUntilPos");
   1.978 +
   1.979 +    // Compute free-until positions for all registers
   1.980 +    CodePosition freeUntilPos[AnyRegister::Total];
   1.981 +    bool needFloat = vregs[current->vreg()].isFloatReg();
   1.982 +    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
   1.983 +        AnyRegister reg = regs.takeAny(needFloat);
   1.984 +        freeUntilPos[reg.code()] = CodePosition::MAX;
   1.985 +    }
   1.986 +    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
   1.987 +        LAllocation *alloc = i->getAllocation();
   1.988 +        if (alloc->isRegister(needFloat)) {
   1.989 +            AnyRegister reg = alloc->toRegister();
   1.990 +            IonSpew(IonSpew_RegAlloc, "   Register %s not free", reg.name());
   1.991 +            freeUntilPos[reg.code()] = CodePosition::MIN;
   1.992 +        }
   1.993 +    }
   1.994 +    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
   1.995 +        LAllocation *alloc = i->getAllocation();
   1.996 +        if (alloc->isRegister(needFloat)) {
   1.997 +            AnyRegister reg = alloc->toRegister();
   1.998 +            CodePosition pos = current->intersect(*i);
   1.999 +            if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
  1.1000 +                freeUntilPos[reg.code()] = pos;
  1.1001 +                IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
  1.1002 +            }
  1.1003 +        }
  1.1004 +    }
  1.1005 +
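          +    // Fixed-register uses intersecting the current interval cap how long those
          +    // registers stay free; a clash right at the start rules a register out.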
  1.1006 +    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
  1.1007 +    if (fixedPos != CodePosition::MIN) {
  1.1008 +        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
  1.1009 +            AnyRegister reg = i->getAllocation()->toRegister();
  1.1010 +            if (freeUntilPos[reg.code()] != CodePosition::MIN) {
  1.1011 +                CodePosition pos = current->intersect(*i);
  1.1012 +                if (pos != CodePosition::MIN && pos < freeUntilPos[reg.code()]) {
  1.1013 +                    freeUntilPos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
  1.1014 +                    IonSpew(IonSpew_RegAlloc, "   Register %s free until %u", reg.name(), pos.pos());
  1.1015 +                }
  1.1016 +            }
  1.1017 +        }
  1.1018 +    }
  1.1019 +
  1.1020 +    AnyRegister::Code bestCode = AnyRegister::Invalid;
  1.1021 +    if (current->index()) {
  1.1022 +        // As an optimization, use the allocation from the previous interval if
  1.1023 +        // it is available.
  1.1024 +        LiveInterval *previous = vregs[current->vreg()].getInterval(current->index() - 1);
  1.1025 +        LAllocation *alloc = previous->getAllocation();
  1.1026 +        if (alloc->isRegister(needFloat)) {
  1.1027 +            AnyRegister prevReg = alloc->toRegister();
  1.1028 +            if (freeUntilPos[prevReg.code()] != CodePosition::MIN)
  1.1029 +                bestCode = prevReg.code();
  1.1030 +        }
  1.1031 +    }
  1.1032 +
  1.1033 +    // Assign the register suggested by the hint if it's free.
  1.1034 +    const Requirement *hint = current->hint();
  1.1035 +    if (hint->kind() == Requirement::FIXED && hint->allocation().isRegister()) {
  1.1036 +        AnyRegister hintReg = hint->allocation().toRegister();
  1.1037 +        if (freeUntilPos[hintReg.code()] > hint->pos())
  1.1038 +            bestCode = hintReg.code();
  1.1039 +    } else if (hint->kind() == Requirement::SAME_AS_OTHER) {
  1.1040 +        LiveInterval *other = vregs[hint->virtualRegister()].intervalFor(hint->pos());
  1.1041 +        if (other && other->getAllocation()->isRegister()) {
  1.1042 +            AnyRegister hintReg = other->getAllocation()->toRegister();
  1.1043 +            if (freeUntilPos[hintReg.code()] > hint->pos())
  1.1044 +                bestCode = hintReg.code();
  1.1045 +        }
  1.1046 +    }
  1.1047 +
  1.1048 +    if (bestCode == AnyRegister::Invalid) {
  1.1049 +        // If all else fails, search freeUntilPos for largest value
  1.1050 +        for (uint32_t i = 0; i < AnyRegister::Total; i++) {
  1.1051 +            if (freeUntilPos[i] == CodePosition::MIN)
  1.1052 +                continue;
  1.1053 +            if (bestCode == AnyRegister::Invalid || freeUntilPos[i] > freeUntilPos[bestCode])
  1.1054 +                bestCode = AnyRegister::Code(i);
  1.1055 +        }
  1.1056 +    }
  1.1057 +
  1.1058 +    if (bestCode != AnyRegister::Invalid)
  1.1059 +        *freeUntil = freeUntilPos[bestCode];
  1.1060 +    return bestCode;
  1.1061 +}
  1.1062 +
  1.1063 +/*
  1.1064 + * This function locates a register which is assigned to an active interval,
   1.1065 + * and returns the one whose next use is farthest away. The next-use
   1.1066 + * position of the chosen register is returned through *nextUsed.
  1.1068 + */
  1.1069 +AnyRegister::Code
  1.1070 +LinearScanAllocator::findBestBlockedRegister(CodePosition *nextUsed)
  1.1071 +{
  1.1072 +    IonSpew(IonSpew_RegAlloc, "  Computing nextUsePos");
  1.1073 +
  1.1074 +    // Compute next-used positions for all registers
  1.1075 +    CodePosition nextUsePos[AnyRegister::Total];
  1.1076 +    bool needFloat = vregs[current->vreg()].isFloatReg();
  1.1077 +    for (RegisterSet regs(allRegisters_); !regs.empty(needFloat); ) {
  1.1078 +        AnyRegister reg = regs.takeAny(needFloat);
  1.1079 +        nextUsePos[reg.code()] = CodePosition::MAX;
  1.1080 +    }
  1.1081 +    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
  1.1082 +        LAllocation *alloc = i->getAllocation();
  1.1083 +        if (alloc->isRegister(needFloat)) {
  1.1084 +            AnyRegister reg = alloc->toRegister();
  1.1085 +            if (i->start() == current->start()) {
  1.1086 +                nextUsePos[reg.code()] = CodePosition::MIN;
  1.1087 +                IonSpew(IonSpew_RegAlloc, "   Disqualifying %s due to recency", reg.name());
  1.1088 +            } else if (nextUsePos[reg.code()] != CodePosition::MIN) {
  1.1089 +                nextUsePos[reg.code()] = i->nextUsePosAfter(current->start());
  1.1090 +                IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(),
  1.1091 +                        nextUsePos[reg.code()].pos());
  1.1092 +            }
  1.1093 +        }
  1.1094 +    }
  1.1095 +    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
  1.1096 +        LAllocation *alloc = i->getAllocation();
  1.1097 +        if (alloc->isRegister(needFloat)) {
  1.1098 +            AnyRegister reg = alloc->toRegister();
  1.1099 +            CodePosition pos = i->nextUsePosAfter(current->start());
  1.1100 +            if (pos < nextUsePos[reg.code()]) {
  1.1101 +                nextUsePos[reg.code()] = pos;
  1.1102 +                IonSpew(IonSpew_RegAlloc, "   Register %s next used %u", reg.name(), pos.pos());
  1.1103 +            }
  1.1104 +        }
  1.1105 +    }
  1.1106 +
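          +    // Fixed-register uses inside the current interval bound how soon those
          +    // registers are needed again; a clash at the very start disqualifies them.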
  1.1107 +    CodePosition fixedPos = fixedIntervalsUnion->intersect(current);
  1.1108 +    if (fixedPos != CodePosition::MIN) {
  1.1109 +        for (IntervalIterator i(fixed.begin()); i != fixed.end(); i++) {
  1.1110 +            AnyRegister reg = i->getAllocation()->toRegister();
  1.1111 +            if (nextUsePos[reg.code()] != CodePosition::MIN) {
  1.1112 +                CodePosition pos = i->intersect(current);
  1.1113 +                if (pos != CodePosition::MIN && pos < nextUsePos[reg.code()]) {
  1.1114 +                    nextUsePos[reg.code()] = (pos == current->start()) ? CodePosition::MIN : pos;
  1.1115 +                    IonSpew(IonSpew_RegAlloc, "   Register %s next used %u (fixed)", reg.name(), pos.pos());
  1.1116 +                }
  1.1117 +            }
  1.1118 +        }
  1.1119 +    }
  1.1120 +
  1.1121 +    // Search nextUsePos for largest value
  1.1122 +    AnyRegister::Code bestCode = AnyRegister::Invalid;
  1.1123 +    for (size_t i = 0; i < AnyRegister::Total; i++) {
  1.1124 +        if (nextUsePos[i] == CodePosition::MIN)
  1.1125 +            continue;
  1.1126 +        if (bestCode == AnyRegister::Invalid || nextUsePos[i] > nextUsePos[bestCode])
  1.1127 +            bestCode = AnyRegister::Code(i);
  1.1128 +    }
  1.1129 +
  1.1130 +    if (bestCode != AnyRegister::Invalid)
  1.1131 +        *nextUsed = nextUsePos[bestCode];
  1.1132 +    return bestCode;
  1.1133 +}
  1.1134 +
  1.1135 +/*
  1.1136 + * Two intervals can coexist if any of the following conditions is met:
  1.1137 + *
  1.1138 + *   - The intervals do not intersect.
  1.1139 + *   - The intervals have different allocations.
  1.1140 + *   - The intervals have the same allocation, but the allocation may be
  1.1141 + *     shared.
  1.1142 + *
  1.1143 + * Intuitively, it is a bug if any allocated intervals exist which can not
  1.1144 + * coexist.
  1.1145 + */
  1.1146 +bool
  1.1147 +LinearScanAllocator::canCoexist(LiveInterval *a, LiveInterval *b)
  1.1148 +{
  1.1149 +    LAllocation *aa = a->getAllocation();
  1.1150 +    LAllocation *ba = b->getAllocation();
  1.1151 +    if (aa->isRegister() && ba->isRegister() && aa->toRegister() == ba->toRegister())
  1.1152 +        return a->intersect(b) == CodePosition::MIN;
  1.1153 +    return true;
  1.1154 +}
  1.1155 +
  1.1156 +#ifdef DEBUG
  1.1157 +/*
  1.1158 + * Ensure intervals appear in exactly the appropriate one of {active, inactive,
  1.1159 + * handled}, and that active and inactive intervals do not conflict. Handled
  1.1160 + * intervals are checked for conflicts in validateAllocations for performance
  1.1161 + * reasons.
  1.1162 + */
  1.1163 +void
  1.1164 +LinearScanAllocator::validateIntervals()
  1.1165 +{
  1.1166 +    if (!js_JitOptions.checkGraphConsistency)
  1.1167 +        return;
  1.1168 +
  1.1169 +    for (IntervalIterator i(active.begin()); i != active.end(); i++) {
  1.1170 +        JS_ASSERT(i->numRanges() > 0);
  1.1171 +        JS_ASSERT(i->covers(current->start()));
  1.1172 +
  1.1173 +        for (IntervalIterator j(active.begin()); j != i; j++)
  1.1174 +            JS_ASSERT(canCoexist(*i, *j));
  1.1175 +    }
  1.1176 +
  1.1177 +    for (IntervalIterator i(inactive.begin()); i != inactive.end(); i++) {
  1.1178 +        JS_ASSERT(i->numRanges() > 0);
  1.1179 +        JS_ASSERT(i->end() >= current->start());
  1.1180 +        JS_ASSERT(!i->covers(current->start()));
  1.1181 +
  1.1182 +        for (IntervalIterator j(active.begin()); j != active.end(); j++) {
  1.1183 +            JS_ASSERT(*i != *j);
  1.1184 +            JS_ASSERT(canCoexist(*i, *j));
  1.1185 +        }
  1.1186 +        for (IntervalIterator j(inactive.begin()); j != i; j++)
  1.1187 +            JS_ASSERT(canCoexist(*i, *j));
  1.1188 +    }
  1.1189 +
  1.1190 +    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
  1.1191 +        JS_ASSERT(!i->getAllocation()->isUse());
  1.1192 +        JS_ASSERT(i->numRanges() > 0);
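         +        // Only register allocations must end before the current position;
         +        // stack allocations may still be live across it.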
  1.1193 +        if (i->getAllocation()->isRegister()) {
  1.1194 +            JS_ASSERT(i->end() <= current->start());
  1.1195 +            JS_ASSERT(!i->covers(current->start()));
  1.1196 +        }
  1.1197 +
  1.1198 +        for (IntervalIterator j(active.begin()); j != active.end(); j++)
  1.1199 +            JS_ASSERT(*i != *j);
  1.1200 +        for (IntervalIterator j(inactive.begin()); j != inactive.end(); j++)
  1.1201 +            JS_ASSERT(*i != *j);
  1.1202 +    }
  1.1203 +}
  1.1204 +
  1.1205 +/*
  1.1206 + * This function performs a nice, expensive check that all intervals
  1.1207 + * in the function can coexist with every other interval.
  1.1208 + */
  1.1209 +void
  1.1210 +LinearScanAllocator::validateAllocations()
  1.1211 +{
  1.1212 +    if (!js_JitOptions.checkGraphConsistency)
  1.1213 +        return;
  1.1214 +
  1.1215 +    for (IntervalIterator i(handled.begin()); i != handled.end(); i++) {
  1.1216 +        for (IntervalIterator j(handled.begin()); j != i; j++) {
  1.1217 +            JS_ASSERT(*i != *j);
  1.1218 +            JS_ASSERT(canCoexist(*i, *j));
  1.1219 +        }
  1.1220 +        LinearScanVirtualRegister *reg = &vregs[i->vreg()];
  1.1221 +        bool found = false;
  1.1222 +        for (size_t j = 0; j < reg->numIntervals(); j++) {
  1.1223 +            if (reg->getInterval(j) == *i) {
  1.1224 +                JS_ASSERT(j == i->index());
  1.1225 +                found = true;
  1.1226 +                break;
  1.1227 +            }
  1.1228 +        }
  1.1229 +        JS_ASSERT(found);
  1.1230 +    }
  1.1231 +}
  1.1232 +
  1.1233 +#endif // DEBUG
  1.1234 +
  1.1235 +bool
  1.1236 +LinearScanAllocator::go()
  1.1237 +{
  1.1238 +    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
  1.1239 +
  1.1240 +    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
  1.1241 +    if (!buildLivenessInfo())
  1.1242 +        return false;
  1.1243 +    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
  1.1244 +
  1.1245 +    if (mir->shouldCancel("LSRA Liveness"))
  1.1246 +        return false;
  1.1247 +
  1.1248 +    IonSpew(IonSpew_RegAlloc, "Beginning preliminary register allocation");
  1.1249 +    if (!allocateRegisters())
  1.1250 +        return false;
  1.1251 +    IonSpew(IonSpew_RegAlloc, "Preliminary register allocation complete");
  1.1252 +
  1.1253 +    if (mir->shouldCancel("LSRA Preliminary Regalloc"))
  1.1254 +        return false;
  1.1255 +
  1.1256 +    IonSpew(IonSpew_RegAlloc, "Beginning control flow resolution");
  1.1257 +    if (!resolveControlFlow())
  1.1258 +        return false;
  1.1259 +    IonSpew(IonSpew_RegAlloc, "Control flow resolution complete");
  1.1260 +
  1.1261 +    if (mir->shouldCancel("LSRA Control Flow"))
  1.1262 +        return false;
  1.1263 +
  1.1264 +    IonSpew(IonSpew_RegAlloc, "Beginning register allocation reification");
  1.1265 +    if (!reifyAllocations())
  1.1266 +        return false;
  1.1267 +    IonSpew(IonSpew_RegAlloc, "Register allocation reification complete");
  1.1268 +
  1.1269 +    if (mir->shouldCancel("LSRA Reification"))
  1.1270 +        return false;
  1.1271 +
  1.1272 +    IonSpew(IonSpew_RegAlloc, "Beginning safepoint population");
  1.1273 +    if (!populateSafepoints())
  1.1274 +        return false;
  1.1275 +    IonSpew(IonSpew_RegAlloc, "Safepoint population complete");
  1.1276 +
  1.1277 +    if (mir->shouldCancel("LSRA Safepoints"))
  1.1278 +        return false;
  1.1279 +
  1.1280 +    IonSpew(IonSpew_RegAlloc, "Register allocation complete");
  1.1281 +
  1.1282 +    return true;
  1.1283 +}
  1.1284 +
  1.1285 +void
  1.1286 +LinearScanAllocator::setIntervalRequirement(LiveInterval *interval)
  1.1287 +{
  1.1288 +    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
  1.1289 +    JS_ASSERT(interval->hint()->kind() == Requirement::NONE);
  1.1290 +
  1.1291 +    // This function computes requirements by virtual register; other types of
  1.1292 +    // intervals should have their requirements set manually.
  1.1293 +    LinearScanVirtualRegister *reg = &vregs[interval->vreg()];
  1.1294 +
  1.1295 +    if (interval->index() == 0) {
  1.1296 +        // The first interval is the definition, so deal with any definition
  1.1297 +        // constraints/hints.
  1.1298 +
  1.1299 +        if (reg->def()->policy() == LDefinition::PRESET) {
  1.1300 +            // Preset policies get a FIXED hint for register outputs, or a FIXED requirement otherwise.
  1.1301 +            if (reg->def()->output()->isRegister())
  1.1302 +                interval->setHint(Requirement(*reg->def()->output()));
  1.1303 +            else
  1.1304 +                interval->setRequirement(Requirement(*reg->def()->output()));
  1.1305 +        } else if (reg->def()->policy() == LDefinition::MUST_REUSE_INPUT) {
  1.1306 +            // Reuse policies get a REGISTER requirement and a SAME_AS hint.
  1.1307 +            LUse *use = reg->ins()->getOperand(reg->def()->getReusedInput())->toUse();
  1.1308 +            interval->setRequirement(Requirement(Requirement::REGISTER));
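         +            // The reused input is read just before this definition, so the
         +            // hint points at start().previous().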
  1.1309 +            interval->setHint(Requirement(use->virtualRegister(), interval->start().previous()));
  1.1310 +        } else if (reg->ins()->isPhi()) {
  1.1311 +            // Phis don't have any requirements, but they should prefer
  1.1312 +            // their input allocations, so they get a SAME_AS hint for the
  1.1313 +            // first input.
  1.1314 +            LUse *use = reg->ins()->getOperand(0)->toUse();
  1.1315 +            LBlock *predecessor = reg->block()->mir()->getPredecessor(0)->lir();
  1.1316 +            CodePosition predEnd = outputOf(predecessor->lastId());
  1.1317 +            interval->setHint(Requirement(use->virtualRegister(), predEnd));
  1.1318 +        } else {
  1.1319 +            // Non-phis get a REGISTER requirement
  1.1320 +            interval->setRequirement(Requirement(Requirement::REGISTER));
  1.1321 +        }
  1.1322 +    }
  1.1323 +
  1.1324 +    UsePosition *fixedOp = nullptr;
  1.1325 +    UsePosition *registerOp = nullptr;
  1.1326 +
  1.1327 +    // Search uses at the start of the interval for requirements.
  1.1328 +    UsePositionIterator usePos(interval->usesBegin());
  1.1329 +    for (; usePos != interval->usesEnd(); usePos++) {
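         +        // Only uses at the very start of the interval can impose a
         +        // requirement; later uses are scanned for hints below.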
  1.1330 +        if (interval->start().next() < usePos->pos)
  1.1331 +            break;
  1.1332 +
  1.1333 +        LUse::Policy policy = usePos->use->policy();
  1.1334 +        if (policy == LUse::FIXED) {
  1.1335 +            fixedOp = *usePos;
  1.1336 +            interval->setRequirement(Requirement(Requirement::REGISTER));
  1.1337 +            break;
  1.1338 +        } else if (policy == LUse::REGISTER) {
  1.1339 +            // Register uses get a REGISTER requirement
  1.1340 +            interval->setRequirement(Requirement(Requirement::REGISTER));
  1.1341 +        }
  1.1342 +    }
  1.1343 +
  1.1344 +    // Search other uses for hints. If the virtual register already has a
  1.1345 +    // canonical spill location, we will eagerly spill this interval, so we
  1.1346 +    // don't have to search for hints.
  1.1347 +    if (!fixedOp && !vregs[interval->vreg()].canonicalSpill()) {
  1.1348 +        for (; usePos != interval->usesEnd(); usePos++) {
  1.1349 +            LUse::Policy policy = usePos->use->policy();
  1.1350 +            if (policy == LUse::FIXED) {
  1.1351 +                fixedOp = *usePos;
  1.1352 +                break;
  1.1353 +            } else if (policy == LUse::REGISTER) {
  1.1354 +                if (!registerOp)
  1.1355 +                    registerOp = *usePos;
  1.1356 +            }
  1.1357 +        }
  1.1358 +    }
  1.1359 +
  1.1360 +    if (fixedOp) {
  1.1361 +        // Intervals with a fixed use now get a FIXED hint.
  1.1362 +        AnyRegister required = GetFixedRegister(reg->def(), fixedOp->use);
  1.1363 +        interval->setHint(Requirement(LAllocation(required), fixedOp->pos));
  1.1364 +    } else if (registerOp) {
  1.1365 +        // Intervals with register uses get a REGISTER hint. We may have already
  1.1366 +        // assigned a SAME_AS hint, so make sure we don't overwrite it with a
  1.1367 +        // weaker one.
  1.1368 +        if (interval->hint()->kind() == Requirement::NONE)
  1.1369 +            interval->setHint(Requirement(Requirement::REGISTER, registerOp->pos));
  1.1370 +    }
  1.1371 +}
  1.1372 +
  1.1373 +/*
  1.1374 + * Enqueue by iteration starting from the node with the lowest start position.
  1.1375 + *
  1.1376 + * If we assign registers to intervals in order of their start positions
  1.1377 + * without regard to their requirements, we can end up in situations where
  1.1378 + * intervals that do not require registers block intervals that must have
  1.1379 + * registers from getting one. To avoid this, we ensure that intervals are
  1.1380 + * ordered by position and priority so intervals with more stringent
  1.1381 + * requirements are handled first.
  1.1382 + */
  1.1383 +void
  1.1384 +LinearScanAllocator::UnhandledQueue::enqueueBackward(LiveInterval *interval)
  1.1385 +{
  1.1386 +    IntervalReverseIterator i(rbegin());
  1.1387 +
  1.1388 +    for (; i != rend(); i++) {
  1.1389 +        if (i->start() > interval->start())
  1.1390 +            break;
  1.1391 +        if (i->start() == interval->start() &&
  1.1392 +            i->requirement()->priority() >= interval->requirement()->priority())
  1.1393 +        {
  1.1394 +            break;
  1.1395 +        }
  1.1396 +    }
  1.1397 +    insertAfter(*i, interval);
  1.1398 +}
  1.1399 +
  1.1400 +/*
  1.1401 + * Enqueue by iteration from high start position to low start position,
  1.1402 + * after a provided node.
  1.1403 + */
  1.1404 +void
  1.1405 +LinearScanAllocator::UnhandledQueue::enqueueForward(LiveInterval *after, LiveInterval *interval)
  1.1406 +{
  1.1407 +    IntervalIterator i(begin(after));
  1.1408 +    i++; // Skip the initial node.
  1.1409 +
  1.1410 +    for (; i != end(); i++) {
  1.1411 +        if (i->start() < interval->start())
  1.1412 +            break;
  1.1413 +        if (i->start() == interval->start() &&
  1.1414 +            i->requirement()->priority() < interval->requirement()->priority())
  1.1415 +        {
  1.1416 +            break;
  1.1417 +        }
  1.1418 +    }
  1.1419 +    insertBefore(*i, interval);
  1.1420 +}
  1.1421 +
  1.1422 +void
  1.1423 +LinearScanAllocator::UnhandledQueue::assertSorted()
  1.1424 +{
  1.1425 +#ifdef DEBUG
  1.1426 +    LiveInterval *prev = nullptr;
  1.1427 +    for (IntervalIterator i(begin()); i != end(); i++) {
  1.1428 +        if (prev) {
  1.1429 +            JS_ASSERT(prev->start() >= i->start());
  1.1430 +            JS_ASSERT_IF(prev->start() == i->start(),
  1.1431 +                         prev->requirement()->priority() >= i->requirement()->priority());
  1.1432 +        }
  1.1433 +        prev = *i;
  1.1434 +    }
  1.1435 +#endif
  1.1436 +}
  1.1437 +
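         +/*
         + * Remove and return the interval with the lowest start position. The queue is
         + * kept sorted from highest to lowest start position, so this is the last
         + * element in iteration order.
         + */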
  1.1438 +LiveInterval *
  1.1439 +LinearScanAllocator::UnhandledQueue::dequeue()
  1.1440 +{
  1.1441 +    if (rbegin() == rend())
  1.1442 +        return nullptr;
  1.1443 +
  1.1444 +    LiveInterval *result = *rbegin();
  1.1445 +    remove(result);
  1.1446 +    return result;
  1.1447 +}
