--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/BacktrackingAllocator.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,1816 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BacktrackingAllocator.h"
+#include "jit/BitSet.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+
+bool
+BacktrackingAllocator::init()
+{
+    RegisterSet remainingRegisters(allRegisters_);
+    while (!remainingRegisters.empty(/* float = */ false)) {
+        AnyRegister reg = AnyRegister(remainingRegisters.takeGeneral());
+        registers[reg.code()].allocatable = true;
+    }
+    while (!remainingRegisters.empty(/* float = */ true)) {
+        AnyRegister reg = AnyRegister(remainingRegisters.takeFloat());
+        registers[reg.code()].allocatable = true;
+    }
+
+    LifoAlloc *lifoAlloc = mir->alloc().lifoAlloc();
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
+        registers[i].reg = AnyRegister::FromCode(i);
+        registers[i].allocations.setAllocator(lifoAlloc);
+
+        LiveInterval *fixed = fixedIntervals[i];
+        for (size_t j = 0; j < fixed->numRanges(); j++) {
+            AllocatedRange range(fixed, fixed->getRange(j));
+            if (!registers[i].allocations.insert(range))
+                return false;
+        }
+    }
+
+    hotcode.setAllocator(lifoAlloc);
+
+    // Partition the graph into hot and cold sections, for helping to make
+    // splitting decisions. Since we don't have any profiling data this is a
+    // crapshoot, so just mark the bodies of inner loops as hot and everything
+    // else as cold.
+
+    LiveInterval *hotcodeInterval = LiveInterval::New(alloc(), 0);
+
+    LBlock *backedge = nullptr;
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        LBlock *block = graph.getBlock(i);
+
+        // If we see a loop header, mark the backedge so we know when we have
+        // hit the end of the loop. Don't process the loop immediately, so that
+        // if there is an inner loop we will ignore the outer backedge.
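+        // Note how this interacts with nesting: an inner loop's header
+        // overwrites 'backedge', so only the inner loop's body is added to
+        // hotcodeInterval below, and the outer loop's backedge no longer
+        // matches and is skipped.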
+        if (block->mir()->isLoopHeader())
+            backedge = block->mir()->backedge()->lir();
+
+        if (block == backedge) {
+            LBlock *header = block->mir()->loopHeaderOfBackedge()->lir();
+            CodePosition from = inputOf(header->firstId());
+            CodePosition to = outputOf(block->lastId()).next();
+            if (!hotcodeInterval->addRange(from, to))
+                return false;
+        }
+    }
+
+    for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
+        AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
+        if (!hotcode.insert(range))
+            return false;
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::go()
+{
+    IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
+
+    IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
+    if (!buildLivenessInfo())
+        return false;
+    IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
+
+    if (!init())
+        return false;
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpLiveness();
+
+    if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
+        return false;
+
+    if (!groupAndQueueRegisters())
+        return false;
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpRegisterGroups();
+
+    // Allocate, spill and split register intervals until finished.
+    while (!allocationQueue.empty()) {
+        if (mir->shouldCancel("Backtracking Allocation"))
+            return false;
+
+        QueueItem item = allocationQueue.removeHighest();
+        if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
+            return false;
+    }
+
+    if (IonSpewEnabled(IonSpew_RegAlloc))
+        dumpAllocations();
+
+    return resolveControlFlow() && reifyAllocations() && populateSafepoints();
+}
+
+static bool
+LifetimesOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
+{
+    // Registers may have been eagerly split in two, see tryGroupReusedRegister.
+    // In such cases, only consider the first interval.
+    JS_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
+
+    LiveInterval *interval0 = reg0->getInterval(0), *interval1 = reg1->getInterval(0);
+
+    // Interval ranges are sorted in reverse order. The lifetimes overlap if
+    // any of their ranges overlap.
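+    // Worked example (illustrative ranges): for interval0 ranges [10,14),[2,6)
+    // and interval1 range [6,10), the walk below first advances index0
+    // (10 >= 10), then advances index1 (6 >= 6) and exits without finding an
+    // overlap.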
+    size_t index0 = 0, index1 = 0;
+    while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
+        const LiveInterval::Range
+            *range0 = interval0->getRange(index0),
+            *range1 = interval1->getRange(index1);
+        if (range0->from >= range1->to)
+            index0++;
+        else if (range1->from >= range0->to)
+            index1++;
+        else
+            return true;
+    }
+
+    return false;
+}
+
+bool
+BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
+{
+    for (size_t i = 0; i < group->registers.length(); i++) {
+        if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
+            return false;
+    }
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
+{
+    // See if reg0 and reg1 can be placed in the same group, following the
+    // restrictions imposed by VirtualRegisterGroup and any other registers
+    // already grouped with reg0 or reg1.
+    BacktrackingVirtualRegister *reg0 = &vregs[vreg0], *reg1 = &vregs[vreg1];
+
+    if (reg0->isFloatReg() != reg1->isFloatReg())
+        return true;
+
+    VirtualRegisterGroup *group0 = reg0->group(), *group1 = reg1->group();
+
+    if (!group0 && group1)
+        return tryGroupRegisters(vreg1, vreg0);
+
+    if (group0) {
+        if (group1) {
+            if (group0 == group1) {
+                // The registers are already grouped together.
+                return true;
+            }
+            // Try to unify the two distinct groups.
+            for (size_t i = 0; i < group1->registers.length(); i++) {
+                if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
+                    return true;
+            }
+            for (size_t i = 0; i < group1->registers.length(); i++) {
+                uint32_t vreg = group1->registers[i];
+                if (!group0->registers.append(vreg))
+                    return false;
+                vregs[vreg].setGroup(group0);
+            }
+            return true;
+        }
+        if (!canAddToGroup(group0, reg1))
+            return true;
+        if (!group0->registers.append(vreg1))
+            return false;
+        reg1->setGroup(group0);
+        return true;
+    }
+
+    if (LifetimesOverlap(reg0, reg1))
+        return true;
+
+    VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup(alloc());
+    if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
+        return false;
+
+    reg0->setGroup(group);
+    reg1->setGroup(group);
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
+{
+    BacktrackingVirtualRegister &reg = vregs[def], &usedReg = vregs[use];
+
+    // reg is a vreg which reuses its input usedReg for its output physical
+    // register. Try to group reg with usedReg if at all possible, as avoiding
+    // copies before reg's instruction is crucial for the quality of the
+    // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
+    // on x86/x64).
+
+    if (reg.intervalFor(inputOf(reg.ins()))) {
+        JS_ASSERT(reg.isTemp());
+        reg.setMustCopyInput();
+        return true;
+    }
+
+    if (!usedReg.intervalFor(outputOf(reg.ins()))) {
+        // The input is not live after the instruction, either in a safepoint
+        // for the instruction or in subsequent code. The input and output
+        // can thus be in the same group.
+        return tryGroupRegisters(use, def);
+    }
+
+    // The input is live afterwards, either in future instructions or in a
+    // safepoint for the reusing instruction. This is impossible to satisfy
+    // without copying the input.
+    //
+    // It may or may not be better to split the interval at the point of the
+    // definition, which may permit grouping. One case where it is definitely
+    // better to split is if the input never has any register uses after the
+    // instruction. Handle this splitting eagerly.
+
+    if (usedReg.numIntervals() != 1 ||
+        (usedReg.def()->isPreset() && !usedReg.def()->output()->isRegister())) {
+        reg.setMustCopyInput();
+        return true;
+    }
+    LiveInterval *interval = usedReg.getInterval(0);
+    LBlock *block = insData[reg.ins()].block();
+
+    // The input's lifetime must end within the same block as the definition,
+    // otherwise it could live on in phis elsewhere.
+    if (interval->end() > outputOf(block->lastId())) {
+        reg.setMustCopyInput();
+        return true;
+    }
+
+    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+        if (iter->pos <= inputOf(reg.ins()))
+            continue;
+
+        LUse *use = iter->use;
+        if (FindReusingDefinition(insData[iter->pos].ins(), use)) {
+            reg.setMustCopyInput();
+            return true;
+        }
+        if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
+            reg.setMustCopyInput();
+            return true;
+        }
+    }
+
+    LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        const LiveInterval::Range *range = interval->getRange(i);
+        JS_ASSERT(range->from <= inputOf(reg.ins()));
+
+        CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
+        if (!preInterval->addRange(range->from, to))
+            return false;
+    }
+
+    LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
+    if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
+        return false;
+
+    LiveIntervalVector newIntervals;
+    if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
+        return false;
+
+    distributeUses(interval, newIntervals);
+
+    if (!split(interval, newIntervals))
+        return false;
+
+    JS_ASSERT(usedReg.numIntervals() == 2);
+
+    usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
+
+    return tryGroupRegisters(use, def);
+}
+
+bool
+BacktrackingAllocator::groupAndQueueRegisters()
+{
+    // Try to group registers with their reused inputs.
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        BacktrackingVirtualRegister &reg = vregs[i];
+        if (!reg.numIntervals())
+            continue;
+
+        if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
+            LUse *use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
+            if (!tryGroupReusedRegister(i, use->virtualRegister()))
+                return false;
+        }
+    }
+
+    // Try to group phis with their inputs.
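+    // For example (vreg numbers are illustrative), a phi v7 = phi(v3, v5)
+    // leads to tryGroupRegisters(v3, v7) and tryGroupRegisters(v5, v7), so
+    // that, when their lifetimes allow it, the phi and its operands share one
+    // group and hence one allocation, avoiding copies on the incoming edges.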
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        LBlock *block = graph.getBlock(i);
+        for (size_t j = 0; j < block->numPhis(); j++) {
+            LPhi *phi = block->getPhi(j);
+            uint32_t output = phi->getDef(0)->virtualRegister();
+            for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
+                uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
+                if (!tryGroupRegisters(input, output))
+                    return false;
+            }
+        }
+    }
+
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        if (mir->shouldCancel("Backtracking Enqueue Registers"))
+            return false;
+
+        BacktrackingVirtualRegister &reg = vregs[i];
+        JS_ASSERT(reg.numIntervals() <= 2);
+        JS_ASSERT(!reg.canonicalSpill());
+
+        if (!reg.numIntervals())
+            continue;
+
+        // Disable this for now; see bugs 906858, 931487, and 932465.
+#if 0
+        // Eagerly set the canonical spill slot for registers which are preset
+        // for that slot, and reuse it for other registers in the group.
+        LDefinition *def = reg.def();
+        if (def->policy() == LDefinition::PRESET && !def->output()->isRegister()) {
+            reg.setCanonicalSpill(*def->output());
+            if (reg.group() && reg.group()->spill.isUse())
+                reg.group()->spill = *def->output();
+        }
+#endif
+
+        // Place all intervals for this register on the allocation queue.
+        // During initial queueing use single queue items for groups of
+        // registers, so that they will be allocated together and reduce the
+        // risk of unnecessary conflicts. This is in keeping with the idea that
+        // register groups are effectively single registers whose value changes
+        // during execution. If any intervals in the group are evicted later
+        // then they will be reallocated individually.
+        size_t start = 0;
+        if (VirtualRegisterGroup *group = reg.group()) {
+            if (i == group->canonicalReg()) {
+                size_t priority = computePriority(group);
+                if (!allocationQueue.insert(QueueItem(group, priority)))
+                    return false;
+            }
+            start++;
+        }
+        for (; start < reg.numIntervals(); start++) {
+            LiveInterval *interval = reg.getInterval(start);
+            if (interval->numRanges() > 0) {
+                size_t priority = computePriority(interval);
+                if (!allocationQueue.insert(QueueItem(interval, priority)))
+                    return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+static const size_t MAX_ATTEMPTS = 2;
+
+bool
+BacktrackingAllocator::tryAllocateFixed(LiveInterval *interval, bool *success,
+                                        bool *pfixed, LiveInterval **pconflicting)
+{
+    // Spill intervals which are required to be in a certain stack slot.
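+    // For example, a PRESET definition whose output is a stack slot produces
+    // a FIXED requirement on that slot (see setIntervalRequirement); such an
+    // interval is assigned the slot directly rather than competing for a
+    // register.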
+    if (!interval->requirement()->allocation().isRegister()) {
+        IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
+        interval->setAllocation(interval->requirement()->allocation());
+        *success = true;
+        return true;
+    }
+
+    AnyRegister reg = interval->requirement()->allocation().toRegister();
+    return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting);
+}
+
+bool
+BacktrackingAllocator::tryAllocateNonFixed(LiveInterval *interval, bool *success,
+                                           bool *pfixed, LiveInterval **pconflicting)
+{
+    // If we want, but do not require an interval to be in a specific
+    // register, only look at that register for allocating and evict
+    // or spill if it is not available. Picking a separate register may
+    // be even worse than spilling, as it will still necessitate moves
+    // and will tie up more registers than if we spilled.
+    if (interval->hint()->kind() == Requirement::FIXED) {
+        AnyRegister reg = interval->hint()->allocation().toRegister();
+        if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting))
+            return false;
+        if (*success)
+            return true;
+    }
+
+    // Spill intervals which have no hint or register requirement.
+    if (interval->requirement()->kind() == Requirement::NONE) {
+        spill(interval);
+        *success = true;
+        return true;
+    }
+
+    if (!*pconflicting || minimalInterval(interval)) {
+        // Search for any available register which the interval can be
+        // allocated to.
+        for (size_t i = 0; i < AnyRegister::Total; i++) {
+            if (!tryAllocateRegister(registers[i], interval, success, pfixed, pconflicting))
+                return false;
+            if (*success)
+                return true;
+        }
+    }
+
+    // We failed to allocate this interval.
+    JS_ASSERT(!*success);
+    return true;
+}
+
+bool
+BacktrackingAllocator::processInterval(LiveInterval *interval)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "Allocating v%u [priority %lu] [weight %lu]: %s",
+                interval->vreg(), computePriority(interval), computeSpillWeight(interval),
+                interval->rangesToString());
+    }
+
+    // An interval can be processed by doing any of the following:
+    //
+    // - Assigning the interval a register. The interval cannot overlap any
+    //   other interval allocated for that physical register.
+    //
+    // - Spilling the interval, provided it has no register uses.
+    //
+    // - Splitting the interval into two or more intervals which cover the
+    //   original one. The new intervals are placed back onto the priority
+    //   queue for later processing.
+    //
+    // - Evicting one or more existing allocated intervals, and then doing one
+    //   of the above operations. Evicted intervals are placed back on the
+    //   priority queue. Any evicted intervals must have a lower spill weight
+    //   than the interval being processed.
+    //
+    // As long as this structure is followed, termination is guaranteed.
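+    //
+    // For example (weights are illustrative): an interval of weight 500 may
+    // evict a conflicting interval of weight 300 and retry, but once
+    // MAX_ATTEMPTS is exhausted, or if the conflict is fixed or heavier, the
+    // interval is split or spilled instead.
+    //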
+    // In general, we want to minimize the amount of interval splitting
+    // (which generally necessitates spills), so allocate longer lived, lower
+    // weight intervals first and evict and split them later if they prevent
+    // allocation for higher weight intervals.
+
+    bool canAllocate = setIntervalRequirement(interval);
+
+    bool fixed;
+    LiveInterval *conflict = nullptr;
+    for (size_t attempt = 0;; attempt++) {
+        if (canAllocate) {
+            bool success = false;
+            fixed = false;
+            conflict = nullptr;
+
+            // Ok, let's try allocating for this interval.
+            if (interval->requirement()->kind() == Requirement::FIXED) {
+                if (!tryAllocateFixed(interval, &success, &fixed, &conflict))
+                    return false;
+            } else {
+                if (!tryAllocateNonFixed(interval, &success, &fixed, &conflict))
+                    return false;
+            }
+
+            // If that worked, we're done!
+            if (success)
+                return true;
+
+            // If that didn't work, but we have a non-fixed LiveInterval known
+            // to be conflicting, maybe we can evict it and try again.
+            if (attempt < MAX_ATTEMPTS &&
+                !fixed &&
+                conflict &&
+                computeSpillWeight(conflict) < computeSpillWeight(interval))
+            {
+                if (!evictInterval(conflict))
+                    return false;
+                continue;
+            }
+        }
+
+        // A minimal interval cannot be split any further. If we try to split
+        // it at this point we will just end up with the same interval and will
+        // enter an infinite loop. Weights and the initial live intervals must
+        // be constructed so that any minimal interval is allocatable.
+        JS_ASSERT(!minimalInterval(interval));
+
+        if (canAllocate && fixed)
+            return splitAcrossCalls(interval);
+        return chooseIntervalSplit(interval, conflict);
+    }
+}
+
+bool
+BacktrackingAllocator::processGroup(VirtualRegisterGroup *group)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
+                group->registers[0], computePriority(group), computeSpillWeight(group));
+    }
+
+    bool fixed;
+    LiveInterval *conflict;
+    for (size_t attempt = 0;; attempt++) {
+        // Search for any available register which the group can be allocated to.
+        fixed = false;
+        conflict = nullptr;
+        for (size_t i = 0; i < AnyRegister::Total; i++) {
+            bool success;
+            if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
+                return false;
+            if (success) {
+                conflict = nullptr;
+                break;
+            }
+        }
+
+        if (attempt < MAX_ATTEMPTS &&
+            !fixed &&
+            conflict &&
+            conflict->hasVreg() &&
+            computeSpillWeight(conflict) < computeSpillWeight(group))
+        {
+            if (!evictInterval(conflict))
+                return false;
+            continue;
+        }
+
+        for (size_t i = 0; i < group->registers.length(); i++) {
+            VirtualRegister &reg = vregs[group->registers[i]];
+            JS_ASSERT(reg.numIntervals() <= 2);
+            if (!processInterval(reg.getInterval(0)))
+                return false;
+        }
+
+        return true;
+    }
+}
+
+bool
+BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
+{
+    // Set any requirement or hint on interval according to its definition and
+    // uses. Return false if there are conflicting requirements which will
+    // require the interval to be split.
+    interval->setHint(Requirement());
+    interval->setRequirement(Requirement());
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+
+    // Set a hint if another interval in the same group is in a register.
+    if (VirtualRegisterGroup *group = reg->group()) {
+        if (group->allocation.isRegister()) {
+            if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                IonSpew(IonSpew_RegAlloc, "Hint %s, used by group allocation",
+                        group->allocation.toString());
+            }
+            interval->setHint(Requirement(group->allocation));
+        }
+    }
+
+    if (interval->index() == 0) {
+        // The first interval is the definition, so deal with any definition
+        // constraints/hints.
+
+        LDefinition::Policy policy = reg->def()->policy();
+        if (policy == LDefinition::PRESET) {
+            // Preset policies get a FIXED requirement.
+            if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                IonSpew(IonSpew_RegAlloc, "Requirement %s, preset by definition",
+                        reg->def()->output()->toString());
+            }
+            interval->setRequirement(Requirement(*reg->def()->output()));
+        } else if (reg->ins()->isPhi()) {
+            // Phis don't have any requirements, but they should prefer their
+            // input allocations. This is captured by the group hints above.
+        } else {
+            // Non-phis get a REGISTER requirement.
+            interval->setRequirement(Requirement(Requirement::REGISTER));
+        }
+    }
+
+    // Search uses for requirements.
+    for (UsePositionIterator iter = interval->usesBegin();
+         iter != interval->usesEnd();
+         iter++)
+    {
+        LUse::Policy policy = iter->use->policy();
+        if (policy == LUse::FIXED) {
+            AnyRegister required = GetFixedRegister(reg->def(), iter->use);
+
+            if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                IonSpew(IonSpew_RegAlloc, "Requirement %s, due to use at %u",
+                        required.name(), iter->pos.pos());
+            }
+
+            // If there are multiple fixed registers which the interval is
+            // required to use, fail. The interval will need to be split before
+            // it can be allocated.
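+            // (For instance, one use fixed to one register and another use
+            // fixed to a different register cannot both be satisfied by a
+            // single unsplit interval.)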
+            if (!interval->addRequirement(Requirement(LAllocation(required))))
+                return false;
+        } else if (policy == LUse::REGISTER) {
+            if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
+                return false;
+        }
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
+                                                bool *psuccess, bool *pfixed, LiveInterval **pconflicting)
+{
+    *psuccess = false;
+
+    if (!r.allocatable)
+        return true;
+
+    if (r.reg.isFloat() != vregs[group->registers[0]].isFloatReg())
+        return true;
+
+    bool allocatable = true;
+    LiveInterval *conflicting = nullptr;
+
+    for (size_t i = 0; i < group->registers.length(); i++) {
+        VirtualRegister &reg = vregs[group->registers[i]];
+        JS_ASSERT(reg.numIntervals() <= 2);
+        LiveInterval *interval = reg.getInterval(0);
+
+        for (size_t j = 0; j < interval->numRanges(); j++) {
+            AllocatedRange range(interval, interval->getRange(j)), existing;
+            if (r.allocations.contains(range, &existing)) {
+                if (conflicting) {
+                    if (conflicting != existing.interval)
+                        return true;
+                } else {
+                    conflicting = existing.interval;
+                }
+                allocatable = false;
+            }
+        }
+    }
+
+    if (!allocatable) {
+        JS_ASSERT(conflicting);
+        if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
+            *pconflicting = conflicting;
+        if (!conflicting->hasVreg())
+            *pfixed = true;
+        return true;
+    }
+
+    *psuccess = true;
+
+    group->allocation = LAllocation(r.reg);
+    return true;
+}
+
+bool
+BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
+                                           bool *success, bool *pfixed, LiveInterval **pconflicting)
+{
+    *success = false;
+
+    if (!r.allocatable)
+        return true;
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+    if (reg->isFloatReg() != r.reg.isFloat())
+        return true;
+
+    JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
+                 interval->requirement()->allocation() == LAllocation(r.reg));
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i)), existing;
+        if (r.allocations.contains(range, &existing)) {
+            if (existing.interval->hasVreg()) {
+                if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                    IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
+                            r.reg.name(), existing.interval->vreg(),
+                            existing.range->from.pos(), existing.range->to.pos(),
+                            computeSpillWeight(existing.interval));
+                }
+                if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
+                    *pconflicting = existing.interval;
+            } else {
+                if (IonSpewEnabled(IonSpew_RegAlloc)) {
+                    IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
+                            r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
+                }
+                *pfixed = true;
+            }
+            return true;
+        }
+    }
+
+    IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i));
+        if (!r.allocations.insert(range))
+            return false;
+    }
+
+    // Set any register hint for allocating other intervals in the same group.
+    if (VirtualRegisterGroup *group = reg->group()) {
+        if (!group->allocation.isRegister())
+            group->allocation = LAllocation(r.reg);
+    }
+
+    interval->setAllocation(LAllocation(r.reg));
+    *success = true;
+    return true;
+}
+
+bool
+BacktrackingAllocator::evictInterval(LiveInterval *interval)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
+                interval->vreg(), interval->rangesToString());
+    }
+
+    JS_ASSERT(interval->getAllocation()->isRegister());
+
+    AnyRegister reg(interval->getAllocation()->toRegister());
+    PhysicalRegister &physical = registers[reg.code()];
+    JS_ASSERT(physical.reg == reg && physical.allocatable);
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i));
+        physical.allocations.remove(range);
+    }
+
+    interval->setAllocation(LAllocation());
+
+    size_t priority = computePriority(interval);
+    return allocationQueue.insert(QueueItem(interval, priority));
+}
+
+void
+BacktrackingAllocator::distributeUses(LiveInterval *interval,
+                                      const LiveIntervalVector &newIntervals)
+{
+    JS_ASSERT(newIntervals.length() >= 2);
+
+    // Simple redistribution of uses from an old interval to a set of new
+    // intervals. Intervals are permitted to overlap, in which case this will
+    // assign uses in the overlapping section to the interval with the latest
+    // start position.
+    for (UsePositionIterator iter(interval->usesBegin());
+         iter != interval->usesEnd();
+         iter++)
+    {
+        CodePosition pos = iter->pos;
+        LiveInterval *addInterval = nullptr;
+        for (size_t i = 0; i < newIntervals.length(); i++) {
+            LiveInterval *newInterval = newIntervals[i];
+            if (newInterval->covers(pos)) {
+                if (!addInterval || newInterval->start() < addInterval->start())
+                    addInterval = newInterval;
+            }
+        }
+        addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
+    }
+}
+
+bool
+BacktrackingAllocator::split(LiveInterval *interval,
+                             const LiveIntervalVector &newIntervals)
+{
+    if (IonSpewEnabled(IonSpew_RegAlloc)) {
+        IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s into:",
+                interval->vreg(), interval->rangesToString());
+        for (size_t i = 0; i < newIntervals.length(); i++)
+            IonSpew(IonSpew_RegAlloc, " %s", newIntervals[i]->rangesToString());
+    }
+
+    JS_ASSERT(newIntervals.length() >= 2);
+
+    // Find the earliest interval in the new list.
+    LiveInterval *first = newIntervals[0];
+    for (size_t i = 1; i < newIntervals.length(); i++) {
+        if (newIntervals[i]->start() < first->start())
+            first = newIntervals[i];
+    }
+
+    // Replace the old interval in the virtual register's state with the new intervals.
+    VirtualRegister *reg = &vregs[interval->vreg()];
+    reg->replaceInterval(interval, first);
+    for (size_t i = 0; i < newIntervals.length(); i++) {
+        if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
+            return false;
+    }
+
+    return true;
+}
+
+bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector &newIntervals)
+{
+    // Queue the new intervals for register assignment.
+    for (size_t i = 0; i < newIntervals.length(); i++) {
+        LiveInterval *newInterval = newIntervals[i];
+        size_t priority = computePriority(newInterval);
+        if (!allocationQueue.insert(QueueItem(newInterval, priority)))
+            return false;
+    }
+    return true;
+}
+
+void
+BacktrackingAllocator::spill(LiveInterval *interval)
+{
+    IonSpew(IonSpew_RegAlloc, "Spilling interval");
+
+    JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
+    JS_ASSERT(!interval->getAllocation()->isStackSlot());
+
+    // We can't spill bogus intervals.
+    JS_ASSERT(interval->hasVreg());
+
+    BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
+
+    bool useCanonical = !reg->hasCanonicalSpillExclude()
+                        || interval->start() < reg->canonicalSpillExclude();
+
+    if (useCanonical) {
+        if (reg->canonicalSpill()) {
+            IonSpew(IonSpew_RegAlloc, " Picked canonical spill location %s",
+                    reg->canonicalSpill()->toString());
+            interval->setAllocation(*reg->canonicalSpill());
+            return;
+        }
+
+        if (reg->group() && !reg->group()->spill.isUse()) {
+            IonSpew(IonSpew_RegAlloc, " Reusing group spill location %s",
+                    reg->group()->spill.toString());
+            interval->setAllocation(reg->group()->spill);
+            reg->setCanonicalSpill(reg->group()->spill);
+            return;
+        }
+    }
+
+    uint32_t stackSlot = stackSlotAllocator.allocateSlot(reg->type());
+    JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
+
+    LStackSlot alloc(stackSlot);
+    interval->setAllocation(alloc);
+
+    IonSpew(IonSpew_RegAlloc, " Allocating spill location %s", alloc.toString());
+
+    if (useCanonical) {
+        reg->setCanonicalSpill(alloc);
+        if (reg->group())
+            reg->group()->spill = alloc;
+    }
+}
+
+// Add moves to resolve conflicting assignments between a block and its
+// predecessors. XXX try to common this with LinearScanAllocator.
+bool
+BacktrackingAllocator::resolveControlFlow()
+{
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        BacktrackingVirtualRegister *reg = &vregs[i];
+
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
+            return false;
+
+        for (size_t j = 1; j < reg->numIntervals(); j++) {
+            LiveInterval *interval = reg->getInterval(j);
+            JS_ASSERT(interval->index() == j);
+
+            bool skip = false;
+            for (int k = j - 1; k >= 0; k--) {
+                LiveInterval *prevInterval = reg->getInterval(k);
+                if (prevInterval->start() != interval->start())
+                    break;
+                if (*prevInterval->getAllocation() == *interval->getAllocation()) {
+                    skip = true;
+                    break;
+                }
+            }
+            if (skip)
+                continue;
+
+            CodePosition start = interval->start();
+            InstructionData *data = &insData[start];
+            if (interval->start() > inputOf(data->block()->firstId())) {
+                JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
+
+                LiveInterval *prevInterval = reg->intervalFor(start.previous());
+                if (start.subpos() == CodePosition::INPUT) {
+                    if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
+                        return false;
+                } else {
+                    if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
+                        return false;
+                }
+            }
+        }
+    }
+
+    for (size_t i = 0; i < graph.numBlocks(); i++) {
+        if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
+            return false;
+
+        LBlock *successor = graph.getBlock(i);
+        MBasicBlock *mSuccessor = successor->mir();
+        if (mSuccessor->numPredecessors() < 1)
+            continue;
+
+        // Resolve phis to moves
+        for (size_t j = 0; j < successor->numPhis(); j++) {
+            LPhi *phi = successor->getPhi(j);
+            JS_ASSERT(phi->numDefs() == 1);
+            LDefinition *def = phi->getDef(0);
+            VirtualRegister *vreg = &vregs[def];
+            LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
+            JS_ASSERT(to);
+
+            for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
+                JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
+
+                LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
+                LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
+                JS_ASSERT(from);
+
+                if (!moveAtExit(predecessor, from, to, def->type()))
+                    return false;
+            }
+        }
+
+        // Resolve split intervals with moves
+        BitSet *live = liveIn[mSuccessor->id()];
+
+        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
+            VirtualRegister &reg = vregs[*liveRegId];
+
+            for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
+                LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
+
+                for (size_t k = 0; k < reg.numIntervals(); k++) {
+                    LiveInterval *to = reg.getInterval(k);
+                    if (!to->covers(inputOf(successor->firstId())))
+                        continue;
+                    if (to->covers(outputOf(predecessor->lastId())))
+                        continue;
+
+                    LiveInterval *from = reg.intervalFor(outputOf(predecessor->lastId()));
+
+                    if (mSuccessor->numPredecessors() > 1) {
+                        JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
+                        if (!moveAtExit(predecessor, from, to, reg.type()))
+                            return false;
+                    } else {
+                        if (!moveAtEntry(successor, from, to, reg.type()))
+                            return false;
+                    }
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
+{
+    if (LDefinition *def = FindReusingDefinition(ins, use))
+        return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
+    return false;
+}
+
+bool
+BacktrackingAllocator::isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy)
+{
+    switch (use->policy()) {
+      case LUse::ANY:
+        return isReusedInput(use, ins, considerCopy);
+
+      case LUse::REGISTER:
+      case LUse::FIXED:
+        return true;
+
+      default:
+        return false;
+    }
+}
+
+bool
+BacktrackingAllocator::isRegisterDefinition(LiveInterval *interval)
+{
+    if (interval->index() != 0)
+        return false;
+
+    VirtualRegister &reg = vregs[interval->vreg()];
+    if (reg.ins()->isPhi())
+        return false;
+
+    if (reg.def()->policy() == LDefinition::PRESET && !reg.def()->output()->isRegister())
+        return false;
+
+    return true;
+}
+
+bool
+BacktrackingAllocator::reifyAllocations()
+{
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegister *reg = &vregs[i];
+
+        if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
+            return false;
+
+        for (size_t j = 0; j < reg->numIntervals(); j++) {
+            LiveInterval *interval = reg->getInterval(j);
+            JS_ASSERT(interval->index() == j);
+
+            if (interval->index() == 0) {
+                reg->def()->setOutput(*interval->getAllocation());
+                if (reg->ins()->recoversInput()) {
+                    LSnapshot *snapshot = reg->ins()->snapshot();
+                    for (size_t i = 0; i < snapshot->numEntries(); i++) {
+                        LAllocation *entry = snapshot->getEntry(i);
+                        if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
+                            *entry = *reg->def()->output();
+                    }
+                }
+            }
+
+            for (UsePositionIterator iter(interval->usesBegin());
+                 iter != interval->usesEnd();
+                 iter++)
+            {
+                LAllocation *alloc = iter->use;
+                *alloc = *interval->getAllocation();
+
+                // For any uses which feed into MUST_REUSE_INPUT definitions,
+                // add copies if the use and def have different allocations.
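+                // (E.g. if this use ended up in a different location than the
+                // reusing definition, a move from the use's allocation to the
+                // definition's allocation is inserted below and the use is
+                // rewritten to the definition's allocation.)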
+                LInstruction *ins = insData[iter->pos].ins();
+                if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
+                    LiveInterval *outputInterval =
+                        vregs[def->virtualRegister()].intervalFor(outputOf(ins));
+                    LAllocation *res = outputInterval->getAllocation();
+                    LAllocation *sourceAlloc = interval->getAllocation();
+
+                    if (*res != *alloc) {
+                        LMoveGroup *group = getInputMoveGroup(inputOf(ins));
+                        if (!group->addAfter(sourceAlloc, res, def->type()))
+                            return false;
+                        *alloc = *res;
+                    }
+                }
+            }
+
+            addLiveRegistersForInterval(reg, interval);
+        }
+    }
+
+    graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
+    return true;
+}
+
+bool
+BacktrackingAllocator::populateSafepoints()
+{
+    size_t firstSafepoint = 0;
+
+    // Virtual register number 0 is unused.
+    JS_ASSERT(!vregs[0u].def());
+    for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
+        BacktrackingVirtualRegister *reg = &vregs[i];
+
+        if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
+            continue;
+
+        firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
+        if (firstSafepoint >= graph.numSafepoints())
+            break;
+
+        // Find the furthest endpoint.
+        CodePosition end = reg->getInterval(0)->end();
+        for (size_t j = 1; j < reg->numIntervals(); j++)
+            end = Max(end, reg->getInterval(j)->end());
+
+        for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
+            LInstruction *ins = graph.getSafepoint(j);
+
+            // Stop processing safepoints if we know we're out of this virtual
+            // register's range.
+            if (end < outputOf(ins))
+                break;
+
+            // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
+            // is not used with gcthings or nunboxes, or we would have to add the input reg
+            // to this safepoint.
+            if (ins == reg->ins() && !reg->isTemp()) {
+                DebugOnly<LDefinition*> def = reg->def();
+                JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
+                             def->type() == LDefinition::GENERAL ||
+                             def->type() == LDefinition::INT32 ||
+                             def->type() == LDefinition::FLOAT32 ||
+                             def->type() == LDefinition::DOUBLE);
+                continue;
+            }
+
+            LSafepoint *safepoint = ins->safepoint();
+
+            for (size_t k = 0; k < reg->numIntervals(); k++) {
+                LiveInterval *interval = reg->getInterval(k);
+                if (!interval->covers(inputOf(ins)))
+                    continue;
+
+                LAllocation *a = interval->getAllocation();
+                if (a->isGeneralReg() && ins->isCall())
+                    continue;
+
+                switch (reg->type()) {
+                  case LDefinition::OBJECT:
+                    safepoint->addGcPointer(*a);
+                    break;
+                  case LDefinition::SLOTS:
+                    safepoint->addSlotsOrElementsPointer(*a);
+                    break;
+#ifdef JS_NUNBOX32
+                  case LDefinition::TYPE:
+                    safepoint->addNunboxType(i, *a);
+                    break;
+                  case LDefinition::PAYLOAD:
+                    safepoint->addNunboxPayload(i, *a);
+                    break;
+#else
+                  case LDefinition::BOX:
+                    safepoint->addBoxedValue(*a);
+                    break;
+#endif
+                  default:
+                    MOZ_ASSUME_UNREACHABLE("Bad register type");
+                }
+            }
+        }
+    }
+
+    return true;
+}
+
+void
+BacktrackingAllocator::dumpRegisterGroups()
+{
+    fprintf(stderr, "Register groups:\n");
+
+    // Virtual register number 0 is unused.
+    JS_ASSERT(!vregs[0u].group());
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        VirtualRegisterGroup *group = vregs[i].group();
+        if (group && i == group->canonicalReg()) {
+            for (size_t j = 0; j < group->registers.length(); j++)
+                fprintf(stderr, " v%u", group->registers[j]);
+            fprintf(stderr, "\n");
+        }
+    }
+}
+
+void
+BacktrackingAllocator::dumpLiveness()
+{
+#ifdef DEBUG
+    fprintf(stderr, "Virtual Registers:\n");
+
+    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
+        LBlock *block = graph.getBlock(blockIndex);
+        MBasicBlock *mir = block->mir();
+
+        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
+        for (size_t i = 0; i < mir->numSuccessors(); i++)
+            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
+        fprintf(stderr, "\n");
+
+        for (size_t i = 0; i < block->numPhis(); i++) {
+            LPhi *phi = block->getPhi(i);
+
+            // Don't print the inputOf for phi nodes, since it's never used.
+            fprintf(stderr, "[,%u Phi [def v%u] <-",
+                    outputOf(phi).pos(),
+                    phi->getDef(0)->virtualRegister());
+            for (size_t j = 0; j < phi->numOperands(); j++)
+                fprintf(stderr, " %s", phi->getOperand(j)->toString());
+            fprintf(stderr, "]\n");
+        }
+
+        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
+            LInstruction *ins = *iter;
+
+            fprintf(stderr, "[%u,%u %s]", inputOf(ins).pos(), outputOf(ins).pos(), ins->opName());
+
+            for (size_t i = 0; i < ins->numTemps(); i++) {
+                LDefinition *temp = ins->getTemp(i);
+                if (!temp->isBogusTemp())
+                    fprintf(stderr, " [temp v%u]", temp->virtualRegister());
+            }
+
+            for (size_t i = 0; i < ins->numDefs(); i++) {
+                LDefinition *def = ins->getDef(i);
+                fprintf(stderr, " [def v%u]", def->virtualRegister());
+            }
+
+            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
+                fprintf(stderr, " [use %s]", alloc->toString());
+
+            fprintf(stderr, "\n");
+        }
+    }
+
+    fprintf(stderr, "\nLive Ranges:\n\n");
+
+    for (size_t i = 0; i < AnyRegister::Total; i++)
+        if (registers[i].allocatable)
+            fprintf(stderr, "reg %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->rangesToString());
+
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
+        VirtualRegister &vreg = vregs[i];
+        for (size_t j = 0; j < vreg.numIntervals(); j++) {
+            if (j)
+                fprintf(stderr, " *");
+            fprintf(stderr, "%s", vreg.getInterval(j)->rangesToString());
+        }
+        fprintf(stderr, "\n");
+    }
+
+    fprintf(stderr, "\n");
+#endif // DEBUG
+}
+
+#ifdef DEBUG
+struct BacktrackingAllocator::PrintLiveIntervalRange
+{
+    void operator()(const AllocatedRange &item)
+    {
+        if (item.range == item.interval->getRange(0)) {
+            if (item.interval->hasVreg())
+                fprintf(stderr, " v%u: %s\n",
+                        item.interval->vreg(),
+                        item.interval->rangesToString());
+            else
+                fprintf(stderr, " fixed: %s\n",
+                        item.interval->rangesToString());
+        }
+    }
+};
+#endif
+
+void
+BacktrackingAllocator::dumpAllocations()
+{
+#ifdef DEBUG
+    fprintf(stderr, "Allocations:\n");
+
+    // Virtual register number 0 is unused.
+    JS_ASSERT(vregs[0u].numIntervals() == 0);
+    for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
+        fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
+        VirtualRegister &vreg = vregs[i];
+        for (size_t j = 0; j < vreg.numIntervals(); j++) {
+            if (j)
+                fprintf(stderr, " *");
+            LiveInterval *interval = vreg.getInterval(j);
+            fprintf(stderr, "%s :: %s", interval->rangesToString(), interval->getAllocation()->toString());
+        }
+        fprintf(stderr, "\n");
+    }
+
+    fprintf(stderr, "\n");
+
+    for (size_t i = 0; i < AnyRegister::Total; i++) {
+        if (registers[i].allocatable) {
+            fprintf(stderr, "reg %s:\n", AnyRegister::FromCode(i).name());
+            registers[i].allocations.forEach(PrintLiveIntervalRange());
+        }
+    }
+
+    fprintf(stderr, "\n");
+#endif // DEBUG
+}
+
+bool
+BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
+                                       LiveInterval *spillInterval,
+                                       CodePosition from, CodePosition to)
+{
+    LiveInterval *interval = LiveInterval::New(alloc(), vreg, 0);
+    interval->setSpillInterval(spillInterval);
+    return interval->addRange(from, to) && intervals.append(interval);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Heuristic Methods
+///////////////////////////////////////////////////////////////////////////////
+
+size_t
+BacktrackingAllocator::computePriority(const LiveInterval *interval)
+{
+    // The priority of an interval is its total length, so that longer lived
+    // intervals will be processed before shorter ones (even if the longer ones
+    // have a low spill weight). See processInterval().
+    size_t lifetimeTotal = 0;
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        const LiveInterval::Range *range = interval->getRange(i);
+        lifetimeTotal += range->to.pos() - range->from.pos();
+    }
+
+    return lifetimeTotal;
+}
+
+size_t
+BacktrackingAllocator::computePriority(const VirtualRegisterGroup *group)
+{
+    size_t priority = 0;
+    for (size_t j = 0; j < group->registers.length(); j++) {
+        uint32_t vreg = group->registers[j];
+        priority += computePriority(vregs[vreg].getInterval(0));
+    }
+    return priority;
+}
+
+bool
+BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
+{
+    // Whether interval is a minimal interval capturing a definition at ins.
+    return (interval->end() <= minimalDefEnd(ins).next()) &&
+        ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
+}
+
+bool
+BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
+{
+    // Whether interval is a minimal interval capturing a use at ins.
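+    // That is, the interval starts exactly at the use's input position and
+    // ends at the instruction's output position or immediately after it.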
+    return (interval->start() == inputOf(ins)) &&
+        (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
+}
+
+bool
+BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
+{
+    if (!interval->hasVreg()) {
+        *pfixed = true;
+        return true;
+    }
+
+    if (interval->index() == 0) {
+        VirtualRegister &reg = vregs[interval->vreg()];
+        if (pfixed)
+            *pfixed = reg.def()->policy() == LDefinition::PRESET && reg.def()->output()->isRegister();
+        return minimalDef(interval, reg.ins());
+    }
+
+    bool fixed = false, minimal = false;
+
+    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+        LUse *use = iter->use;
+
+        switch (use->policy()) {
+          case LUse::FIXED:
+            if (fixed)
+                return false;
+            fixed = true;
+            if (minimalUse(interval, insData[iter->pos].ins()))
+                minimal = true;
+            break;
+
+          case LUse::REGISTER:
+            if (minimalUse(interval, insData[iter->pos].ins()))
+                minimal = true;
+            break;
+
+          default:
+            break;
+        }
+    }
+
+    if (pfixed)
+        *pfixed = fixed;
+    return minimal;
+}
+
+size_t
+BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
+{
+    // Minimal intervals have an extremely high spill weight, to ensure they
+    // can evict any other intervals and be allocated to a register.
+    bool fixed;
+    if (minimalInterval(interval, &fixed))
+        return fixed ? 2000000 : 1000000;
+
+    size_t usesTotal = 0;
+
+    if (interval->index() == 0) {
+        VirtualRegister *reg = &vregs[interval->vreg()];
+        if (reg->def()->policy() == LDefinition::PRESET && reg->def()->output()->isRegister())
+            usesTotal += 2000;
+        else if (!reg->ins()->isPhi())
+            usesTotal += 2000;
+    }
+
+    for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
+        LUse *use = iter->use;
+
+        switch (use->policy()) {
+          case LUse::ANY:
+            usesTotal += 1000;
+            break;
+
+          case LUse::REGISTER:
+          case LUse::FIXED:
+            usesTotal += 2000;
+            break;
+
+          case LUse::KEEPALIVE:
+            break;
+
+          default:
+            // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
+            MOZ_ASSUME_UNREACHABLE("Bad use");
+        }
+    }
+
+    // Intervals for registers in groups get higher weights.
+    if (interval->hint()->kind() != Requirement::NONE)
+        usesTotal += 2000;
+
+    // Compute spill weight as a use density, lowering the weight for long
+    // lived intervals with relatively few uses.
+    size_t lifetimeTotal = computePriority(interval);
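+    // Illustrative arithmetic: a non-minimal interval with a register
+    // definition (2000), two REGISTER uses (2000 each) and one ANY use (1000)
+    // has usesTotal = 7000; over a lifetime of 100 code positions its weight
+    // is 70, far below the 1000000 floor given to minimal intervals.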
+    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
+}
+
+size_t
+BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup *group)
+{
+    size_t maxWeight = 0;
+    for (size_t j = 0; j < group->registers.length(); j++) {
+        uint32_t vreg = group->registers[j];
+        maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
+    }
+    return maxWeight;
+}
+
+bool
+BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
+{
+    // If this interval has portions that are hot and portions that are cold,
+    // split it at the boundaries between hot and cold code.
+
+    const LiveInterval::Range *hotRange = nullptr;
+
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        AllocatedRange range(interval, interval->getRange(i)), existing;
+        if (hotcode.contains(range, &existing)) {
+            hotRange = existing.range;
+            break;
+        }
+    }
+
+    // Don't split if there is no hot code in the interval.
+    if (!hotRange)
+        return true;
+
+    // Don't split if there is no cold code in the interval.
+    bool coldCode = false;
+    for (size_t i = 0; i < interval->numRanges(); i++) {
+        if (!hotRange->contains(interval->getRange(i))) {
+            coldCode = true;
+            break;
+        }
+    }
+    if (!coldCode)
+        return true;
+
+    SplitPositionVector splitPositions;
+    if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
+        return false;
+    *success = true;
+    return splitAt(interval, splitPositions);
+}
+
+bool
+BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success)
+{
+    // If this interval's later uses do not require it to be in a register,
+    // split it after the last use which does require a register. If conflict
+    // is specified, only consider register uses before the conflict starts.
+
+    CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
+
+    for (UsePositionIterator iter(interval->usesBegin());
+         iter != interval->usesEnd();
+         iter++)
+    {
+        LUse *use = iter->use;
+        LInstruction *ins = insData[iter->pos].ins();
+
+        // Uses in the interval should be sorted.
+        JS_ASSERT(iter->pos >= lastUse);
+        lastUse = inputOf(ins);
+
+        if (!conflict || outputOf(ins) < conflict->start()) {
+            if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
+                lastRegisterFrom = inputOf(ins);
+                lastRegisterTo = iter->pos.next();
+            }
+        }
+    }
+
+    if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
+        // Can't trim non-register uses off the end by splitting.
+        return true;
+    }
+
+    SplitPositionVector splitPositions;
+    if (!splitPositions.append(lastRegisterTo))
+        return false;
+    *success = true;
+    return splitAt(interval, splitPositions);
+}
+
+bool
+BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
+{
+    // Split this interval so that all its register uses become minimal
+    // intervals and allow the vreg to be spilled throughout its range.
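+    //
+    // Sketch of the outcome: the register definition (if any) and each
+    // register use get a tiny interval of their own around the corresponding
+    // instruction, every other use lands on a spill interval covering the
+    // whole range, and all of the new intervals are queued again.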
1.1579 +
1.1580 +    LiveIntervalVector newIntervals;
1.1581 +    uint32_t vreg = interval->vreg();
1.1582 +
1.1583 +    // If this LiveInterval is the result of an earlier split which created a
1.1584 +    // spill interval, that spill interval covers the whole range, so we don't
1.1585 +    // need to create a new one.
1.1586 +    bool spillIntervalIsNew = false;
1.1587 +    LiveInterval *spillInterval = interval->spillInterval();
1.1588 +    if (!spillInterval) {
1.1589 +        spillInterval = LiveInterval::New(alloc(), vreg, 0);
1.1590 +        spillIntervalIsNew = true;
1.1591 +    }
1.1592 +
1.1593 +    CodePosition spillStart = interval->start();
1.1594 +    if (isRegisterDefinition(interval)) {
1.1595 +        // Treat the definition of the interval as a register use so that it
1.1596 +        // can be split and spilled ASAP.
1.1597 +        CodePosition from = interval->start();
1.1598 +        CodePosition to = minimalDefEnd(insData[from].ins()).next();
1.1599 +        if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
1.1600 +            return false;
1.1601 +        spillStart = to;
1.1602 +    }
1.1603 +
1.1604 +    if (spillIntervalIsNew) {
1.1605 +        for (size_t i = 0; i < interval->numRanges(); i++) {
1.1606 +            const LiveInterval::Range *range = interval->getRange(i);
1.1607 +            CodePosition from = range->from < spillStart ? spillStart : range->from;
1.1608 +            if (!spillInterval->addRange(from, range->to))
1.1609 +                return false;
1.1610 +        }
1.1611 +    }
1.1612 +
1.1613 +    for (UsePositionIterator iter(interval->usesBegin());
1.1614 +         iter != interval->usesEnd();
1.1615 +         iter++)
1.1616 +    {
1.1617 +        LInstruction *ins = insData[iter->pos].ins();
1.1618 +        if (iter->pos < spillStart) {
1.1619 +            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1620 +        } else if (isRegisterUse(iter->use, ins)) {
1.1621 +            // For register uses which are not useRegisterAtStart, pick an
1.1622 +            // interval that covers both the instruction's input and output, so
1.1623 +            // that the register is not reused for an output.
1.1624 +            CodePosition from = inputOf(ins);
1.1625 +            CodePosition to = iter->pos.next();
1.1626 +
1.1627 +            // Use the same interval for duplicate use positions, except when
1.1628 +            // the uses are fixed (they may require incompatible registers).
1.1629 +            if (newIntervals.empty() || newIntervals.back()->end() != to || iter->use->policy() == LUse::FIXED) {
1.1630 +                if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
1.1631 +                    return false;
1.1632 +            }
1.1633 +
1.1634 +            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1635 +        } else {
1.1636 +            JS_ASSERT(spillIntervalIsNew);
1.1637 +            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1638 +        }
1.1639 +    }
1.1640 +
1.1641 +    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
1.1642 +        return false;
1.1643 +
1.1644 +    return split(interval, newIntervals) && requeueIntervals(newIntervals);
1.1645 +}
1.1646 +
1.1647 +// Find the next split position after the current position.
1.1648 +static size_t NextSplitPosition(size_t activeSplitPosition,
1.1649 +                                const SplitPositionVector &splitPositions,
1.1650 +                                CodePosition currentPos)
1.1651 +{
1.1652 +    while (activeSplitPosition < splitPositions.length() &&
1.1653 +           splitPositions[activeSplitPosition] <= currentPos)
1.1654 +    {
1.1655 +        ++activeSplitPosition;
1.1656 +    }
1.1657 +    return activeSplitPosition;
1.1658 +}
1.1659 +
1.1660 +// Test whether the current position has just crossed a split point.
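// For example (hypothetical positions), with splitPositions = {5, 12} and
// activeSplitPosition = 0: SplitHere(0, positions, 3) is false, while
// SplitHere(0, positions, 7) is true, and NextSplitPosition(0, positions, 7)
// then advances past the crossed point and returns 1.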
1.1661 +static bool SplitHere(size_t activeSplitPosition,
1.1662 +                      const SplitPositionVector &splitPositions,
1.1663 +                      CodePosition currentPos)
1.1664 +{
1.1665 +    return activeSplitPosition < splitPositions.length() &&
1.1666 +           currentPos >= splitPositions[activeSplitPosition];
1.1667 +}
1.1668 +
1.1669 +bool
1.1670 +BacktrackingAllocator::splitAt(LiveInterval *interval,
1.1671 +                               const SplitPositionVector &splitPositions)
1.1672 +{
1.1673 +    // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
1.1674 +    // consolidate any register uses which have no intervening split points into the
1.1675 +    // same resulting interval.
1.1676 +
1.1677 +    // splitPositions should be non-empty and sorted.
1.1678 +    JS_ASSERT(!splitPositions.empty());
1.1679 +    for (size_t i = 1; i < splitPositions.length(); ++i)
1.1680 +        JS_ASSERT(splitPositions[i-1] < splitPositions[i]);
1.1681 +
1.1682 +    // Don't spill the interval until after the end of its definition.
1.1683 +    CodePosition spillStart = interval->start();
1.1684 +    if (isRegisterDefinition(interval))
1.1685 +        spillStart = minimalDefEnd(insData[interval->start()].ins()).next();
1.1686 +
1.1687 +    uint32_t vreg = interval->vreg();
1.1688 +
1.1689 +    // If this LiveInterval is the result of an earlier split which created a
1.1690 +    // spill interval, that spill interval covers the whole range, so we don't
1.1691 +    // need to create a new one.
1.1692 +    bool spillIntervalIsNew = false;
1.1693 +    LiveInterval *spillInterval = interval->spillInterval();
1.1694 +    if (!spillInterval) {
1.1695 +        spillInterval = LiveInterval::New(alloc(), vreg, 0);
1.1696 +        spillIntervalIsNew = true;
1.1697 +
1.1698 +        for (size_t i = 0; i < interval->numRanges(); i++) {
1.1699 +            const LiveInterval::Range *range = interval->getRange(i);
1.1700 +            CodePosition from = range->from < spillStart ? spillStart : range->from;
1.1701 +            if (!spillInterval->addRange(from, range->to))
1.1702 +                return false;
1.1703 +        }
1.1704 +    }
1.1705 +
1.1706 +    LiveIntervalVector newIntervals;
1.1707 +
1.1708 +    CodePosition lastRegisterUse;
1.1709 +    if (spillStart != interval->start()) {
1.1710 +        LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
1.1711 +        newInterval->setSpillInterval(spillInterval);
1.1712 +        if (!newIntervals.append(newInterval))
1.1713 +            return false;
1.1714 +        lastRegisterUse = interval->start();
1.1715 +    }
1.1716 +
1.1717 +    size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
1.1718 +    for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
1.1719 +        LInstruction *ins = insData[iter->pos].ins();
1.1720 +        if (iter->pos < spillStart) {
1.1721 +            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1722 +            activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
1.1723 +        } else if (isRegisterUse(iter->use, ins)) {
1.1724 +            if (lastRegisterUse.pos() == 0 ||
1.1725 +                SplitHere(activeSplitPosition, splitPositions, iter->pos))
1.1726 +            {
1.1727 +                // Place this register use into a different interval from the
1.1728 +                // last one if there are any split points between the two uses.
1.1729 +                LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
1.1730 +                newInterval->setSpillInterval(spillInterval);
1.1731 +                if (!newIntervals.append(newInterval))
1.1732 +                    return false;
1.1733 +                activeSplitPosition = NextSplitPosition(activeSplitPosition,
1.1734 +                                                        splitPositions,
1.1735 +                                                        iter->pos);
1.1736 +            }
1.1737 +            newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1738 +            lastRegisterUse = iter->pos;
1.1739 +        } else {
1.1740 +            JS_ASSERT(spillIntervalIsNew);
1.1741 +            spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
1.1742 +        }
1.1743 +    }
1.1744 +
1.1745 +    // Compute ranges for each new interval that cover all its uses.
1.1746 +    size_t activeRange = interval->numRanges();
1.1747 +    for (size_t i = 0; i < newIntervals.length(); i++) {
1.1748 +        LiveInterval *newInterval = newIntervals[i];
1.1749 +        CodePosition start, end;
1.1750 +        if (i == 0 && spillStart != interval->start()) {
1.1751 +            start = interval->start();
1.1752 +            if (newInterval->usesEmpty())
1.1753 +                end = spillStart;
1.1754 +            else
1.1755 +                end = newInterval->usesBack()->pos.next();
1.1756 +        } else {
1.1757 +            start = inputOf(insData[newInterval->usesBegin()->pos].ins());
1.1758 +            end = newInterval->usesBack()->pos.next();
1.1759 +        }
1.1760 +        for (; activeRange > 0; --activeRange) {
1.1761 +            const LiveInterval::Range *range = interval->getRange(activeRange - 1);
1.1762 +            if (range->to <= start)
1.1763 +                continue;
1.1764 +            if (range->from >= end)
1.1765 +                break;
1.1766 +            if (!newInterval->addRange(Max(range->from, start),
1.1767 +                                       Min(range->to, end)))
1.1768 +                return false;
1.1769 +            if (range->to >= end)
1.1770 +                break;
1.1771 +        }
1.1772 +    }
1.1773 +
1.1774 +    if (spillIntervalIsNew && !newIntervals.append(spillInterval))
1.1775 +        return false;
1.1776 +
1.1777 +    return split(interval, newIntervals) && requeueIntervals(newIntervals);
1.1778 +}
1.1779 +
1.1780 +bool
1.1781 +BacktrackingAllocator::splitAcrossCalls(LiveInterval *interval)
1.1782 +{
1.1783 +    // Split the interval to separate register uses and non-register uses and
1.1784 +    // allow the vreg to be spilled across its range.
1.1785 +
1.1786 +    // Find the locations of all calls in the interval's range. Fixed intervals
1.1787 +    // are introduced by buildLivenessInfo only for calls when allocating for
1.1788 +    // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
1.1789 +    // iterate through it backwards.
1.1790 +    SplitPositionVector callPositions;
1.1791 +    for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
1.1792 +        const LiveInterval::Range *range = fixedIntervalsUnion->getRange(i - 1);
1.1793 +        if (interval->covers(range->from) && interval->covers(range->from.previous())) {
1.1794 +            if (!callPositions.append(range->from))
1.1795 +                return false;
1.1796 +        }
1.1797 +    }
1.1798 +    JS_ASSERT(callPositions.length());
1.1799 +
1.1800 +    return splitAt(interval, callPositions);
1.1801 +}
1.1802 +
1.1803 +bool
1.1804 +BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
1.1805 +{
1.1806 +    bool success = false;
1.1807 +
1.1808 +    if (!trySplitAcrossHotcode(interval, &success))
1.1809 +        return false;
1.1810 +    if (success)
1.1811 +        return true;
1.1812 +
1.1813 +    if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
1.1814 +        return false;
1.1815 +    if (success)
1.1816 +        return true;
1.1817 +
1.1818 +    return splitAtAllRegisterUses(interval);
1.1819 +}
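
To make the use-density spill-weight heuristic above concrete, here is a small standalone C++ sketch. It is not SpiderMonkey code: ToyInterval, ToyUse and toySpillWeight are invented names, and the lifetime term is approximated by the summed range lengths rather than by computePriority, which is not shown in this hunk. It reproduces the per-policy weights from computeSpillWeight (ANY = 1000, REGISTER/FIXED = 2000, KEEPALIVE = 0, plus 2000 when the interval has a hint) and shows how a long-lived interval with few uses ends up with a low weight.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

enum class UsePolicy { Any, Register, Fixed, KeepAlive };

struct ToyRange { uint32_t from, to; };          // covered positions, half-open [from, to)
struct ToyUse   { uint32_t pos; UsePolicy policy; };

struct ToyInterval {
    std::vector<ToyRange> ranges;
    std::vector<ToyUse> uses;
    bool hasHint;                                // stands in for hint()->kind() != Requirement::NONE
};

// Mirrors the use-density computation in computeSpillWeight, minus the
// minimal-interval and definition-policy special cases.
static size_t toySpillWeight(const ToyInterval &interval)
{
    size_t usesTotal = 0;
    for (const ToyUse &use : interval.uses) {
        switch (use.policy) {
          case UsePolicy::Any:       usesTotal += 1000; break;
          case UsePolicy::Register:
          case UsePolicy::Fixed:     usesTotal += 2000; break;
          case UsePolicy::KeepAlive:                    break;
        }
    }
    if (interval.hasHint)
        usesTotal += 2000;

    // Assumption: approximate the priority (lifetime) term by the total
    // number of covered positions.
    size_t lifetimeTotal = 0;
    for (const ToyRange &range : interval.ranges)
        lifetimeTotal += range.to - range.from;

    return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}

int main()
{
    // Two intervals with identical uses: the long-lived one gets the lower
    // weight, so it is the one the allocator would rather spill or split.
    ToyInterval shortLived = { { {10, 14} },
                               { {10, UsePolicy::Register}, {12, UsePolicy::Fixed} },
                               false };
    ToyInterval longLived  = { { {10, 210} },
                               { {10, UsePolicy::Register}, {200, UsePolicy::Fixed} },
                               false };

    std::cout << "short-lived weight: " << toySpillWeight(shortLived) << "\n"   // 1000
              << "long-lived weight:  " << toySpillWeight(longLived)  << "\n";  // 20
    return 0;
}

With the toy data in main, the four-position interval scores 1000 while the 200-position interval scores 20, which illustrates why the splitting strategies above try to leave behind a cheap, long-lived spill interval and keep only small intervals around the uses that actually need a register.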