js/src/jit/BacktrackingAllocator.cpp

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Wed, 31 Dec 2014 06:09:35 +0100
changeset:   0 (6474c204b198)
permissions: -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #include "jit/BacktrackingAllocator.h"
michael@0 8 #include "jit/BitSet.h"
michael@0 9
michael@0 10 using namespace js;
michael@0 11 using namespace js::jit;
michael@0 12
michael@0 13 using mozilla::DebugOnly;
michael@0 14
michael@0 15 bool
michael@0 16 BacktrackingAllocator::init()
michael@0 17 {
michael@0 18 RegisterSet remainingRegisters(allRegisters_);
michael@0 19 while (!remainingRegisters.empty(/* float = */ false)) {
michael@0 20 AnyRegister reg = AnyRegister(remainingRegisters.takeGeneral());
michael@0 21 registers[reg.code()].allocatable = true;
michael@0 22 }
michael@0 23 while (!remainingRegisters.empty(/* float = */ true)) {
michael@0 24 AnyRegister reg = AnyRegister(remainingRegisters.takeFloat());
michael@0 25 registers[reg.code()].allocatable = true;
michael@0 26 }
michael@0 27
michael@0 28 LifoAlloc *lifoAlloc = mir->alloc().lifoAlloc();
michael@0 29 for (size_t i = 0; i < AnyRegister::Total; i++) {
michael@0 30 registers[i].reg = AnyRegister::FromCode(i);
michael@0 31 registers[i].allocations.setAllocator(lifoAlloc);
michael@0 32
michael@0 33 LiveInterval *fixed = fixedIntervals[i];
michael@0 34 for (size_t j = 0; j < fixed->numRanges(); j++) {
michael@0 35 AllocatedRange range(fixed, fixed->getRange(j));
michael@0 36 if (!registers[i].allocations.insert(range))
michael@0 37 return false;
michael@0 38 }
michael@0 39 }
michael@0 40
michael@0 41 hotcode.setAllocator(lifoAlloc);
michael@0 42
michael@0 43 // Partition the graph into hot and cold sections, for helping to make
michael@0 44 // splitting decisions. Since we don't have any profiling data this is a
michael@0 45 // crapshoot, so just mark the bodies of inner loops as hot and everything
michael@0 46 // else as cold.
michael@0 47
michael@0 48 LiveInterval *hotcodeInterval = LiveInterval::New(alloc(), 0);
michael@0 49
michael@0 50 LBlock *backedge = nullptr;
michael@0 51 for (size_t i = 0; i < graph.numBlocks(); i++) {
michael@0 52 LBlock *block = graph.getBlock(i);
michael@0 53
michael@0 54 // If we see a loop header, mark the backedge so we know when we have
michael@0 55 // hit the end of the loop. Don't process the loop immediately, so that
michael@0 56 // if there is an inner loop we will ignore the outer backedge.
michael@0 57 if (block->mir()->isLoopHeader())
michael@0 58 backedge = block->mir()->backedge()->lir();
michael@0 59
michael@0 60 if (block == backedge) {
michael@0 61 LBlock *header = block->mir()->loopHeaderOfBackedge()->lir();
michael@0 62 CodePosition from = inputOf(header->firstId());
michael@0 63 CodePosition to = outputOf(block->lastId()).next();
michael@0 64 if (!hotcodeInterval->addRange(from, to))
michael@0 65 return false;
michael@0 66 }
michael@0 67 }
michael@0 68
michael@0 69 for (size_t i = 0; i < hotcodeInterval->numRanges(); i++) {
michael@0 70 AllocatedRange range(hotcodeInterval, hotcodeInterval->getRange(i));
michael@0 71 if (!hotcode.insert(range))
michael@0 72 return false;
michael@0 73 }
michael@0 74
michael@0 75 return true;
michael@0 76 }
michael@0 77
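// Top level driver: build liveness information, initialize per-register
// state, group and queue the virtual registers, then repeatedly allocate,
// evict, split and spill intervals from the priority queue until it is
// empty. Finally resolve control flow, reify the allocations into the LIR
// and populate safepoints.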
michael@0 78 bool
michael@0 79 BacktrackingAllocator::go()
michael@0 80 {
michael@0 81 IonSpew(IonSpew_RegAlloc, "Beginning register allocation");
michael@0 82
michael@0 83 IonSpew(IonSpew_RegAlloc, "Beginning liveness analysis");
michael@0 84 if (!buildLivenessInfo())
michael@0 85 return false;
michael@0 86 IonSpew(IonSpew_RegAlloc, "Liveness analysis complete");
michael@0 87
michael@0 88 if (!init())
michael@0 89 return false;
michael@0 90
michael@0 91 if (IonSpewEnabled(IonSpew_RegAlloc))
michael@0 92 dumpLiveness();
michael@0 93
michael@0 94 if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
michael@0 95 return false;
michael@0 96
michael@0 97 if (!groupAndQueueRegisters())
michael@0 98 return false;
michael@0 99
michael@0 100 if (IonSpewEnabled(IonSpew_RegAlloc))
michael@0 101 dumpRegisterGroups();
michael@0 102
michael@0 103 // Allocate, spill and split register intervals until finished.
michael@0 104 while (!allocationQueue.empty()) {
michael@0 105 if (mir->shouldCancel("Backtracking Allocation"))
michael@0 106 return false;
michael@0 107
michael@0 108 QueueItem item = allocationQueue.removeHighest();
michael@0 109 if (item.interval ? !processInterval(item.interval) : !processGroup(item.group))
michael@0 110 return false;
michael@0 111 }
michael@0 112
michael@0 113 if (IonSpewEnabled(IonSpew_RegAlloc))
michael@0 114 dumpAllocations();
michael@0 115
michael@0 116 return resolveControlFlow() && reifyAllocations() && populateSafepoints();
michael@0 117 }
michael@0 118
michael@0 119 static bool
michael@0 120 LifetimesOverlap(BacktrackingVirtualRegister *reg0, BacktrackingVirtualRegister *reg1)
michael@0 121 {
michael@0 122 // Registers may have been eagerly split in two, see tryGroupReusedRegister.
michael@0 123 // In such cases, only consider the first interval.
michael@0 124 JS_ASSERT(reg0->numIntervals() <= 2 && reg1->numIntervals() <= 2);
michael@0 125
michael@0 126 LiveInterval *interval0 = reg0->getInterval(0), *interval1 = reg1->getInterval(0);
michael@0 127
michael@0 128 // Interval ranges are sorted in reverse order. The lifetimes overlap if
michael@0 129 // any of their ranges overlap.
michael@0 130 size_t index0 = 0, index1 = 0;
michael@0 131 while (index0 < interval0->numRanges() && index1 < interval1->numRanges()) {
michael@0 132 const LiveInterval::Range
michael@0 133 *range0 = interval0->getRange(index0),
michael@0 134 *range1 = interval1->getRange(index1);
michael@0 135 if (range0->from >= range1->to)
michael@0 136 index0++;
michael@0 137 else if (range1->from >= range0->to)
michael@0 138 index1++;
michael@0 139 else
michael@0 140 return true;
michael@0 141 }
michael@0 142
michael@0 143 return false;
michael@0 144 }
michael@0 145
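// A register may only join a group if its lifetime does not overlap any
// register already in the group: grouped registers are treated as a single
// register (see groupAndQueueRegisters), so their values must never be
// live at the same time.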
michael@0 146 bool
michael@0 147 BacktrackingAllocator::canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg)
michael@0 148 {
michael@0 149 for (size_t i = 0; i < group->registers.length(); i++) {
michael@0 150 if (LifetimesOverlap(reg, &vregs[group->registers[i]]))
michael@0 151 return false;
michael@0 152 }
michael@0 153 return true;
michael@0 154 }
michael@0 155
michael@0 156 bool
michael@0 157 BacktrackingAllocator::tryGroupRegisters(uint32_t vreg0, uint32_t vreg1)
michael@0 158 {
michael@0 159 // See if reg0 and reg1 can be placed in the same group, following the
michael@0 160 // restrictions imposed by VirtualRegisterGroup and any other registers
michael@0 161 // already grouped with reg0 or reg1.
michael@0 162 BacktrackingVirtualRegister *reg0 = &vregs[vreg0], *reg1 = &vregs[vreg1];
michael@0 163
michael@0 164 if (reg0->isFloatReg() != reg1->isFloatReg())
michael@0 165 return true;
michael@0 166
michael@0 167 VirtualRegisterGroup *group0 = reg0->group(), *group1 = reg1->group();
michael@0 168
michael@0 169 if (!group0 && group1)
michael@0 170 return tryGroupRegisters(vreg1, vreg0);
michael@0 171
michael@0 172 if (group0) {
michael@0 173 if (group1) {
michael@0 174 if (group0 == group1) {
michael@0 175 // The registers are already grouped together.
michael@0 176 return true;
michael@0 177 }
michael@0 178 // Try to unify the two distinct groups.
michael@0 179 for (size_t i = 0; i < group1->registers.length(); i++) {
michael@0 180 if (!canAddToGroup(group0, &vregs[group1->registers[i]]))
michael@0 181 return true;
michael@0 182 }
michael@0 183 for (size_t i = 0; i < group1->registers.length(); i++) {
michael@0 184 uint32_t vreg = group1->registers[i];
michael@0 185 if (!group0->registers.append(vreg))
michael@0 186 return false;
michael@0 187 vregs[vreg].setGroup(group0);
michael@0 188 }
michael@0 189 return true;
michael@0 190 }
michael@0 191 if (!canAddToGroup(group0, reg1))
michael@0 192 return true;
michael@0 193 if (!group0->registers.append(vreg1))
michael@0 194 return false;
michael@0 195 reg1->setGroup(group0);
michael@0 196 return true;
michael@0 197 }
michael@0 198
michael@0 199 if (LifetimesOverlap(reg0, reg1))
michael@0 200 return true;
michael@0 201
michael@0 202 VirtualRegisterGroup *group = new(alloc()) VirtualRegisterGroup(alloc());
michael@0 203 if (!group->registers.append(vreg0) || !group->registers.append(vreg1))
michael@0 204 return false;
michael@0 205
michael@0 206 reg0->setGroup(group);
michael@0 207 reg1->setGroup(group);
michael@0 208 return true;
michael@0 209 }
michael@0 210
michael@0 211 bool
michael@0 212 BacktrackingAllocator::tryGroupReusedRegister(uint32_t def, uint32_t use)
michael@0 213 {
michael@0 214 BacktrackingVirtualRegister &reg = vregs[def], &usedReg = vregs[use];
michael@0 215
michael@0 216 // reg is a vreg which reuses its input usedReg for its output physical
michael@0 217 // register. Try to group reg with usedReg if at all possible, as avoiding
michael@0 218 // copies before reg's instruction is crucial for the quality of the
michael@0 219 // generated code (MUST_REUSE_INPUT is used by all arithmetic instructions
michael@0 220 // on x86/x64).
michael@0 221
michael@0 222 if (reg.intervalFor(inputOf(reg.ins()))) {
michael@0 223 JS_ASSERT(reg.isTemp());
michael@0 224 reg.setMustCopyInput();
michael@0 225 return true;
michael@0 226 }
michael@0 227
michael@0 228 if (!usedReg.intervalFor(outputOf(reg.ins()))) {
michael@0 229 // The input is not live after the instruction, either in a safepoint
michael@0 230 // for the instruction or in subsequent code. The input and output
michael@0 231 // can thus be in the same group.
michael@0 232 return tryGroupRegisters(use, def);
michael@0 233 }
michael@0 234
michael@0 235 // The input is live afterwards, either in future instructions or in a
michael@0 236 // safepoint for the reusing instruction. This is impossible to satisfy
michael@0 237 // without copying the input.
michael@0 238 //
michael@0 239 // It may or may not be better to split the interval at the point of the
michael@0 240 // definition, which may permit grouping. One case where it is definitely
michael@0 241 // better to split is if the input never has any register uses after the
michael@0 242 // instruction. Handle this splitting eagerly.
michael@0 243
michael@0 244 if (usedReg.numIntervals() != 1 ||
michael@0 245 (usedReg.def()->isPreset() && !usedReg.def()->output()->isRegister())) {
michael@0 246 reg.setMustCopyInput();
michael@0 247 return true;
michael@0 248 }
michael@0 249 LiveInterval *interval = usedReg.getInterval(0);
michael@0 250 LBlock *block = insData[reg.ins()].block();
michael@0 251
michael@0 252 // The input's lifetime must end within the same block as the definition,
michael@0 253 // otherwise it could live on in phis elsewhere.
michael@0 254 if (interval->end() > outputOf(block->lastId())) {
michael@0 255 reg.setMustCopyInput();
michael@0 256 return true;
michael@0 257 }
michael@0 258
michael@0 259 for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
michael@0 260 if (iter->pos <= inputOf(reg.ins()))
michael@0 261 continue;
michael@0 262
michael@0 263 LUse *use = iter->use;
michael@0 264 if (FindReusingDefinition(insData[iter->pos].ins(), use)) {
michael@0 265 reg.setMustCopyInput();
michael@0 266 return true;
michael@0 267 }
michael@0 268 if (use->policy() != LUse::ANY && use->policy() != LUse::KEEPALIVE) {
michael@0 269 reg.setMustCopyInput();
michael@0 270 return true;
michael@0 271 }
michael@0 272 }
michael@0 273
michael@0 274 LiveInterval *preInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
michael@0 275 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 276 const LiveInterval::Range *range = interval->getRange(i);
michael@0 277 JS_ASSERT(range->from <= inputOf(reg.ins()));
michael@0 278
michael@0 279 CodePosition to = (range->to <= outputOf(reg.ins())) ? range->to : outputOf(reg.ins());
michael@0 280 if (!preInterval->addRange(range->from, to))
michael@0 281 return false;
michael@0 282 }
michael@0 283
michael@0 284 LiveInterval *postInterval = LiveInterval::New(alloc(), interval->vreg(), 0);
michael@0 285 if (!postInterval->addRange(inputOf(reg.ins()), interval->end()))
michael@0 286 return false;
michael@0 287
michael@0 288 LiveIntervalVector newIntervals;
michael@0 289 if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
michael@0 290 return false;
michael@0 291
michael@0 292 distributeUses(interval, newIntervals);
michael@0 293
michael@0 294 if (!split(interval, newIntervals))
michael@0 295 return false;
michael@0 296
michael@0 297 JS_ASSERT(usedReg.numIntervals() == 2);
michael@0 298
michael@0 299 usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
michael@0 300
michael@0 301 return tryGroupRegisters(use, def);
michael@0 302 }
michael@0 303
michael@0 304 bool
michael@0 305 BacktrackingAllocator::groupAndQueueRegisters()
michael@0 306 {
michael@0 307 // Try to group registers with their reused inputs.
michael@0 308 // Virtual register number 0 is unused.
michael@0 309 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 310 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 311 BacktrackingVirtualRegister &reg = vregs[i];
michael@0 312 if (!reg.numIntervals())
michael@0 313 continue;
michael@0 314
michael@0 315 if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
michael@0 316 LUse *use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
michael@0 317 if (!tryGroupReusedRegister(i, use->virtualRegister()))
michael@0 318 return false;
michael@0 319 }
michael@0 320 }
michael@0 321
michael@0 322 // Try to group phis with their inputs.
michael@0 323 for (size_t i = 0; i < graph.numBlocks(); i++) {
michael@0 324 LBlock *block = graph.getBlock(i);
michael@0 325 for (size_t j = 0; j < block->numPhis(); j++) {
michael@0 326 LPhi *phi = block->getPhi(j);
michael@0 327 uint32_t output = phi->getDef(0)->virtualRegister();
michael@0 328 for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
michael@0 329 uint32_t input = phi->getOperand(k)->toUse()->virtualRegister();
michael@0 330 if (!tryGroupRegisters(input, output))
michael@0 331 return false;
michael@0 332 }
michael@0 333 }
michael@0 334 }
michael@0 335
michael@0 336 // Virtual register number 0 is unused.
michael@0 337 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 338 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 339 if (mir->shouldCancel("Backtracking Enqueue Registers"))
michael@0 340 return false;
michael@0 341
michael@0 342 BacktrackingVirtualRegister &reg = vregs[i];
michael@0 343 JS_ASSERT(reg.numIntervals() <= 2);
michael@0 344 JS_ASSERT(!reg.canonicalSpill());
michael@0 345
michael@0 346 if (!reg.numIntervals())
michael@0 347 continue;
michael@0 348
michael@0 349 // Disable this for now; see bugs 906858, 931487, and 932465.
michael@0 350 #if 0
michael@0 351 // Eagerly set the canonical spill slot for registers which are preset
michael@0 352 // for that slot, and reuse it for other registers in the group.
michael@0 353 LDefinition *def = reg.def();
michael@0 354 if (def->policy() == LDefinition::PRESET && !def->output()->isRegister()) {
michael@0 355 reg.setCanonicalSpill(*def->output());
michael@0 356 if (reg.group() && reg.group()->spill.isUse())
michael@0 357 reg.group()->spill = *def->output();
michael@0 358 }
michael@0 359 #endif
michael@0 360
michael@0 361 // Place all intervals for this register on the allocation queue.
michael@0 362 // During initial queueing use single queue items for groups of
michael@0 363 // registers, so that they will be allocated together and reduce the
michael@0 364 // risk of unnecessary conflicts. This is in keeping with the idea that
michael@0 365 // register groups are effectively single registers whose value changes
michael@0 366 // during execution. If any intervals in the group are evicted later
michael@0 367 // then they will be reallocated individually.
michael@0 368 size_t start = 0;
michael@0 369 if (VirtualRegisterGroup *group = reg.group()) {
michael@0 370 if (i == group->canonicalReg()) {
michael@0 371 size_t priority = computePriority(group);
michael@0 372 if (!allocationQueue.insert(QueueItem(group, priority)))
michael@0 373 return false;
michael@0 374 }
michael@0 375 start++;
michael@0 376 }
michael@0 377 for (; start < reg.numIntervals(); start++) {
michael@0 378 LiveInterval *interval = reg.getInterval(start);
michael@0 379 if (interval->numRanges() > 0) {
michael@0 380 size_t priority = computePriority(interval);
michael@0 381 if (!allocationQueue.insert(QueueItem(interval, priority)))
michael@0 382 return false;
michael@0 383 }
michael@0 384 }
michael@0 385 }
michael@0 386
michael@0 387 return true;
michael@0 388 }
michael@0 389
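// Maximum number of times processInterval/processGroup will evict a
// conflicting interval and retry before falling back to splitting the
// interval (or, for groups, to allocating each member individually).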
michael@0 390 static const size_t MAX_ATTEMPTS = 2;
michael@0 391
michael@0 392 bool
michael@0 393 BacktrackingAllocator::tryAllocateFixed(LiveInterval *interval, bool *success,
michael@0 394 bool *pfixed, LiveInterval **pconflicting)
michael@0 395 {
michael@0 396 // Spill intervals which are required to be in a certain stack slot.
michael@0 397 if (!interval->requirement()->allocation().isRegister()) {
michael@0 398 IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
michael@0 399 interval->setAllocation(interval->requirement()->allocation());
michael@0 400 *success = true;
michael@0 401 return true;
michael@0 402 }
michael@0 403
michael@0 404 AnyRegister reg = interval->requirement()->allocation().toRegister();
michael@0 405 return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting);
michael@0 406 }
michael@0 407
michael@0 408 bool
michael@0 409 BacktrackingAllocator::tryAllocateNonFixed(LiveInterval *interval, bool *success,
michael@0 410 bool *pfixed, LiveInterval **pconflicting)
michael@0 411 {
michael@0 412 // If we want, but do not require, an interval to be in a specific
michael@0 413 // register, only look at that register for allocating and evict
michael@0 414 // or spill if it is not available. Picking a separate register may
michael@0 415 // be even worse than spilling, as it will still necessitate moves
michael@0 416 // and will tie up more registers than if we spilled.
michael@0 417 if (interval->hint()->kind() == Requirement::FIXED) {
michael@0 418 AnyRegister reg = interval->hint()->allocation().toRegister();
michael@0 419 if (!tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting))
michael@0 420 return false;
michael@0 421 if (*success)
michael@0 422 return true;
michael@0 423 }
michael@0 424
michael@0 425 // Spill intervals which have no hint or register requirement.
michael@0 426 if (interval->requirement()->kind() == Requirement::NONE) {
michael@0 427 spill(interval);
michael@0 428 *success = true;
michael@0 429 return true;
michael@0 430 }
michael@0 431
michael@0 432 if (!*pconflicting || minimalInterval(interval)) {
michael@0 433 // Search for any available register which the interval can be
michael@0 434 // allocated to.
michael@0 435 for (size_t i = 0; i < AnyRegister::Total; i++) {
michael@0 436 if (!tryAllocateRegister(registers[i], interval, success, pfixed, pconflicting))
michael@0 437 return false;
michael@0 438 if (*success)
michael@0 439 return true;
michael@0 440 }
michael@0 441 }
michael@0 442
michael@0 443 // We failed to allocate this interval.
michael@0 444 JS_ASSERT(!*success);
michael@0 445 return true;
michael@0 446 }
michael@0 447
michael@0 448 bool
michael@0 449 BacktrackingAllocator::processInterval(LiveInterval *interval)
michael@0 450 {
michael@0 451 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 452 IonSpew(IonSpew_RegAlloc, "Allocating v%u [priority %lu] [weight %lu]: %s",
michael@0 453 interval->vreg(), computePriority(interval), computeSpillWeight(interval),
michael@0 454 interval->rangesToString());
michael@0 455 }
michael@0 456
michael@0 457 // An interval can be processed by doing any of the following:
michael@0 458 //
michael@0 459 // - Assigning the interval a register. The interval cannot overlap any
michael@0 460 // other interval allocated for that physical register.
michael@0 461 //
michael@0 462 // - Spilling the interval, provided it has no register uses.
michael@0 463 //
michael@0 464 // - Splitting the interval into two or more intervals which cover the
michael@0 465 // original one. The new intervals are placed back onto the priority
michael@0 466 // queue for later processing.
michael@0 467 //
michael@0 468 // - Evicting one or more existing allocated intervals, and then doing one
michael@0 469 // of the above operations. Evicted intervals are placed back on the
michael@0 470 // priority queue. Any evicted intervals must have a lower spill weight
michael@0 471 // than the interval being processed.
michael@0 472 //
michael@0 473 // As long as this structure is followed, termination is guaranteed.
michael@0 474 // In general, we want to minimize the amount of interval splitting
michael@0 475 // (which generally necessitates spills), so allocate longer lived, lower
michael@0 476 // weight intervals first and evict and split them later if they prevent
michael@0 477 // allocation for higher weight intervals.
michael@0 478
michael@0 479 bool canAllocate = setIntervalRequirement(interval);
michael@0 480
michael@0 481 bool fixed;
michael@0 482 LiveInterval *conflict = nullptr;
michael@0 483 for (size_t attempt = 0;; attempt++) {
michael@0 484 if (canAllocate) {
michael@0 485 bool success = false;
michael@0 486 fixed = false;
michael@0 487 conflict = nullptr;
michael@0 488
michael@0 489 // Ok, let's try allocating for this interval.
michael@0 490 if (interval->requirement()->kind() == Requirement::FIXED) {
michael@0 491 if (!tryAllocateFixed(interval, &success, &fixed, &conflict))
michael@0 492 return false;
michael@0 493 } else {
michael@0 494 if (!tryAllocateNonFixed(interval, &success, &fixed, &conflict))
michael@0 495 return false;
michael@0 496 }
michael@0 497
michael@0 498 // If that worked, we're done!
michael@0 499 if (success)
michael@0 500 return true;
michael@0 501
michael@0 502 // If that didn't work, but we have a non-fixed LiveInterval known
michael@0 503 // to be conflicting, maybe we can evict it and try again.
michael@0 504 if (attempt < MAX_ATTEMPTS &&
michael@0 505 !fixed &&
michael@0 506 conflict &&
michael@0 507 computeSpillWeight(conflict) < computeSpillWeight(interval))
michael@0 508 {
michael@0 509 if (!evictInterval(conflict))
michael@0 510 return false;
michael@0 511 continue;
michael@0 512 }
michael@0 513 }
michael@0 514
michael@0 515 // A minimal interval cannot be split any further. If we try to split
michael@0 516 // it at this point we will just end up with the same interval and will
michael@0 517 // enter an infinite loop. Weights and the initial live intervals must
michael@0 518 // be constructed so that any minimal interval is allocatable.
michael@0 519 JS_ASSERT(!minimalInterval(interval));
michael@0 520
michael@0 521 if (canAllocate && fixed)
michael@0 522 return splitAcrossCalls(interval);
michael@0 523 return chooseIntervalSplit(interval, conflict);
michael@0 524 }
michael@0 525 }
michael@0 526
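// Allocate a group of registers: look for a register which every member's
// first interval can use, evicting a lower weight conflicting interval a
// bounded number of times if necessary. The chosen register is recorded as
// the group allocation (a hint for the members), and each member's
// definition interval is then processed individually.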
michael@0 527 bool
michael@0 528 BacktrackingAllocator::processGroup(VirtualRegisterGroup *group)
michael@0 529 {
michael@0 530 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 531 IonSpew(IonSpew_RegAlloc, "Allocating group v%u [priority %lu] [weight %lu]",
michael@0 532 group->registers[0], computePriority(group), computeSpillWeight(group));
michael@0 533 }
michael@0 534
michael@0 535 bool fixed;
michael@0 536 LiveInterval *conflict;
michael@0 537 for (size_t attempt = 0;; attempt++) {
michael@0 538 // Search for any available register which the group can be allocated to.
michael@0 539 fixed = false;
michael@0 540 conflict = nullptr;
michael@0 541 for (size_t i = 0; i < AnyRegister::Total; i++) {
michael@0 542 bool success;
michael@0 543 if (!tryAllocateGroupRegister(registers[i], group, &success, &fixed, &conflict))
michael@0 544 return false;
michael@0 545 if (success) {
michael@0 546 conflict = nullptr;
michael@0 547 break;
michael@0 548 }
michael@0 549 }
michael@0 550
michael@0 551 if (attempt < MAX_ATTEMPTS &&
michael@0 552 !fixed &&
michael@0 553 conflict &&
michael@0 554 conflict->hasVreg() &&
michael@0 555 computeSpillWeight(conflict) < computeSpillWeight(group))
michael@0 556 {
michael@0 557 if (!evictInterval(conflict))
michael@0 558 return false;
michael@0 559 continue;
michael@0 560 }
michael@0 561
michael@0 562 for (size_t i = 0; i < group->registers.length(); i++) {
michael@0 563 VirtualRegister &reg = vregs[group->registers[i]];
michael@0 564 JS_ASSERT(reg.numIntervals() <= 2);
michael@0 565 if (!processInterval(reg.getInterval(0)))
michael@0 566 return false;
michael@0 567 }
michael@0 568
michael@0 569 return true;
michael@0 570 }
michael@0 571 }
michael@0 572
michael@0 573 bool
michael@0 574 BacktrackingAllocator::setIntervalRequirement(LiveInterval *interval)
michael@0 575 {
michael@0 576 // Set any requirement or hint on interval according to its definition and
michael@0 577 // uses. Return false if there are conflicting requirements which will
michael@0 578 // require the interval to be split.
michael@0 579 interval->setHint(Requirement());
michael@0 580 interval->setRequirement(Requirement());
michael@0 581
michael@0 582 BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
michael@0 583
michael@0 584 // Set a hint if another interval in the same group is in a register.
michael@0 585 if (VirtualRegisterGroup *group = reg->group()) {
michael@0 586 if (group->allocation.isRegister()) {
michael@0 587 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 588 IonSpew(IonSpew_RegAlloc, "Hint %s, used by group allocation",
michael@0 589 group->allocation.toString());
michael@0 590 }
michael@0 591 interval->setHint(Requirement(group->allocation));
michael@0 592 }
michael@0 593 }
michael@0 594
michael@0 595 if (interval->index() == 0) {
michael@0 596 // The first interval is the definition, so deal with any definition
michael@0 597 // constraints/hints.
michael@0 598
michael@0 599 LDefinition::Policy policy = reg->def()->policy();
michael@0 600 if (policy == LDefinition::PRESET) {
michael@0 601 // Preset policies get a FIXED requirement.
michael@0 602 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 603 IonSpew(IonSpew_RegAlloc, "Requirement %s, preset by definition",
michael@0 604 reg->def()->output()->toString());
michael@0 605 }
michael@0 606 interval->setRequirement(Requirement(*reg->def()->output()));
michael@0 607 } else if (reg->ins()->isPhi()) {
michael@0 608 // Phis don't have any requirements, but they should prefer their
michael@0 609 // input allocations. This is captured by the group hints above.
michael@0 610 } else {
michael@0 611 // Non-phis get a REGISTER requirement.
michael@0 612 interval->setRequirement(Requirement(Requirement::REGISTER));
michael@0 613 }
michael@0 614 }
michael@0 615
michael@0 616 // Search uses for requirements.
michael@0 617 for (UsePositionIterator iter = interval->usesBegin();
michael@0 618 iter != interval->usesEnd();
michael@0 619 iter++)
michael@0 620 {
michael@0 621 LUse::Policy policy = iter->use->policy();
michael@0 622 if (policy == LUse::FIXED) {
michael@0 623 AnyRegister required = GetFixedRegister(reg->def(), iter->use);
michael@0 624
michael@0 625 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 626 IonSpew(IonSpew_RegAlloc, "Requirement %s, due to use at %u",
michael@0 627 required.name(), iter->pos.pos());
michael@0 628 }
michael@0 629
michael@0 630 // If there are multiple fixed registers which the interval is
michael@0 631 // required to use, fail. The interval will need to be split before
michael@0 632 // it can be allocated.
michael@0 633 if (!interval->addRequirement(Requirement(LAllocation(required))))
michael@0 634 return false;
michael@0 635 } else if (policy == LUse::REGISTER) {
michael@0 636 if (!interval->addRequirement(Requirement(Requirement::REGISTER)))
michael@0 637 return false;
michael@0 638 }
michael@0 639 }
michael@0 640
michael@0 641 return true;
michael@0 642 }
michael@0 643
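// Test whether physical register r can hold the first interval of every
// register in group. On success, record r as the group's allocation.
// Otherwise report the conflicting interval (only when there is a single
// one) so the caller may consider evicting it, and note conflicts with
// fixed ranges.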
michael@0 644 bool
michael@0 645 BacktrackingAllocator::tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
michael@0 646 bool *psuccess, bool *pfixed, LiveInterval **pconflicting)
michael@0 647 {
michael@0 648 *psuccess = false;
michael@0 649
michael@0 650 if (!r.allocatable)
michael@0 651 return true;
michael@0 652
michael@0 653 if (r.reg.isFloat() != vregs[group->registers[0]].isFloatReg())
michael@0 654 return true;
michael@0 655
michael@0 656 bool allocatable = true;
michael@0 657 LiveInterval *conflicting = nullptr;
michael@0 658
michael@0 659 for (size_t i = 0; i < group->registers.length(); i++) {
michael@0 660 VirtualRegister &reg = vregs[group->registers[i]];
michael@0 661 JS_ASSERT(reg.numIntervals() <= 2);
michael@0 662 LiveInterval *interval = reg.getInterval(0);
michael@0 663
michael@0 664 for (size_t j = 0; j < interval->numRanges(); j++) {
michael@0 665 AllocatedRange range(interval, interval->getRange(j)), existing;
michael@0 666 if (r.allocations.contains(range, &existing)) {
michael@0 667 if (conflicting) {
michael@0 668 if (conflicting != existing.interval)
michael@0 669 return true;
michael@0 670 } else {
michael@0 671 conflicting = existing.interval;
michael@0 672 }
michael@0 673 allocatable = false;
michael@0 674 }
michael@0 675 }
michael@0 676 }
michael@0 677
michael@0 678 if (!allocatable) {
michael@0 679 JS_ASSERT(conflicting);
michael@0 680 if (!*pconflicting || computeSpillWeight(conflicting) < computeSpillWeight(*pconflicting))
michael@0 681 *pconflicting = conflicting;
michael@0 682 if (!conflicting->hasVreg())
michael@0 683 *pfixed = true;
michael@0 684 return true;
michael@0 685 }
michael@0 686
michael@0 687 *psuccess = true;
michael@0 688
michael@0 689 group->allocation = LAllocation(r.reg);
michael@0 690 return true;
michael@0 691 }
michael@0 692
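// Try to allocate physical register r to interval. On success the
// interval's ranges are added to r's allocation set and r becomes a hint
// for other intervals in the same group. On failure, report the lowest
// weight conflicting interval seen so far, or note that the conflict is
// with a fixed range.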
michael@0 693 bool
michael@0 694 BacktrackingAllocator::tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
michael@0 695 bool *success, bool *pfixed, LiveInterval **pconflicting)
michael@0 696 {
michael@0 697 *success = false;
michael@0 698
michael@0 699 if (!r.allocatable)
michael@0 700 return true;
michael@0 701
michael@0 702 BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
michael@0 703 if (reg->isFloatReg() != r.reg.isFloat())
michael@0 704 return true;
michael@0 705
michael@0 706 JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
michael@0 707 interval->requirement()->allocation() == LAllocation(r.reg));
michael@0 708
michael@0 709 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 710 AllocatedRange range(interval, interval->getRange(i)), existing;
michael@0 711 if (r.allocations.contains(range, &existing)) {
michael@0 712 if (existing.interval->hasVreg()) {
michael@0 713 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 714 IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
michael@0 715 r.reg.name(), existing.interval->vreg(),
michael@0 716 existing.range->from.pos(), existing.range->to.pos(),
michael@0 717 computeSpillWeight(existing.interval));
michael@0 718 }
michael@0 719 if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
michael@0 720 *pconflicting = existing.interval;
michael@0 721 } else {
michael@0 722 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 723 IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
michael@0 724 r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
michael@0 725 }
michael@0 726 *pfixed = true;
michael@0 727 }
michael@0 728 return true;
michael@0 729 }
michael@0 730 }
michael@0 731
michael@0 732 IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());
michael@0 733
michael@0 734 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 735 AllocatedRange range(interval, interval->getRange(i));
michael@0 736 if (!r.allocations.insert(range))
michael@0 737 return false;
michael@0 738 }
michael@0 739
michael@0 740 // Set any register hint for allocating other intervals in the same group.
michael@0 741 if (VirtualRegisterGroup *group = reg->group()) {
michael@0 742 if (!group->allocation.isRegister())
michael@0 743 group->allocation = LAllocation(r.reg);
michael@0 744 }
michael@0 745
michael@0 746 interval->setAllocation(LAllocation(r.reg));
michael@0 747 *success = true;
michael@0 748 return true;
michael@0 749 }
michael@0 750
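// Undo a previous register assignment: remove the interval's ranges from
// its register's allocation set, clear its allocation and put it back on
// the allocation queue so it can be reallocated, split or spilled later.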
michael@0 751 bool
michael@0 752 BacktrackingAllocator::evictInterval(LiveInterval *interval)
michael@0 753 {
michael@0 754 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 755 IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
michael@0 756 interval->vreg(), interval->rangesToString());
michael@0 757 }
michael@0 758
michael@0 759 JS_ASSERT(interval->getAllocation()->isRegister());
michael@0 760
michael@0 761 AnyRegister reg(interval->getAllocation()->toRegister());
michael@0 762 PhysicalRegister &physical = registers[reg.code()];
michael@0 763 JS_ASSERT(physical.reg == reg && physical.allocatable);
michael@0 764
michael@0 765 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 766 AllocatedRange range(interval, interval->getRange(i));
michael@0 767 physical.allocations.remove(range);
michael@0 768 }
michael@0 769
michael@0 770 interval->setAllocation(LAllocation());
michael@0 771
michael@0 772 size_t priority = computePriority(interval);
michael@0 773 return allocationQueue.insert(QueueItem(interval, priority));
michael@0 774 }
michael@0 775
michael@0 776 void
michael@0 777 BacktrackingAllocator::distributeUses(LiveInterval *interval,
michael@0 778 const LiveIntervalVector &newIntervals)
michael@0 779 {
michael@0 780 JS_ASSERT(newIntervals.length() >= 2);
michael@0 781
michael@0 782 // Simple redistribution of uses from an old interval to a set of new
michael@0 783 // intervals. Intervals are permitted to overlap, in which case this will
michael@0 784 // assign uses in the overlapping section to the interval with the earliest
michael@0 785 // start position.
michael@0 786 for (UsePositionIterator iter(interval->usesBegin());
michael@0 787 iter != interval->usesEnd();
michael@0 788 iter++)
michael@0 789 {
michael@0 790 CodePosition pos = iter->pos;
michael@0 791 LiveInterval *addInterval = nullptr;
michael@0 792 for (size_t i = 0; i < newIntervals.length(); i++) {
michael@0 793 LiveInterval *newInterval = newIntervals[i];
michael@0 794 if (newInterval->covers(pos)) {
michael@0 795 if (!addInterval || newInterval->start() < addInterval->start())
michael@0 796 addInterval = newInterval;
michael@0 797 }
michael@0 798 }
michael@0 799 addInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 800 }
michael@0 801 }
michael@0 802
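// Replace interval in its virtual register's state with newIntervals,
// which together cover the original interval. Uses are redistributed
// separately (see distributeUses) and callers requeue the new intervals
// when needed.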
michael@0 803 bool
michael@0 804 BacktrackingAllocator::split(LiveInterval *interval,
michael@0 805 const LiveIntervalVector &newIntervals)
michael@0 806 {
michael@0 807 if (IonSpewEnabled(IonSpew_RegAlloc)) {
michael@0 808 IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s into:",
michael@0 809 interval->vreg(), interval->rangesToString());
michael@0 810 for (size_t i = 0; i < newIntervals.length(); i++)
michael@0 811 IonSpew(IonSpew_RegAlloc, " %s", newIntervals[i]->rangesToString());
michael@0 812 }
michael@0 813
michael@0 814 JS_ASSERT(newIntervals.length() >= 2);
michael@0 815
michael@0 816 // Find the earliest interval in the new list.
michael@0 817 LiveInterval *first = newIntervals[0];
michael@0 818 for (size_t i = 1; i < newIntervals.length(); i++) {
michael@0 819 if (newIntervals[i]->start() < first->start())
michael@0 820 first = newIntervals[i];
michael@0 821 }
michael@0 822
michael@0 823 // Replace the old interval in the virtual register's state with the new intervals.
michael@0 824 VirtualRegister *reg = &vregs[interval->vreg()];
michael@0 825 reg->replaceInterval(interval, first);
michael@0 826 for (size_t i = 0; i < newIntervals.length(); i++) {
michael@0 827 if (newIntervals[i] != first && !reg->addInterval(newIntervals[i]))
michael@0 828 return false;
michael@0 829 }
michael@0 830
michael@0 831 return true;
michael@0 832 }
michael@0 833
michael@0 834 bool BacktrackingAllocator::requeueIntervals(const LiveIntervalVector &newIntervals)
michael@0 835 {
michael@0 836 // Queue the new intervals for register assignment.
michael@0 837 for (size_t i = 0; i < newIntervals.length(); i++) {
michael@0 838 LiveInterval *newInterval = newIntervals[i];
michael@0 839 size_t priority = computePriority(newInterval);
michael@0 840 if (!allocationQueue.insert(QueueItem(newInterval, priority)))
michael@0 841 return false;
michael@0 842 }
michael@0 843 return true;
michael@0 844 }
michael@0 845
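// Give interval a stack location: reuse the vreg's canonical spill
// location or its group's spill location when permitted, otherwise
// allocate a fresh stack slot and remember it as the canonical (and group)
// spill location.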
michael@0 846 void
michael@0 847 BacktrackingAllocator::spill(LiveInterval *interval)
michael@0 848 {
michael@0 849 IonSpew(IonSpew_RegAlloc, "Spilling interval");
michael@0 850
michael@0 851 JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
michael@0 852 JS_ASSERT(!interval->getAllocation()->isStackSlot());
michael@0 853
michael@0 854 // We can't spill bogus intervals.
michael@0 855 JS_ASSERT(interval->hasVreg());
michael@0 856
michael@0 857 BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
michael@0 858
michael@0 859 bool useCanonical = !reg->hasCanonicalSpillExclude()
michael@0 860 || interval->start() < reg->canonicalSpillExclude();
michael@0 861
michael@0 862 if (useCanonical) {
michael@0 863 if (reg->canonicalSpill()) {
michael@0 864 IonSpew(IonSpew_RegAlloc, " Picked canonical spill location %s",
michael@0 865 reg->canonicalSpill()->toString());
michael@0 866 interval->setAllocation(*reg->canonicalSpill());
michael@0 867 return;
michael@0 868 }
michael@0 869
michael@0 870 if (reg->group() && !reg->group()->spill.isUse()) {
michael@0 871 IonSpew(IonSpew_RegAlloc, " Reusing group spill location %s",
michael@0 872 reg->group()->spill.toString());
michael@0 873 interval->setAllocation(reg->group()->spill);
michael@0 874 reg->setCanonicalSpill(reg->group()->spill);
michael@0 875 return;
michael@0 876 }
michael@0 877 }
michael@0 878
michael@0 879 uint32_t stackSlot = stackSlotAllocator.allocateSlot(reg->type());
michael@0 880 JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
michael@0 881
michael@0 882 LStackSlot alloc(stackSlot);
michael@0 883 interval->setAllocation(alloc);
michael@0 884
michael@0 885 IonSpew(IonSpew_RegAlloc, " Allocating spill location %s", alloc.toString());
michael@0 886
michael@0 887 if (useCanonical) {
michael@0 888 reg->setCanonicalSpill(alloc);
michael@0 889 if (reg->group())
michael@0 890 reg->group()->spill = alloc;
michael@0 891 }
michael@0 892 }
michael@0 893
michael@0 894 // Add moves to resolve conflicting assignments between a block and its
michael@0 895 // predecessors. XXX try to common this with LinearScanAllocator.
michael@0 896 bool
michael@0 897 BacktrackingAllocator::resolveControlFlow()
michael@0 898 {
michael@0 899 // Virtual register number 0 is unused.
michael@0 900 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 901 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 902 BacktrackingVirtualRegister *reg = &vregs[i];
michael@0 903
michael@0 904 if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg loop)"))
michael@0 905 return false;
michael@0 906
michael@0 907 for (size_t j = 1; j < reg->numIntervals(); j++) {
michael@0 908 LiveInterval *interval = reg->getInterval(j);
michael@0 909 JS_ASSERT(interval->index() == j);
michael@0 910
michael@0 911 bool skip = false;
michael@0 912 for (int k = j - 1; k >= 0; k--) {
michael@0 913 LiveInterval *prevInterval = reg->getInterval(k);
michael@0 914 if (prevInterval->start() != interval->start())
michael@0 915 break;
michael@0 916 if (*prevInterval->getAllocation() == *interval->getAllocation()) {
michael@0 917 skip = true;
michael@0 918 break;
michael@0 919 }
michael@0 920 }
michael@0 921 if (skip)
michael@0 922 continue;
michael@0 923
michael@0 924 CodePosition start = interval->start();
michael@0 925 InstructionData *data = &insData[start];
michael@0 926 if (interval->start() > inputOf(data->block()->firstId())) {
michael@0 927 JS_ASSERT(start == inputOf(data->ins()) || start == outputOf(data->ins()));
michael@0 928
michael@0 929 LiveInterval *prevInterval = reg->intervalFor(start.previous());
michael@0 930 if (start.subpos() == CodePosition::INPUT) {
michael@0 931 if (!moveInput(inputOf(data->ins()), prevInterval, interval, reg->type()))
michael@0 932 return false;
michael@0 933 } else {
michael@0 934 if (!moveAfter(outputOf(data->ins()), prevInterval, interval, reg->type()))
michael@0 935 return false;
michael@0 936 }
michael@0 937 }
michael@0 938 }
michael@0 939 }
michael@0 940
michael@0 941 for (size_t i = 0; i < graph.numBlocks(); i++) {
michael@0 942 if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
michael@0 943 return false;
michael@0 944
michael@0 945 LBlock *successor = graph.getBlock(i);
michael@0 946 MBasicBlock *mSuccessor = successor->mir();
michael@0 947 if (mSuccessor->numPredecessors() < 1)
michael@0 948 continue;
michael@0 949
michael@0 950 // Resolve phis to moves
michael@0 951 for (size_t j = 0; j < successor->numPhis(); j++) {
michael@0 952 LPhi *phi = successor->getPhi(j);
michael@0 953 JS_ASSERT(phi->numDefs() == 1);
michael@0 954 LDefinition *def = phi->getDef(0);
michael@0 955 VirtualRegister *vreg = &vregs[def];
michael@0 956 LiveInterval *to = vreg->intervalFor(inputOf(successor->firstId()));
michael@0 957 JS_ASSERT(to);
michael@0 958
michael@0 959 for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
michael@0 960 LBlock *predecessor = mSuccessor->getPredecessor(k)->lir();
michael@0 961 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
michael@0 962
michael@0 963 LAllocation *input = phi->getOperand(predecessor->mir()->positionInPhiSuccessor());
michael@0 964 LiveInterval *from = vregs[input].intervalFor(outputOf(predecessor->lastId()));
michael@0 965 JS_ASSERT(from);
michael@0 966
michael@0 967 if (!moveAtExit(predecessor, from, to, def->type()))
michael@0 968 return false;
michael@0 969 }
michael@0 970 }
michael@0 971
michael@0 972 // Resolve split intervals with moves
michael@0 973 BitSet *live = liveIn[mSuccessor->id()];
michael@0 974
michael@0 975 for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
michael@0 976 VirtualRegister &reg = vregs[*liveRegId];
michael@0 977
michael@0 978 for (size_t j = 0; j < mSuccessor->numPredecessors(); j++) {
michael@0 979 LBlock *predecessor = mSuccessor->getPredecessor(j)->lir();
michael@0 980
michael@0 981 for (size_t k = 0; k < reg.numIntervals(); k++) {
michael@0 982 LiveInterval *to = reg.getInterval(k);
michael@0 983 if (!to->covers(inputOf(successor->firstId())))
michael@0 984 continue;
michael@0 985 if (to->covers(outputOf(predecessor->lastId())))
michael@0 986 continue;
michael@0 987
michael@0 988 LiveInterval *from = reg.intervalFor(outputOf(predecessor->lastId()));
michael@0 989
michael@0 990 if (mSuccessor->numPredecessors() > 1) {
michael@0 991 JS_ASSERT(predecessor->mir()->numSuccessors() == 1);
michael@0 992 if (!moveAtExit(predecessor, from, to, reg.type()))
michael@0 993 return false;
michael@0 994 } else {
michael@0 995 if (!moveAtEntry(successor, from, to, reg.type()))
michael@0 996 return false;
michael@0 997 }
michael@0 998 }
michael@0 999 }
michael@0 1000 }
michael@0 1001 }
michael@0 1002
michael@0 1003 return true;
michael@0 1004 }
michael@0 1005
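// Whether use feeds a reusing (MUST_REUSE_INPUT) definition on ins. Unless
// considerCopy is set, a use whose definition has been marked as needing
// an explicit copy of its input does not count.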
michael@0 1006 bool
michael@0 1007 BacktrackingAllocator::isReusedInput(LUse *use, LInstruction *ins, bool considerCopy)
michael@0 1008 {
michael@0 1009 if (LDefinition *def = FindReusingDefinition(ins, use))
michael@0 1010 return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
michael@0 1011 return false;
michael@0 1012 }
michael@0 1013
michael@0 1014 bool
michael@0 1015 BacktrackingAllocator::isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy)
michael@0 1016 {
michael@0 1017 switch (use->policy()) {
michael@0 1018 case LUse::ANY:
michael@0 1019 return isReusedInput(use, ins, considerCopy);
michael@0 1020
michael@0 1021 case LUse::REGISTER:
michael@0 1022 case LUse::FIXED:
michael@0 1023 return true;
michael@0 1024
michael@0 1025 default:
michael@0 1026 return false;
michael@0 1027 }
michael@0 1028 }
michael@0 1029
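// Whether interval is the definition interval of a non-phi vreg whose
// output is not preset to a stack location, i.e. a definition which will
// be given a register.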
michael@0 1030 bool
michael@0 1031 BacktrackingAllocator::isRegisterDefinition(LiveInterval *interval)
michael@0 1032 {
michael@0 1033 if (interval->index() != 0)
michael@0 1034 return false;
michael@0 1035
michael@0 1036 VirtualRegister &reg = vregs[interval->vreg()];
michael@0 1037 if (reg.ins()->isPhi())
michael@0 1038 return false;
michael@0 1039
michael@0 1040 if (reg.def()->policy() == LDefinition::PRESET && !reg.def()->output()->isRegister())
michael@0 1041 return false;
michael@0 1042
michael@0 1043 return true;
michael@0 1044 }
michael@0 1045
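// Write the computed allocations back into the LIR graph: set each
// definition's output, rewrite every use to the allocation of its
// interval, insert copies where a use feeding a MUST_REUSE_INPUT
// definition ended up in a different location than the definition, and
// record live registers for safepoints.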
michael@0 1046 bool
michael@0 1047 BacktrackingAllocator::reifyAllocations()
michael@0 1048 {
michael@0 1049 // Virtual register number 0 is unused.
michael@0 1050 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 1051 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 1052 VirtualRegister *reg = &vregs[i];
michael@0 1053
michael@0 1054 if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
michael@0 1055 return false;
michael@0 1056
michael@0 1057 for (size_t j = 0; j < reg->numIntervals(); j++) {
michael@0 1058 LiveInterval *interval = reg->getInterval(j);
michael@0 1059 JS_ASSERT(interval->index() == j);
michael@0 1060
michael@0 1061 if (interval->index() == 0) {
michael@0 1062 reg->def()->setOutput(*interval->getAllocation());
michael@0 1063 if (reg->ins()->recoversInput()) {
michael@0 1064 LSnapshot *snapshot = reg->ins()->snapshot();
michael@0 1065 for (size_t i = 0; i < snapshot->numEntries(); i++) {
michael@0 1066 LAllocation *entry = snapshot->getEntry(i);
michael@0 1067 if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
michael@0 1068 *entry = *reg->def()->output();
michael@0 1069 }
michael@0 1070 }
michael@0 1071 }
michael@0 1072
michael@0 1073 for (UsePositionIterator iter(interval->usesBegin());
michael@0 1074 iter != interval->usesEnd();
michael@0 1075 iter++)
michael@0 1076 {
michael@0 1077 LAllocation *alloc = iter->use;
michael@0 1078 *alloc = *interval->getAllocation();
michael@0 1079
michael@0 1080 // For any uses which feed into MUST_REUSE_INPUT definitions,
michael@0 1081 // add copies if the use and def have different allocations.
michael@0 1082 LInstruction *ins = insData[iter->pos].ins();
michael@0 1083 if (LDefinition *def = FindReusingDefinition(ins, alloc)) {
michael@0 1084 LiveInterval *outputInterval =
michael@0 1085 vregs[def->virtualRegister()].intervalFor(outputOf(ins));
michael@0 1086 LAllocation *res = outputInterval->getAllocation();
michael@0 1087 LAllocation *sourceAlloc = interval->getAllocation();
michael@0 1088
michael@0 1089 if (*res != *alloc) {
michael@0 1090 LMoveGroup *group = getInputMoveGroup(inputOf(ins));
michael@0 1091 if (!group->addAfter(sourceAlloc, res, def->type()))
michael@0 1092 return false;
michael@0 1093 *alloc = *res;
michael@0 1094 }
michael@0 1095 }
michael@0 1096 }
michael@0 1097
michael@0 1098 addLiveRegistersForInterval(reg, interval);
michael@0 1099 }
michael@0 1100 }
michael@0 1101
michael@0 1102 graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
michael@0 1103 return true;
michael@0 1104 }
michael@0 1105
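// For each traceable virtual register (gcthings, slots/elements pointers,
// nunbox halves or boxed values), record the allocations of its live
// intervals in every safepoint they cover, so the GC can trace and update
// the stored values.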
michael@0 1106 bool
michael@0 1107 BacktrackingAllocator::populateSafepoints()
michael@0 1108 {
michael@0 1109 size_t firstSafepoint = 0;
michael@0 1110
michael@0 1111 // Virtual register number 0 is unused.
michael@0 1112 JS_ASSERT(!vregs[0u].def());
michael@0 1113 for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
michael@0 1114 BacktrackingVirtualRegister *reg = &vregs[i];
michael@0 1115
michael@0 1116 if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
michael@0 1117 continue;
michael@0 1118
michael@0 1119 firstSafepoint = findFirstSafepoint(reg->getInterval(0), firstSafepoint);
michael@0 1120 if (firstSafepoint >= graph.numSafepoints())
michael@0 1121 break;
michael@0 1122
michael@0 1123 // Find the furthest endpoint.
michael@0 1124 CodePosition end = reg->getInterval(0)->end();
michael@0 1125 for (size_t j = 1; j < reg->numIntervals(); j++)
michael@0 1126 end = Max(end, reg->getInterval(j)->end());
michael@0 1127
michael@0 1128 for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
michael@0 1129 LInstruction *ins = graph.getSafepoint(j);
michael@0 1130
michael@0 1131 // Stop processing safepoints if we know we're out of this virtual
michael@0 1132 // register's range.
michael@0 1133 if (end < outputOf(ins))
michael@0 1134 break;
michael@0 1135
michael@0 1136 // Include temps but not instruction outputs. Also make sure MUST_REUSE_INPUT
michael@0 1137 // is not used with gcthings or nunboxes, or we would have to add the input reg
michael@0 1138 // to this safepoint.
michael@0 1139 if (ins == reg->ins() && !reg->isTemp()) {
michael@0 1140 DebugOnly<LDefinition*> def = reg->def();
michael@0 1141 JS_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
michael@0 1142 def->type() == LDefinition::GENERAL ||
michael@0 1143 def->type() == LDefinition::INT32 ||
michael@0 1144 def->type() == LDefinition::FLOAT32 ||
michael@0 1145 def->type() == LDefinition::DOUBLE);
michael@0 1146 continue;
michael@0 1147 }
michael@0 1148
michael@0 1149 LSafepoint *safepoint = ins->safepoint();
michael@0 1150
michael@0 1151 for (size_t k = 0; k < reg->numIntervals(); k++) {
michael@0 1152 LiveInterval *interval = reg->getInterval(k);
michael@0 1153 if (!interval->covers(inputOf(ins)))
michael@0 1154 continue;
michael@0 1155
michael@0 1156 LAllocation *a = interval->getAllocation();
michael@0 1157 if (a->isGeneralReg() && ins->isCall())
michael@0 1158 continue;
michael@0 1159
michael@0 1160 switch (reg->type()) {
michael@0 1161 case LDefinition::OBJECT:
michael@0 1162 safepoint->addGcPointer(*a);
michael@0 1163 break;
michael@0 1164 case LDefinition::SLOTS:
michael@0 1165 safepoint->addSlotsOrElementsPointer(*a);
michael@0 1166 break;
michael@0 1167 #ifdef JS_NUNBOX32
michael@0 1168 case LDefinition::TYPE:
michael@0 1169 safepoint->addNunboxType(i, *a);
michael@0 1170 break;
michael@0 1171 case LDefinition::PAYLOAD:
michael@0 1172 safepoint->addNunboxPayload(i, *a);
michael@0 1173 break;
michael@0 1174 #else
michael@0 1175 case LDefinition::BOX:
michael@0 1176 safepoint->addBoxedValue(*a);
michael@0 1177 break;
michael@0 1178 #endif
michael@0 1179 default:
michael@0 1180 MOZ_ASSUME_UNREACHABLE("Bad register type");
michael@0 1181 }
michael@0 1182 }
michael@0 1183 }
michael@0 1184 }
michael@0 1185
michael@0 1186 return true;
michael@0 1187 }
michael@0 1188
michael@0 1189 void
michael@0 1190 BacktrackingAllocator::dumpRegisterGroups()
michael@0 1191 {
michael@0 1192 fprintf(stderr, "Register groups:\n");
michael@0 1193
michael@0 1194 // Virtual register number 0 is unused.
michael@0 1195 JS_ASSERT(!vregs[0u].group());
michael@0 1196 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 1197 VirtualRegisterGroup *group = vregs[i].group();
michael@0 1198 if (group && i == group->canonicalReg()) {
michael@0 1199 for (size_t j = 0; j < group->registers.length(); j++)
michael@0 1200 fprintf(stderr, " v%u", group->registers[j]);
michael@0 1201 fprintf(stderr, "\n");
michael@0 1202 }
michael@0 1203 }
michael@0 1204 }
michael@0 1205
michael@0 1206 void
michael@0 1207 BacktrackingAllocator::dumpLiveness()
michael@0 1208 {
michael@0 1209 #ifdef DEBUG
michael@0 1210 fprintf(stderr, "Virtual Registers:\n");
michael@0 1211
michael@0 1212 for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
michael@0 1213 LBlock *block = graph.getBlock(blockIndex);
michael@0 1214 MBasicBlock *mir = block->mir();
michael@0 1215
michael@0 1216 fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
michael@0 1217 for (size_t i = 0; i < mir->numSuccessors(); i++)
michael@0 1218 fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
michael@0 1219 fprintf(stderr, "\n");
michael@0 1220
michael@0 1221 for (size_t i = 0; i < block->numPhis(); i++) {
michael@0 1222 LPhi *phi = block->getPhi(i);
michael@0 1223
michael@0 1224 // Don't print the inputOf for phi nodes, since it's never used.
michael@0 1225 fprintf(stderr, "[,%u Phi [def v%u] <-",
michael@0 1226 outputOf(phi).pos(),
michael@0 1227 phi->getDef(0)->virtualRegister());
michael@0 1228 for (size_t j = 0; j < phi->numOperands(); j++)
michael@0 1229 fprintf(stderr, " %s", phi->getOperand(j)->toString());
michael@0 1230 fprintf(stderr, "]\n");
michael@0 1231 }
michael@0 1232
michael@0 1233 for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
michael@0 1234 LInstruction *ins = *iter;
michael@0 1235
michael@0 1236 fprintf(stderr, "[%u,%u %s]", inputOf(ins).pos(), outputOf(ins).pos(), ins->opName());
michael@0 1237
michael@0 1238 for (size_t i = 0; i < ins->numTemps(); i++) {
michael@0 1239 LDefinition *temp = ins->getTemp(i);
michael@0 1240 if (!temp->isBogusTemp())
michael@0 1241 fprintf(stderr, " [temp v%u]", temp->virtualRegister());
michael@0 1242 }
michael@0 1243
michael@0 1244 for (size_t i = 0; i < ins->numDefs(); i++) {
michael@0 1245 LDefinition *def = ins->getDef(i);
michael@0 1246 fprintf(stderr, " [def v%u]", def->virtualRegister());
michael@0 1247 }
michael@0 1248
michael@0 1249 for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
michael@0 1250 fprintf(stderr, " [use %s]", alloc->toString());
michael@0 1251
michael@0 1252 fprintf(stderr, "\n");
michael@0 1253 }
michael@0 1254 }
michael@0 1255
michael@0 1256 fprintf(stderr, "\nLive Ranges:\n\n");
michael@0 1257
michael@0 1258 for (size_t i = 0; i < AnyRegister::Total; i++)
michael@0 1259 if (registers[i].allocatable)
michael@0 1260 fprintf(stderr, "reg %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->rangesToString());
michael@0 1261
michael@0 1262 // Virtual register number 0 is unused.
michael@0 1263 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 1264 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 1265 fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
michael@0 1266 VirtualRegister &vreg = vregs[i];
michael@0 1267 for (size_t j = 0; j < vreg.numIntervals(); j++) {
michael@0 1268 if (j)
michael@0 1269 fprintf(stderr, " *");
michael@0 1270 fprintf(stderr, "%s", vreg.getInterval(j)->rangesToString());
michael@0 1271 }
michael@0 1272 fprintf(stderr, "\n");
michael@0 1273 }
michael@0 1274
michael@0 1275 fprintf(stderr, "\n");
michael@0 1276 #endif // DEBUG
michael@0 1277 }
michael@0 1278
michael@0 1279 #ifdef DEBUG
michael@0 1280 struct BacktrackingAllocator::PrintLiveIntervalRange
michael@0 1281 {
michael@0 1282 void operator()(const AllocatedRange &item)
michael@0 1283 {
michael@0 1284 if (item.range == item.interval->getRange(0)) {
michael@0 1285 if (item.interval->hasVreg())
michael@0 1286 fprintf(stderr, " v%u: %s\n",
michael@0 1287 item.interval->vreg(),
michael@0 1288 item.interval->rangesToString());
michael@0 1289 else
michael@0 1290 fprintf(stderr, " fixed: %s\n",
michael@0 1291 item.interval->rangesToString());
michael@0 1292 }
michael@0 1293 }
michael@0 1294 };
michael@0 1295 #endif
michael@0 1296
michael@0 1297 void
michael@0 1298 BacktrackingAllocator::dumpAllocations()
michael@0 1299 {
michael@0 1300 #ifdef DEBUG
michael@0 1301 fprintf(stderr, "Allocations:\n");
michael@0 1302
michael@0 1303 // Virtual register number 0 is unused.
michael@0 1304 JS_ASSERT(vregs[0u].numIntervals() == 0);
michael@0 1305 for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
michael@0 1306 fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
michael@0 1307 VirtualRegister &vreg = vregs[i];
michael@0 1308 for (size_t j = 0; j < vreg.numIntervals(); j++) {
michael@0 1309 if (j)
michael@0 1310 fprintf(stderr, " *");
michael@0 1311 LiveInterval *interval = vreg.getInterval(j);
michael@0 1312 fprintf(stderr, "%s :: %s", interval->rangesToString(), interval->getAllocation()->toString());
michael@0 1313 }
michael@0 1314 fprintf(stderr, "\n");
michael@0 1315 }
michael@0 1316
michael@0 1317 fprintf(stderr, "\n");
michael@0 1318
michael@0 1319 for (size_t i = 0; i < AnyRegister::Total; i++) {
michael@0 1320 if (registers[i].allocatable) {
michael@0 1321 fprintf(stderr, "reg %s:\n", AnyRegister::FromCode(i).name());
michael@0 1322 registers[i].allocations.forEach(PrintLiveIntervalRange());
michael@0 1323 }
michael@0 1324 }
michael@0 1325
michael@0 1326 fprintf(stderr, "\n");
michael@0 1327 #endif // DEBUG
michael@0 1328 }
michael@0 1329
michael@0 1330 bool
michael@0 1331 BacktrackingAllocator::addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
michael@0 1332 LiveInterval *spillInterval,
michael@0 1333 CodePosition from, CodePosition to)
michael@0 1334 {
michael@0 1335 LiveInterval *interval = LiveInterval::New(alloc(), vreg, 0);
michael@0 1336 interval->setSpillInterval(spillInterval);
michael@0 1337 return interval->addRange(from, to) && intervals.append(interval);
michael@0 1338 }
michael@0 1339
michael@0 1340 ///////////////////////////////////////////////////////////////////////////////
michael@0 1341 // Heuristic Methods
michael@0 1342 ///////////////////////////////////////////////////////////////////////////////
michael@0 1343
michael@0 1344 size_t
michael@0 1345 BacktrackingAllocator::computePriority(const LiveInterval *interval)
michael@0 1346 {
michael@0 1347 // The priority of an interval is its total length, so that longer lived
michael@0 1348 // intervals will be processed before shorter ones (even if the longer ones
michael@0 1349 // have a low spill weight). See processInterval().
michael@0 1350 size_t lifetimeTotal = 0;
michael@0 1351
michael@0 1352 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 1353 const LiveInterval::Range *range = interval->getRange(i);
michael@0 1354 lifetimeTotal += range->to.pos() - range->from.pos();
michael@0 1355 }
michael@0 1356
michael@0 1357 return lifetimeTotal;
michael@0 1358 }
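
// Worked example for computePriority() (illustrative; the positions are
// hypothetical): an interval with ranges [4,10) and [20,24) has priority
// (10 - 4) + (24 - 20) = 10, so it is dequeued before an interval whose only
// range is [4,8) (priority 4), even if the shorter interval has a higher
// spill weight.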
michael@0 1359
michael@0 1360 size_t
michael@0 1361 BacktrackingAllocator::computePriority(const VirtualRegisterGroup *group)
michael@0 1362 {
michael@0 1363 size_t priority = 0;
michael@0 1364 for (size_t j = 0; j < group->registers.length(); j++) {
michael@0 1365 uint32_t vreg = group->registers[j];
michael@0 1366 priority += computePriority(vregs[vreg].getInterval(0));
michael@0 1367 }
michael@0 1368 return priority;
michael@0 1369 }
michael@0 1370
michael@0 1371 bool
michael@0 1372 BacktrackingAllocator::minimalDef(const LiveInterval *interval, LInstruction *ins)
michael@0 1373 {
michael@0 1374 // Whether interval is a minimal interval capturing a definition at ins.
michael@0 1375 return (interval->end() <= minimalDefEnd(ins).next()) &&
michael@0 1376 ((!ins->isPhi() && interval->start() == inputOf(ins)) || interval->start() == outputOf(ins));
michael@0 1377 }
michael@0 1378
michael@0 1379 bool
michael@0 1380 BacktrackingAllocator::minimalUse(const LiveInterval *interval, LInstruction *ins)
michael@0 1381 {
michael@0 1382 // Whether interval is a minimal interval capturing a use at ins.
michael@0 1383 return (interval->start() == inputOf(ins)) &&
michael@0 1384 (interval->end() == outputOf(ins) || interval->end() == outputOf(ins).next());
michael@0 1385 }
michael@0 1386
michael@0 1387 bool
michael@0 1388 BacktrackingAllocator::minimalInterval(const LiveInterval *interval, bool *pfixed)
michael@0 1389 {
michael@0 1390 if (!interval->hasVreg()) {
michael@0 1391 *pfixed = true;
michael@0 1392 return true;
michael@0 1393 }
michael@0 1394
michael@0 1395 if (interval->index() == 0) {
michael@0 1396 VirtualRegister &reg = vregs[interval->vreg()];
michael@0 1397 if (pfixed)
michael@0 1398 *pfixed = reg.def()->policy() == LDefinition::PRESET && reg.def()->output()->isRegister();
michael@0 1399 return minimalDef(interval, reg.ins());
michael@0 1400 }
michael@0 1401
michael@0 1402 bool fixed = false, minimal = false;
michael@0 1403
michael@0 1404 for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
michael@0 1405 LUse *use = iter->use;
michael@0 1406
michael@0 1407 switch (use->policy()) {
michael@0 1408 case LUse::FIXED:
michael@0 1409 if (fixed)
michael@0 1410 return false;
michael@0 1411 fixed = true;
michael@0 1412 if (minimalUse(interval, insData[iter->pos].ins()))
michael@0 1413 minimal = true;
michael@0 1414 break;
michael@0 1415
michael@0 1416 case LUse::REGISTER:
michael@0 1417 if (minimalUse(interval, insData[iter->pos].ins()))
michael@0 1418 minimal = true;
michael@0 1419 break;
michael@0 1420
michael@0 1421 default:
michael@0 1422 break;
michael@0 1423 }
michael@0 1424 }
michael@0 1425
michael@0 1426 if (pfixed)
michael@0 1427 *pfixed = fixed;
michael@0 1428 return minimal;
michael@0 1429 }
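
// Illustrative cases for minimalInterval(): a split interval (index() != 0)
// whose single range is [inputOf(ins), outputOf(ins).next()) and whose only
// use is an LUse::REGISTER at ins satisfies minimalUse() and is reported as
// minimal and non-fixed. An interval carrying two LUse::FIXED uses is never
// reported as minimal, since the two uses may demand incompatible registers.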
michael@0 1430
michael@0 1431 size_t
michael@0 1432 BacktrackingAllocator::computeSpillWeight(const LiveInterval *interval)
michael@0 1433 {
michael@0 1434 // Minimal intervals have an extremely high spill weight, to ensure they
michael@0 1435 // can evict any other intervals and be allocated to a register.
michael@0 1436 bool fixed;
michael@0 1437 if (minimalInterval(interval, &fixed))
michael@0 1438 return fixed ? 2000000 : 1000000;
michael@0 1439
michael@0 1440 size_t usesTotal = 0;
michael@0 1441
michael@0 1442 if (interval->index() == 0) {
michael@0 1443 VirtualRegister *reg = &vregs[interval->vreg()];
michael@0 1444 if (reg->def()->policy() == LDefinition::PRESET && reg->def()->output()->isRegister())
michael@0 1445 usesTotal += 2000;
michael@0 1446 else if (!reg->ins()->isPhi())
michael@0 1447 usesTotal += 2000;
michael@0 1448 }
michael@0 1449
michael@0 1450 for (UsePositionIterator iter = interval->usesBegin(); iter != interval->usesEnd(); iter++) {
michael@0 1451 LUse *use = iter->use;
michael@0 1452
michael@0 1453 switch (use->policy()) {
michael@0 1454 case LUse::ANY:
michael@0 1455 usesTotal += 1000;
michael@0 1456 break;
michael@0 1457
michael@0 1458 case LUse::REGISTER:
michael@0 1459 case LUse::FIXED:
michael@0 1460 usesTotal += 2000;
michael@0 1461 break;
michael@0 1462
michael@0 1463 case LUse::KEEPALIVE:
michael@0 1464 break;
michael@0 1465
michael@0 1466 default:
michael@0 1467 // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
michael@0 1468 MOZ_ASSUME_UNREACHABLE("Bad use");
michael@0 1469 }
michael@0 1470 }
michael@0 1471
michael@0 1472 // Intervals for registers in groups get higher weights.
michael@0 1473 if (interval->hint()->kind() != Requirement::NONE)
michael@0 1474 usesTotal += 2000;
michael@0 1475
michael@0 1476 // Compute spill weight as a use density, lowering the weight for long
michael@0 1477 // lived intervals with relatively few uses.
michael@0 1478 size_t lifetimeTotal = computePriority(interval);
michael@0 1479 return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
michael@0 1480 }
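
// Worked example for computeSpillWeight() (illustrative): a non-minimal
// defining interval (index() == 0) for a non-phi definition contributes 2000,
// a single LUse::REGISTER use contributes another 2000, and a KEEPALIVE use
// contributes nothing; with no hint that gives usesTotal = 4000. If the
// ranges span 8 code positions, the weight is 4000 / 8 = 500, far below the
// flat 1000000 (or 2000000 if fixed) that a minimal interval receives.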
michael@0 1481
michael@0 1482 size_t
michael@0 1483 BacktrackingAllocator::computeSpillWeight(const VirtualRegisterGroup *group)
michael@0 1484 {
michael@0 1485 size_t maxWeight = 0;
michael@0 1486 for (size_t j = 0; j < group->registers.length(); j++) {
michael@0 1487 uint32_t vreg = group->registers[j];
michael@0 1488 maxWeight = Max(maxWeight, computeSpillWeight(vregs[vreg].getInterval(0)));
michael@0 1489 }
michael@0 1490 return maxWeight;
michael@0 1491 }
michael@0 1492
michael@0 1493 bool
michael@0 1494 BacktrackingAllocator::trySplitAcrossHotcode(LiveInterval *interval, bool *success)
michael@0 1495 {
michael@0 1496 // If this interval has portions that are hot and portions that are cold,
michael@0 1497 // split it at the boundaries between hot and cold code.
michael@0 1498
michael@0 1499 const LiveInterval::Range *hotRange = nullptr;
michael@0 1500
michael@0 1501 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 1502 AllocatedRange range(interval, interval->getRange(i)), existing;
michael@0 1503 if (hotcode.contains(range, &existing)) {
michael@0 1504 hotRange = existing.range;
michael@0 1505 break;
michael@0 1506 }
michael@0 1507 }
michael@0 1508
michael@0 1509 // Don't split if there is no hot code in the interval.
michael@0 1510 if (!hotRange)
michael@0 1511 return true;
michael@0 1512
michael@0 1513 // Don't split if there is no cold code in the interval.
michael@0 1514 bool coldCode = false;
michael@0 1515 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 1516 if (!hotRange->contains(interval->getRange(i))) {
michael@0 1517 coldCode = true;
michael@0 1518 break;
michael@0 1519 }
michael@0 1520 }
michael@0 1521 if (!coldCode)
michael@0 1522 return true;
michael@0 1523
michael@0 1524 SplitPositionVector splitPositions;
michael@0 1525 if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
michael@0 1526 return false;
michael@0 1527 *success = true;
michael@0 1528 return splitAt(interval, splitPositions);
michael@0 1529 }
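
// Example for trySplitAcrossHotcode() (hypothetical positions): if the
// interval covers [10,100) and the hotcode structure reports a hot range of
// [40,60), the split positions are 40 and 60, so splitAt() separates the
// uses before, inside and after the loop body; the hot piece can then be
// kept in a register while the cold pieces are cheaper to spill.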
michael@0 1530
michael@0 1531 bool
michael@0 1532 BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success)
michael@0 1533 {
michael@0 1534 // If this interval's later uses do not require it to be in a register,
michael@0 1535 // split it after the last use which does require a register. If conflict
michael@0 1536 // is specified, only consider register uses before the conflict starts.
michael@0 1537
michael@0 1538 CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
michael@0 1539
michael@0 1540 for (UsePositionIterator iter(interval->usesBegin());
michael@0 1541 iter != interval->usesEnd();
michael@0 1542 iter++)
michael@0 1543 {
michael@0 1544 LUse *use = iter->use;
michael@0 1545 LInstruction *ins = insData[iter->pos].ins();
michael@0 1546
michael@0 1547 // Uses in the interval should be sorted.
michael@0 1548 JS_ASSERT(iter->pos >= lastUse);
michael@0 1549 lastUse = inputOf(ins);
michael@0 1550
michael@0 1551 if (!conflict || outputOf(ins) < conflict->start()) {
michael@0 1552 if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
michael@0 1553 lastRegisterFrom = inputOf(ins);
michael@0 1554 lastRegisterTo = iter->pos.next();
michael@0 1555 }
michael@0 1556 }
michael@0 1557 }
michael@0 1558
michael@0 1559 if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
michael@0 1560 // Can't trim non-register uses off the end by splitting.
michael@0 1561 return true;
michael@0 1562 }
michael@0 1563
michael@0 1564 SplitPositionVector splitPositions;
michael@0 1565 if (!splitPositions.append(lastRegisterTo))
michael@0 1566 return false;
michael@0 1567 *success = true;
michael@0 1568 return splitAt(interval, splitPositions);
michael@0 1569 }
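
// Example for trySplitAfterLastRegisterUse() (hypothetical positions, no
// conflict interval): if the interval has LUse::REGISTER uses at positions
// 20 and 36 and only KEEPALIVE uses after that, the single split position is
// just after 36; the trailing portion only needs to stay alive and can be
// left in the spill location.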
michael@0 1570
michael@0 1571 bool
michael@0 1572 BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
michael@0 1573 {
michael@0 1574 // Split this interval so that all its register uses become minimal
michael@0 1575 // intervals and allow the vreg to be spilled throughout its range.
michael@0 1576
michael@0 1577 LiveIntervalVector newIntervals;
michael@0 1578 uint32_t vreg = interval->vreg();
michael@0 1579
michael@0 1580 // If this LiveInterval is the result of an earlier split which created a
michael@0 1581 // spill interval, that spill interval covers the whole range, so we don't
michael@0 1582 // need to create a new one.
michael@0 1583 bool spillIntervalIsNew = false;
michael@0 1584 LiveInterval *spillInterval = interval->spillInterval();
michael@0 1585 if (!spillInterval) {
michael@0 1586 spillInterval = LiveInterval::New(alloc(), vreg, 0);
michael@0 1587 spillIntervalIsNew = true;
michael@0 1588 }
michael@0 1589
michael@0 1590 CodePosition spillStart = interval->start();
michael@0 1591 if (isRegisterDefinition(interval)) {
michael@0 1592 // Treat the definition of the interval as a register use so that it
michael@0 1593 // can be split and spilled ASAP.
michael@0 1594 CodePosition from = interval->start();
michael@0 1595 CodePosition to = minimalDefEnd(insData[from].ins()).next();
michael@0 1596 if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
michael@0 1597 return false;
michael@0 1598 spillStart = to;
michael@0 1599 }
michael@0 1600
michael@0 1601 if (spillIntervalIsNew) {
michael@0 1602 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 1603 const LiveInterval::Range *range = interval->getRange(i);
michael@0 1604 CodePosition from = range->from < spillStart ? spillStart : range->from;
michael@0 1605 if (!spillInterval->addRange(from, range->to))
michael@0 1606 return false;
michael@0 1607 }
michael@0 1608 }
michael@0 1609
michael@0 1610 for (UsePositionIterator iter(interval->usesBegin());
michael@0 1611 iter != interval->usesEnd();
michael@0 1612 iter++)
michael@0 1613 {
michael@0 1614 LInstruction *ins = insData[iter->pos].ins();
michael@0 1615 if (iter->pos < spillStart) {
michael@0 1616 newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1617 } else if (isRegisterUse(iter->use, ins)) {
michael@0 1618 // For register uses which are not useRegisterAtStart, pick an
michael@0 1619 // interval that covers both the instruction's input and output, so
michael@0 1620 // that the register is not reused for an output.
michael@0 1621 CodePosition from = inputOf(ins);
michael@0 1622 CodePosition to = iter->pos.next();
michael@0 1623
michael@0 1624 // Use the same interval for duplicate use positions, except when
michael@0 1625 // the uses are fixed (they may require incompatible registers).
michael@0 1626 if (newIntervals.empty() || newIntervals.back()->end() != to || iter->use->policy() == LUse::FIXED) {
michael@0 1627 if (!addLiveInterval(newIntervals, vreg, spillInterval, from, to))
michael@0 1628 return false;
michael@0 1629 }
michael@0 1630
michael@0 1631 newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1632 } else {
michael@0 1633 JS_ASSERT(spillIntervalIsNew);
michael@0 1634 spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1635 }
michael@0 1636 }
michael@0 1637
michael@0 1638 if (spillIntervalIsNew && !newIntervals.append(spillInterval))
michael@0 1639 return false;
michael@0 1640
michael@0 1641 return split(interval, newIntervals) && requeueIntervals(newIntervals);
michael@0 1642 }
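
// Example for splitAtAllRegisterUses() (illustrative, assuming no spill
// interval exists yet): for a vreg with a register definition at position 10,
// LUse::REGISTER uses at 18 and 30, and a KEEPALIVE use at 40, this produces
// a minimal interval around the definition, one minimal interval per register
// use, and a spill interval covering the remainder of the range that also
// carries the KEEPALIVE use.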
michael@0 1643
michael@0 1644 // Find the next split position after the current position.
michael@0 1645 static size_t NextSplitPosition(size_t activeSplitPosition,
michael@0 1646 const SplitPositionVector &splitPositions,
michael@0 1647 CodePosition currentPos)
michael@0 1648 {
michael@0 1649 while (activeSplitPosition < splitPositions.length() &&
michael@0 1650 splitPositions[activeSplitPosition] <= currentPos)
michael@0 1651 {
michael@0 1652 ++activeSplitPosition;
michael@0 1653 }
michael@0 1654 return activeSplitPosition;
michael@0 1655 }
michael@0 1656
michael@0 1657 // Test whether the current position has just crossed a split point.
michael@0 1658 static bool SplitHere(size_t activeSplitPosition,
michael@0 1659 const SplitPositionVector &splitPositions,
michael@0 1660 CodePosition currentPos)
michael@0 1661 {
michael@0 1662 return activeSplitPosition < splitPositions.length() &&
michael@0 1663 currentPos >= splitPositions[activeSplitPosition];
michael@0 1664 }
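
// Example for NextSplitPosition()/SplitHere() (illustrative): with
// splitPositions = [10, 30] and an interval starting at position 5,
// NextSplitPosition(0, ..., 5) returns 0. A use at position 8 does not
// trigger SplitHere (8 < 10); a use at 12 does (12 >= 10), after which the
// active index advances past 10; a later use at 35 triggers it again
// (35 >= 30).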
michael@0 1665
michael@0 1666 bool
michael@0 1667 BacktrackingAllocator::splitAt(LiveInterval *interval,
michael@0 1668 const SplitPositionVector &splitPositions)
michael@0 1669 {
michael@0 1670 // Split the interval at the given split points. Unlike splitAtAllRegisterUses,
michael@0 1671 // consolidate any register uses which have no intervening split points into the
michael@0 1672 // same resulting interval.
michael@0 1673
michael@0 1674 // splitPositions should be non-empty and sorted.
michael@0 1675 JS_ASSERT(!splitPositions.empty());
michael@0 1676 for (size_t i = 1; i < splitPositions.length(); ++i)
michael@0 1677 JS_ASSERT(splitPositions[i-1] < splitPositions[i]);
michael@0 1678
michael@0 1679 // Don't spill the interval until after the end of its definition.
michael@0 1680 CodePosition spillStart = interval->start();
michael@0 1681 if (isRegisterDefinition(interval))
michael@0 1682 spillStart = minimalDefEnd(insData[interval->start()].ins()).next();
michael@0 1683
michael@0 1684 uint32_t vreg = interval->vreg();
michael@0 1685
michael@0 1686 // If this LiveInterval is the result of an earlier split which created a
michael@0 1687 // spill interval, that spill interval covers the whole range, so we don't
michael@0 1688 // need to create a new one.
michael@0 1689 bool spillIntervalIsNew = false;
michael@0 1690 LiveInterval *spillInterval = interval->spillInterval();
michael@0 1691 if (!spillInterval) {
michael@0 1692 spillInterval = LiveInterval::New(alloc(), vreg, 0);
michael@0 1693 spillIntervalIsNew = true;
michael@0 1694
michael@0 1695 for (size_t i = 0; i < interval->numRanges(); i++) {
michael@0 1696 const LiveInterval::Range *range = interval->getRange(i);
michael@0 1697 CodePosition from = range->from < spillStart ? spillStart : range->from;
michael@0 1698 if (!spillInterval->addRange(from, range->to))
michael@0 1699 return false;
michael@0 1700 }
michael@0 1701 }
michael@0 1702
michael@0 1703 LiveIntervalVector newIntervals;
michael@0 1704
michael@0 1705 CodePosition lastRegisterUse;
michael@0 1706 if (spillStart != interval->start()) {
michael@0 1707 LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
michael@0 1708 newInterval->setSpillInterval(spillInterval);
michael@0 1709 if (!newIntervals.append(newInterval))
michael@0 1710 return false;
michael@0 1711 lastRegisterUse = interval->start();
michael@0 1712 }
michael@0 1713
michael@0 1714 size_t activeSplitPosition = NextSplitPosition(0, splitPositions, interval->start());
michael@0 1715 for (UsePositionIterator iter(interval->usesBegin()); iter != interval->usesEnd(); iter++) {
michael@0 1716 LInstruction *ins = insData[iter->pos].ins();
michael@0 1717 if (iter->pos < spillStart) {
michael@0 1718 newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1719 activeSplitPosition = NextSplitPosition(activeSplitPosition, splitPositions, iter->pos);
michael@0 1720 } else if (isRegisterUse(iter->use, ins)) {
michael@0 1721 if (lastRegisterUse.pos() == 0 ||
michael@0 1722 SplitHere(activeSplitPosition, splitPositions, iter->pos))
michael@0 1723 {
michael@0 1724 // Place this register use into a different interval from the
michael@0 1725 // last one if there are any split points between the two uses.
michael@0 1726 LiveInterval *newInterval = LiveInterval::New(alloc(), vreg, 0);
michael@0 1727 newInterval->setSpillInterval(spillInterval);
michael@0 1728 if (!newIntervals.append(newInterval))
michael@0 1729 return false;
michael@0 1730 activeSplitPosition = NextSplitPosition(activeSplitPosition,
michael@0 1731 splitPositions,
michael@0 1732 iter->pos);
michael@0 1733 }
michael@0 1734 newIntervals.back()->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1735 lastRegisterUse = iter->pos;
michael@0 1736 } else {
michael@0 1737 JS_ASSERT(spillIntervalIsNew);
michael@0 1738 spillInterval->addUseAtEnd(new(alloc()) UsePosition(iter->use, iter->pos));
michael@0 1739 }
michael@0 1740 }
michael@0 1741
michael@0 1742 // Compute ranges for each new interval that cover all its uses.
michael@0 1743 size_t activeRange = interval->numRanges();
michael@0 1744 for (size_t i = 0; i < newIntervals.length(); i++) {
michael@0 1745 LiveInterval *newInterval = newIntervals[i];
michael@0 1746 CodePosition start, end;
michael@0 1747 if (i == 0 && spillStart != interval->start()) {
michael@0 1748 start = interval->start();
michael@0 1749 if (newInterval->usesEmpty())
michael@0 1750 end = spillStart;
michael@0 1751 else
michael@0 1752 end = newInterval->usesBack()->pos.next();
michael@0 1753 } else {
michael@0 1754 start = inputOf(insData[newInterval->usesBegin()->pos].ins());
michael@0 1755 end = newInterval->usesBack()->pos.next();
michael@0 1756 }
michael@0 1757 for (; activeRange > 0; --activeRange) {
michael@0 1758 const LiveInterval::Range *range = interval->getRange(activeRange - 1);
michael@0 1759 if (range->to <= start)
michael@0 1760 continue;
michael@0 1761 if (range->from >= end)
michael@0 1762 break;
michael@0 1763 if (!newInterval->addRange(Max(range->from, start),
michael@0 1764 Min(range->to, end)))
michael@0 1765 return false;
michael@0 1766 if (range->to >= end)
michael@0 1767 break;
michael@0 1768 }
michael@0 1769 }
michael@0 1770
michael@0 1771 if (spillIntervalIsNew && !newIntervals.append(spillInterval))
michael@0 1772 return false;
michael@0 1773
michael@0 1774 return split(interval, newIntervals) && requeueIntervals(newIntervals);
michael@0 1775 }
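
// Example for splitAt() (illustrative): splitting an interval that has
// register uses at positions 12, 14 and 40 around splitPositions = [30]
// keeps the uses at 12 and 14 in one new interval (no split point lies
// between them), places the use at 40 in a second interval, and backs both
// with a spill interval covering the original range.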
michael@0 1776
michael@0 1777 bool
michael@0 1778 BacktrackingAllocator::splitAcrossCalls(LiveInterval *interval)
michael@0 1779 {
michael@0 1780 // Split the interval at the locations of any call instructions it covers,
michael@0 1781 // allowing the vreg to be spilled across those calls.
michael@0 1782
michael@0 1783 // Find the locations of all calls in the interval's range. Fixed intervals
michael@0 1784 // are introduced by buildLivenessInfo only for calls when allocating for
michael@0 1785 // the backtracking allocator. fixedIntervalsUnion is sorted backwards, so
michael@0 1786 // iterate through it backwards.
michael@0 1787 SplitPositionVector callPositions;
michael@0 1788 for (size_t i = fixedIntervalsUnion->numRanges(); i > 0; i--) {
michael@0 1789 const LiveInterval::Range *range = fixedIntervalsUnion->getRange(i - 1);
michael@0 1790 if (interval->covers(range->from) && interval->covers(range->from.previous())) {
michael@0 1791 if (!callPositions.append(range->from))
michael@0 1792 return false;
michael@0 1793 }
michael@0 1794 }
michael@0 1795 JS_ASSERT(callPositions.length());
michael@0 1796
michael@0 1797 return splitAt(interval, callPositions);
michael@0 1798 }
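
// Example for splitAcrossCalls() (hypothetical positions): if
// fixedIntervalsUnion records calls at positions 24 and 56 and the interval
// is live across both, callPositions becomes [24, 56] (in ascending order,
// as splitAt() requires), and splitAt() then separates the portions before,
// between and after the calls so the value can sit in its spill location
// across each call.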
michael@0 1799
michael@0 1800 bool
michael@0 1801 BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
michael@0 1802 {
michael@0 1803 bool success = false;
michael@0 1804
michael@0 1805 if (!trySplitAcrossHotcode(interval, &success))
michael@0 1806 return false;
michael@0 1807 if (success)
michael@0 1808 return true;
michael@0 1809
michael@0 1810 if (!trySplitAfterLastRegisterUse(interval, conflict, &success))
michael@0 1811 return false;
michael@0 1812 if (success)
michael@0 1813 return true;
michael@0 1814
michael@0 1815 return splitAtAllRegisterUses(interval);
michael@0 1816 }
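
// Illustrative flow for chooseIntervalSplit(): an interval overlapping both
// hot and cold code is handled by trySplitAcrossHotcode(); failing that, an
// interval whose later uses never need a register is trimmed by
// trySplitAfterLastRegisterUse(); only if both strategies decline does the
// allocator fall back to splitAtAllRegisterUses(), the finest-grained split.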
