Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/RegisterAllocator.h" |
michael@0 | 8 | |
michael@0 | 9 | using namespace js; |
michael@0 | 10 | using namespace js::jit; |
michael@0 | 11 | |
michael@0 | 12 | bool |
michael@0 | 13 | AllocationIntegrityState::record() |
michael@0 | 14 | { |
michael@0 | 15 | // Ignore repeated record() calls. |
michael@0 | 16 | if (!instructions.empty()) |
michael@0 | 17 | return true; |
michael@0 | 18 | |
michael@0 | 19 | if (!instructions.appendN(InstructionInfo(), graph.numInstructions())) |
michael@0 | 20 | return false; |
michael@0 | 21 | |
michael@0 | 22 | if (!virtualRegisters.appendN((LDefinition *)nullptr, graph.numVirtualRegisters())) |
michael@0 | 23 | return false; |
michael@0 | 24 | |
michael@0 | 25 | if (!blocks.reserve(graph.numBlocks())) |
michael@0 | 26 | return false; |
michael@0 | 27 | for (size_t i = 0; i < graph.numBlocks(); i++) { |
michael@0 | 28 | blocks.infallibleAppend(BlockInfo()); |
michael@0 | 29 | LBlock *block = graph.getBlock(i); |
michael@0 | 30 | JS_ASSERT(block->mir()->id() == i); |
michael@0 | 31 | |
michael@0 | 32 | BlockInfo &blockInfo = blocks[i]; |
michael@0 | 33 | if (!blockInfo.phis.reserve(block->numPhis())) |
michael@0 | 34 | return false; |
michael@0 | 35 | |
michael@0 | 36 | for (size_t j = 0; j < block->numPhis(); j++) { |
michael@0 | 37 | blockInfo.phis.infallibleAppend(InstructionInfo()); |
michael@0 | 38 | InstructionInfo &info = blockInfo.phis[j]; |
michael@0 | 39 | LPhi *phi = block->getPhi(j); |
michael@0 | 40 | JS_ASSERT(phi->numDefs() == 1); |
michael@0 | 41 | uint32_t vreg = phi->getDef(0)->virtualRegister(); |
michael@0 | 42 | virtualRegisters[vreg] = phi->getDef(0); |
michael@0 | 43 | if (!info.outputs.append(*phi->getDef(0))) |
michael@0 | 44 | return false; |
michael@0 | 45 | for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) { |
michael@0 | 46 | if (!info.inputs.append(*phi->getOperand(k))) |
michael@0 | 47 | return false; |
michael@0 | 48 | } |
michael@0 | 49 | } |
michael@0 | 50 | |
michael@0 | 51 | for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { |
michael@0 | 52 | LInstruction *ins = *iter; |
michael@0 | 53 | InstructionInfo &info = instructions[ins->id()]; |
michael@0 | 54 | |
michael@0 | 55 | for (size_t k = 0; k < ins->numTemps(); k++) { |
michael@0 | 56 | uint32_t vreg = ins->getTemp(k)->virtualRegister(); |
michael@0 | 57 | virtualRegisters[vreg] = ins->getTemp(k); |
michael@0 | 58 | if (!info.temps.append(*ins->getTemp(k))) |
michael@0 | 59 | return false; |
michael@0 | 60 | } |
michael@0 | 61 | for (size_t k = 0; k < ins->numDefs(); k++) { |
michael@0 | 62 | uint32_t vreg = ins->getDef(k)->virtualRegister(); |
michael@0 | 63 | virtualRegisters[vreg] = ins->getDef(k); |
michael@0 | 64 | if (!info.outputs.append(*ins->getDef(k))) |
michael@0 | 65 | return false; |
michael@0 | 66 | } |
michael@0 | 67 | for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { |
michael@0 | 68 | if (!info.inputs.append(**alloc)) |
michael@0 | 69 | return false; |
michael@0 | 70 | } |
michael@0 | 71 | } |
michael@0 | 72 | } |
michael@0 | 73 | |
michael@0 | 74 | return seen.init(); |
michael@0 | 75 | } |
michael@0 | 76 | |
michael@0 | 77 | bool |
michael@0 | 78 | AllocationIntegrityState::check(bool populateSafepoints) |
michael@0 | 79 | { |
michael@0 | 80 | JS_ASSERT(!instructions.empty()); |
michael@0 | 81 | |
michael@0 | 82 | #ifdef DEBUG |
michael@0 | 83 | if (IonSpewEnabled(IonSpew_RegAlloc)) |
michael@0 | 84 | dump(); |
michael@0 | 85 | |
michael@0 | 86 | for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { |
michael@0 | 87 | LBlock *block = graph.getBlock(blockIndex); |
michael@0 | 88 | |
michael@0 | 89 | // Check that all instruction inputs and outputs have been assigned an allocation. |
michael@0 | 90 | for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { |
michael@0 | 91 | LInstruction *ins = *iter; |
michael@0 | 92 | |
michael@0 | 93 | for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) |
michael@0 | 94 | JS_ASSERT(!alloc->isUse()); |
michael@0 | 95 | |
michael@0 | 96 | for (size_t i = 0; i < ins->numDefs(); i++) { |
michael@0 | 97 | LDefinition *def = ins->getDef(i); |
michael@0 | 98 | JS_ASSERT_IF(def->policy() != LDefinition::PASSTHROUGH, !def->output()->isUse()); |
michael@0 | 99 | |
michael@0 | 100 | LDefinition oldDef = instructions[ins->id()].outputs[i]; |
michael@0 | 101 | JS_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT, |
michael@0 | 102 | *def->output() == *ins->getOperand(oldDef.getReusedInput())); |
michael@0 | 103 | } |
michael@0 | 104 | |
michael@0 | 105 | for (size_t i = 0; i < ins->numTemps(); i++) { |
michael@0 | 106 | LDefinition *temp = ins->getTemp(i); |
michael@0 | 107 | JS_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister()); |
michael@0 | 108 | |
michael@0 | 109 | LDefinition oldTemp = instructions[ins->id()].temps[i]; |
michael@0 | 110 | JS_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT, |
michael@0 | 111 | *temp->output() == *ins->getOperand(oldTemp.getReusedInput())); |
michael@0 | 112 | } |
michael@0 | 113 | } |
michael@0 | 114 | } |
michael@0 | 115 | #endif |
michael@0 | 116 | |
michael@0 | 117 | // Check that the register assignment and move groups preserve the original |
michael@0 | 118 | // semantics of the virtual registers. Each virtual register has a single |
michael@0 | 119 | // write (owing to the SSA representation), but the allocation may move the |
michael@0 | 120 | // written value around between registers and memory locations along |
michael@0 | 121 | // different paths through the script. |
michael@0 | 122 | // |
michael@0 | 123 | // For each use of an allocation, follow the physical value which is read |
michael@0 | 124 | // backward through the script, along all paths to the value's virtual |
michael@0 | 125 | // register's definition. |
michael@0 | 126 | for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { |
michael@0 | 127 | LBlock *block = graph.getBlock(blockIndex); |
michael@0 | 128 | for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { |
michael@0 | 129 | LInstruction *ins = *iter; |
michael@0 | 130 | const InstructionInfo &info = instructions[ins->id()]; |
michael@0 | 131 | |
michael@0 | 132 | LSafepoint *safepoint = ins->safepoint(); |
michael@0 | 133 | if (safepoint) { |
michael@0 | 134 | for (size_t i = 0; i < ins->numTemps(); i++) { |
michael@0 | 135 | uint32_t vreg = info.temps[i].virtualRegister(); |
michael@0 | 136 | LAllocation *alloc = ins->getTemp(i)->output(); |
michael@0 | 137 | if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints)) |
michael@0 | 138 | return false; |
michael@0 | 139 | } |
michael@0 | 140 | JS_ASSERT_IF(ins->isCall() && !populateSafepoints, |
michael@0 | 141 | safepoint->liveRegs().empty(true) && |
michael@0 | 142 | safepoint->liveRegs().empty(false)); |
michael@0 | 143 | } |
michael@0 | 144 | |
michael@0 | 145 | size_t inputIndex = 0; |
michael@0 | 146 | for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { |
michael@0 | 147 | LAllocation oldInput = info.inputs[inputIndex++]; |
michael@0 | 148 | if (!oldInput.isUse()) |
michael@0 | 149 | continue; |
michael@0 | 150 | |
michael@0 | 151 | uint32_t vreg = oldInput.toUse()->virtualRegister(); |
michael@0 | 152 | |
michael@0 | 153 | if (safepoint && !oldInput.toUse()->usedAtStart()) { |
michael@0 | 154 | if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints)) |
michael@0 | 155 | return false; |
michael@0 | 156 | } |
michael@0 | 157 | |
michael@0 | 158 | // Start checking at the previous instruction, in case this |
michael@0 | 159 | // instruction reuses its input register for an output. |
michael@0 | 160 | LInstructionReverseIterator riter = block->rbegin(ins); |
michael@0 | 161 | riter++; |
michael@0 | 162 | checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints); |
michael@0 | 163 | |
michael@0 | 164 | while (!worklist.empty()) { |
michael@0 | 165 | IntegrityItem item = worklist.popCopy(); |
michael@0 | 166 | checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints); |
michael@0 | 167 | } |
michael@0 | 168 | } |
michael@0 | 169 | } |
michael@0 | 170 | } |
michael@0 | 171 | |
michael@0 | 172 | return true; |
michael@0 | 173 | } |
michael@0 | 174 | |
michael@0 | 175 | bool |
michael@0 | 176 | AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins, |
michael@0 | 177 | uint32_t vreg, LAllocation alloc, bool populateSafepoints) |
michael@0 | 178 | { |
michael@0 | 179 | for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) { |
michael@0 | 180 | ins = *iter; |
michael@0 | 181 | |
michael@0 | 182 | // Follow values through assignments in move groups. All assignments in |
michael@0 | 183 | // a move group are considered to happen simultaneously, so stop after |
michael@0 | 184 | // the first matching move is found. |
michael@0 | 185 | if (ins->isMoveGroup()) { |
michael@0 | 186 | LMoveGroup *group = ins->toMoveGroup(); |
michael@0 | 187 | for (int i = group->numMoves() - 1; i >= 0; i--) { |
michael@0 | 188 | if (*group->getMove(i).to() == alloc) { |
michael@0 | 189 | alloc = *group->getMove(i).from(); |
michael@0 | 190 | break; |
michael@0 | 191 | } |
michael@0 | 192 | } |
michael@0 | 193 | } |
michael@0 | 194 | |
michael@0 | 195 | const InstructionInfo &info = instructions[ins->id()]; |
michael@0 | 196 | |
michael@0 | 197 | // Make sure the physical location being tracked is not clobbered by |
michael@0 | 198 | // another instruction, and that if the originating vreg definition is |
michael@0 | 199 | // found that it is writing to the tracked location. |
michael@0 | 200 | |
michael@0 | 201 | for (size_t i = 0; i < ins->numDefs(); i++) { |
michael@0 | 202 | LDefinition *def = ins->getDef(i); |
michael@0 | 203 | if (def->policy() == LDefinition::PASSTHROUGH) |
michael@0 | 204 | continue; |
michael@0 | 205 | if (info.outputs[i].virtualRegister() == vreg) { |
michael@0 | 206 | JS_ASSERT(*def->output() == alloc); |
michael@0 | 207 | |
michael@0 | 208 | // Found the original definition, done scanning. |
michael@0 | 209 | return true; |
michael@0 | 210 | } else { |
michael@0 | 211 | JS_ASSERT(*def->output() != alloc); |
michael@0 | 212 | } |
michael@0 | 213 | } |
michael@0 | 214 | |
michael@0 | 215 | for (size_t i = 0; i < ins->numTemps(); i++) { |
michael@0 | 216 | LDefinition *temp = ins->getTemp(i); |
michael@0 | 217 | if (!temp->isBogusTemp()) |
michael@0 | 218 | JS_ASSERT(*temp->output() != alloc); |
michael@0 | 219 | } |
michael@0 | 220 | |
michael@0 | 221 | if (ins->safepoint()) { |
michael@0 | 222 | if (!checkSafepointAllocation(ins, vreg, alloc, populateSafepoints)) |
michael@0 | 223 | return false; |
michael@0 | 224 | } |
michael@0 | 225 | } |
michael@0 | 226 | |
michael@0 | 227 | // Phis are effectless, but change the vreg we are tracking. Check if there |
michael@0 | 228 | // is one which produced this vreg. We need to follow back through the phi |
michael@0 | 229 | // inputs as it is not guaranteed the register allocator filled in physical |
michael@0 | 230 | // allocations for the inputs and outputs of the phis. |
michael@0 | 231 | for (size_t i = 0; i < block->numPhis(); i++) { |
michael@0 | 232 | const InstructionInfo &info = blocks[block->mir()->id()].phis[i]; |
michael@0 | 233 | LPhi *phi = block->getPhi(i); |
michael@0 | 234 | if (info.outputs[0].virtualRegister() == vreg) { |
michael@0 | 235 | for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) { |
michael@0 | 236 | uint32_t newvreg = info.inputs[j].toUse()->virtualRegister(); |
michael@0 | 237 | LBlock *predecessor = graph.getBlock(block->mir()->getPredecessor(j)->id()); |
michael@0 | 238 | if (!addPredecessor(predecessor, newvreg, alloc)) |
michael@0 | 239 | return false; |
michael@0 | 240 | } |
michael@0 | 241 | return true; |
michael@0 | 242 | } |
michael@0 | 243 | } |
michael@0 | 244 | |
michael@0 | 245 | // No phi which defined the vreg we are tracking, follow back through all |
michael@0 | 246 | // predecessors with the existing vreg. |
michael@0 | 247 | for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) { |
michael@0 | 248 | LBlock *predecessor = graph.getBlock(block->mir()->getPredecessor(i)->id()); |
michael@0 | 249 | if (!addPredecessor(predecessor, vreg, alloc)) |
michael@0 | 250 | return false; |
michael@0 | 251 | } |
michael@0 | 252 | |
michael@0 | 253 | return true; |
michael@0 | 254 | } |
michael@0 | 255 | |
michael@0 | 256 | bool |
michael@0 | 257 | AllocationIntegrityState::checkSafepointAllocation(LInstruction *ins, |
michael@0 | 258 | uint32_t vreg, LAllocation alloc, |
michael@0 | 259 | bool populateSafepoints) |
michael@0 | 260 | { |
michael@0 | 261 | LSafepoint *safepoint = ins->safepoint(); |
michael@0 | 262 | JS_ASSERT(safepoint); |
michael@0 | 263 | |
michael@0 | 264 | if (ins->isCall() && alloc.isRegister()) |
michael@0 | 265 | return true; |
michael@0 | 266 | |
michael@0 | 267 | if (alloc.isRegister()) { |
michael@0 | 268 | AnyRegister reg = alloc.toRegister(); |
michael@0 | 269 | if (populateSafepoints) |
michael@0 | 270 | safepoint->addLiveRegister(reg); |
michael@0 | 271 | JS_ASSERT(safepoint->liveRegs().has(reg)); |
michael@0 | 272 | } |
michael@0 | 273 | |
michael@0 | 274 | LDefinition::Type type = virtualRegisters[vreg] |
michael@0 | 275 | ? virtualRegisters[vreg]->type() |
michael@0 | 276 | : LDefinition::GENERAL; |
michael@0 | 277 | |
michael@0 | 278 | switch (type) { |
michael@0 | 279 | case LDefinition::OBJECT: |
michael@0 | 280 | if (populateSafepoints) { |
michael@0 | 281 | IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s", |
michael@0 | 282 | vreg, ins->id(), alloc.toString()); |
michael@0 | 283 | if (!safepoint->addGcPointer(alloc)) |
michael@0 | 284 | return false; |
michael@0 | 285 | } |
michael@0 | 286 | JS_ASSERT(safepoint->hasGcPointer(alloc)); |
michael@0 | 287 | break; |
michael@0 | 288 | case LDefinition::SLOTS: |
michael@0 | 289 | if (populateSafepoints) { |
michael@0 | 290 | IonSpew(IonSpew_RegAlloc, "Safepoint slots v%u i%u %s", |
michael@0 | 291 | vreg, ins->id(), alloc.toString()); |
michael@0 | 292 | if (!safepoint->addSlotsOrElementsPointer(alloc)) |
michael@0 | 293 | return false; |
michael@0 | 294 | } |
michael@0 | 295 | JS_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc)); |
michael@0 | 296 | break; |
michael@0 | 297 | #ifdef JS_NUNBOX32 |
michael@0 | 298 | // Do not assert that safepoint information for nunbox types is complete, |
michael@0 | 299 | // as if a vreg for a value's components are copied in multiple places |
michael@0 | 300 | // then the safepoint information may not reflect all copies. All copies |
michael@0 | 301 | // of payloads must be reflected, however, for generational GC. |
michael@0 | 302 | case LDefinition::TYPE: |
michael@0 | 303 | if (populateSafepoints) { |
michael@0 | 304 | IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s", |
michael@0 | 305 | vreg, ins->id(), alloc.toString()); |
michael@0 | 306 | if (!safepoint->addNunboxType(vreg, alloc)) |
michael@0 | 307 | return false; |
michael@0 | 308 | } |
michael@0 | 309 | break; |
michael@0 | 310 | case LDefinition::PAYLOAD: |
michael@0 | 311 | if (populateSafepoints) { |
michael@0 | 312 | IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s", |
michael@0 | 313 | vreg, ins->id(), alloc.toString()); |
michael@0 | 314 | if (!safepoint->addNunboxPayload(vreg, alloc)) |
michael@0 | 315 | return false; |
michael@0 | 316 | } |
michael@0 | 317 | JS_ASSERT(safepoint->hasNunboxPayload(alloc)); |
michael@0 | 318 | break; |
michael@0 | 319 | #else |
michael@0 | 320 | case LDefinition::BOX: |
michael@0 | 321 | if (populateSafepoints) { |
michael@0 | 322 | IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s", |
michael@0 | 323 | vreg, ins->id(), alloc.toString()); |
michael@0 | 324 | if (!safepoint->addBoxedValue(alloc)) |
michael@0 | 325 | return false; |
michael@0 | 326 | } |
michael@0 | 327 | JS_ASSERT(safepoint->hasBoxedValue(alloc)); |
michael@0 | 328 | break; |
michael@0 | 329 | #endif |
michael@0 | 330 | default: |
michael@0 | 331 | break; |
michael@0 | 332 | } |
michael@0 | 333 | |
michael@0 | 334 | return true; |
michael@0 | 335 | } |
michael@0 | 336 | |
michael@0 | 337 | bool |
michael@0 | 338 | AllocationIntegrityState::addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc) |
michael@0 | 339 | { |
michael@0 | 340 | // There is no need to reanalyze if we have already seen this predecessor. |
michael@0 | 341 | // We share the seen allocations across analysis of each use, as there will |
michael@0 | 342 | // likely be common ground between different uses of the same vreg. |
michael@0 | 343 | IntegrityItem item; |
michael@0 | 344 | item.block = block; |
michael@0 | 345 | item.vreg = vreg; |
michael@0 | 346 | item.alloc = alloc; |
michael@0 | 347 | item.index = seen.count(); |
michael@0 | 348 | |
michael@0 | 349 | IntegrityItemSet::AddPtr p = seen.lookupForAdd(item); |
michael@0 | 350 | if (p) |
michael@0 | 351 | return true; |
michael@0 | 352 | if (!seen.add(p, item)) |
michael@0 | 353 | return false; |
michael@0 | 354 | |
michael@0 | 355 | return worklist.append(item); |
michael@0 | 356 | } |
michael@0 | 357 | |
michael@0 | 358 | void |
michael@0 | 359 | AllocationIntegrityState::dump() |
michael@0 | 360 | { |
michael@0 | 361 | #ifdef DEBUG |
michael@0 | 362 | fprintf(stderr, "Register Allocation:\n"); |
michael@0 | 363 | |
michael@0 | 364 | for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { |
michael@0 | 365 | LBlock *block = graph.getBlock(blockIndex); |
michael@0 | 366 | MBasicBlock *mir = block->mir(); |
michael@0 | 367 | |
michael@0 | 368 | fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex)); |
michael@0 | 369 | for (size_t i = 0; i < mir->numSuccessors(); i++) |
michael@0 | 370 | fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id()); |
michael@0 | 371 | fprintf(stderr, "\n"); |
michael@0 | 372 | |
michael@0 | 373 | for (size_t i = 0; i < block->numPhis(); i++) { |
michael@0 | 374 | const InstructionInfo &info = blocks[blockIndex].phis[i]; |
michael@0 | 375 | LPhi *phi = block->getPhi(i); |
michael@0 | 376 | CodePosition output(phi->id(), CodePosition::OUTPUT); |
michael@0 | 377 | |
michael@0 | 378 | // Don't print the inputOf for phi nodes, since it's never used. |
michael@0 | 379 | fprintf(stderr, "[,%u Phi [def v%u %s] <-", |
michael@0 | 380 | output.pos(), |
michael@0 | 381 | info.outputs[0].virtualRegister(), |
michael@0 | 382 | phi->getDef(0)->output()->toString()); |
michael@0 | 383 | for (size_t j = 0; j < phi->numOperands(); j++) |
michael@0 | 384 | fprintf(stderr, " %s", info.inputs[j].toString()); |
michael@0 | 385 | fprintf(stderr, "]\n"); |
michael@0 | 386 | } |
michael@0 | 387 | |
michael@0 | 388 | for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { |
michael@0 | 389 | LInstruction *ins = *iter; |
michael@0 | 390 | const InstructionInfo &info = instructions[ins->id()]; |
michael@0 | 391 | |
michael@0 | 392 | CodePosition input(ins->id(), CodePosition::INPUT); |
michael@0 | 393 | CodePosition output(ins->id(), CodePosition::OUTPUT); |
michael@0 | 394 | |
michael@0 | 395 | fprintf(stderr, "["); |
michael@0 | 396 | if (input != CodePosition::MIN) |
michael@0 | 397 | fprintf(stderr, "%u,%u ", input.pos(), output.pos()); |
michael@0 | 398 | fprintf(stderr, "%s]", ins->opName()); |
michael@0 | 399 | |
michael@0 | 400 | if (ins->isMoveGroup()) { |
michael@0 | 401 | LMoveGroup *group = ins->toMoveGroup(); |
michael@0 | 402 | for (int i = group->numMoves() - 1; i >= 0; i--) { |
michael@0 | 403 | // Use two printfs, as LAllocation::toString is not reentant. |
michael@0 | 404 | fprintf(stderr, " [%s", group->getMove(i).from()->toString()); |
michael@0 | 405 | fprintf(stderr, " -> %s]", group->getMove(i).to()->toString()); |
michael@0 | 406 | } |
michael@0 | 407 | fprintf(stderr, "\n"); |
michael@0 | 408 | continue; |
michael@0 | 409 | } |
michael@0 | 410 | |
michael@0 | 411 | for (size_t i = 0; i < ins->numTemps(); i++) { |
michael@0 | 412 | LDefinition *temp = ins->getTemp(i); |
michael@0 | 413 | if (!temp->isBogusTemp()) |
michael@0 | 414 | fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(), |
michael@0 | 415 | temp->output()->toString()); |
michael@0 | 416 | } |
michael@0 | 417 | |
michael@0 | 418 | for (size_t i = 0; i < ins->numDefs(); i++) { |
michael@0 | 419 | LDefinition *def = ins->getDef(i); |
michael@0 | 420 | fprintf(stderr, " [def v%u %s]", info.outputs[i].virtualRegister(), |
michael@0 | 421 | def->output()->toString()); |
michael@0 | 422 | } |
michael@0 | 423 | |
michael@0 | 424 | size_t index = 0; |
michael@0 | 425 | for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { |
michael@0 | 426 | fprintf(stderr, " [use %s", info.inputs[index++].toString()); |
michael@0 | 427 | fprintf(stderr, " %s]", alloc->toString()); |
michael@0 | 428 | } |
michael@0 | 429 | |
michael@0 | 430 | fprintf(stderr, "\n"); |
michael@0 | 431 | } |
michael@0 | 432 | } |
michael@0 | 433 | |
michael@0 | 434 | fprintf(stderr, "\nIntermediate Allocations:\n\n"); |
michael@0 | 435 | |
michael@0 | 436 | // Print discovered allocations at the ends of blocks, in the order they |
michael@0 | 437 | // were discovered. |
michael@0 | 438 | |
michael@0 | 439 | Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered; |
michael@0 | 440 | seenOrdered.appendN(IntegrityItem(), seen.count()); |
michael@0 | 441 | |
michael@0 | 442 | for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) { |
michael@0 | 443 | IntegrityItem item = iter.front(); |
michael@0 | 444 | seenOrdered[item.index] = item; |
michael@0 | 445 | } |
michael@0 | 446 | |
michael@0 | 447 | for (size_t i = 0; i < seenOrdered.length(); i++) { |
michael@0 | 448 | IntegrityItem item = seenOrdered[i]; |
michael@0 | 449 | fprintf(stderr, "block %u reg v%u alloc %s\n", |
michael@0 | 450 | item.block->mir()->id(), item.vreg, item.alloc.toString()); |
michael@0 | 451 | } |
michael@0 | 452 | |
michael@0 | 453 | fprintf(stderr, "\n"); |
michael@0 | 454 | #endif |
michael@0 | 455 | } |
michael@0 | 456 | |
michael@0 | 457 | const CodePosition CodePosition::MAX(UINT_MAX); |
michael@0 | 458 | const CodePosition CodePosition::MIN(0); |
michael@0 | 459 | |
michael@0 | 460 | bool |
michael@0 | 461 | RegisterAllocator::init() |
michael@0 | 462 | { |
michael@0 | 463 | if (!insData.init(mir, graph.numInstructions())) |
michael@0 | 464 | return false; |
michael@0 | 465 | |
michael@0 | 466 | for (size_t i = 0; i < graph.numBlocks(); i++) { |
michael@0 | 467 | LBlock *block = graph.getBlock(i); |
michael@0 | 468 | for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) |
michael@0 | 469 | insData[*ins].init(*ins, block); |
michael@0 | 470 | for (size_t j = 0; j < block->numPhis(); j++) { |
michael@0 | 471 | LPhi *phi = block->getPhi(j); |
michael@0 | 472 | insData[phi].init(phi, block); |
michael@0 | 473 | } |
michael@0 | 474 | } |
michael@0 | 475 | |
michael@0 | 476 | return true; |
michael@0 | 477 | } |
michael@0 | 478 | |
michael@0 | 479 | LMoveGroup * |
michael@0 | 480 | RegisterAllocator::getInputMoveGroup(uint32_t ins) |
michael@0 | 481 | { |
michael@0 | 482 | InstructionData *data = &insData[ins]; |
michael@0 | 483 | JS_ASSERT(!data->ins()->isPhi()); |
michael@0 | 484 | JS_ASSERT(!data->ins()->isLabel()); |
michael@0 | 485 | |
michael@0 | 486 | if (data->inputMoves()) |
michael@0 | 487 | return data->inputMoves(); |
michael@0 | 488 | |
michael@0 | 489 | LMoveGroup *moves = LMoveGroup::New(alloc()); |
michael@0 | 490 | data->setInputMoves(moves); |
michael@0 | 491 | data->block()->insertBefore(data->ins(), moves); |
michael@0 | 492 | |
michael@0 | 493 | return moves; |
michael@0 | 494 | } |
michael@0 | 495 | |
michael@0 | 496 | LMoveGroup * |
michael@0 | 497 | RegisterAllocator::getMoveGroupAfter(uint32_t ins) |
michael@0 | 498 | { |
michael@0 | 499 | InstructionData *data = &insData[ins]; |
michael@0 | 500 | JS_ASSERT(!data->ins()->isPhi()); |
michael@0 | 501 | |
michael@0 | 502 | if (data->movesAfter()) |
michael@0 | 503 | return data->movesAfter(); |
michael@0 | 504 | |
michael@0 | 505 | LMoveGroup *moves = LMoveGroup::New(alloc()); |
michael@0 | 506 | data->setMovesAfter(moves); |
michael@0 | 507 | |
michael@0 | 508 | if (data->ins()->isLabel()) |
michael@0 | 509 | data->block()->insertAfter(data->block()->getEntryMoveGroup(alloc()), moves); |
michael@0 | 510 | else |
michael@0 | 511 | data->block()->insertAfter(data->ins(), moves); |
michael@0 | 512 | return moves; |
michael@0 | 513 | } |