Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable the double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie, where it counts. Some reservations remain about how to convince
FindCookie callers to test the condition and pass a nullptr when the double-key
logic is disabled.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/RegisterAllocator.h"
9 using namespace js;
10 using namespace js::jit;
12 bool
13 AllocationIntegrityState::record()
14 {
15 // Ignore repeated record() calls.
16 if (!instructions.empty())
17 return true;
19 if (!instructions.appendN(InstructionInfo(), graph.numInstructions()))
20 return false;
22 if (!virtualRegisters.appendN((LDefinition *)nullptr, graph.numVirtualRegisters()))
23 return false;
25 if (!blocks.reserve(graph.numBlocks()))
26 return false;
27 for (size_t i = 0; i < graph.numBlocks(); i++) {
28 blocks.infallibleAppend(BlockInfo());
29 LBlock *block = graph.getBlock(i);
30 JS_ASSERT(block->mir()->id() == i);
32 BlockInfo &blockInfo = blocks[i];
33 if (!blockInfo.phis.reserve(block->numPhis()))
34 return false;
36 for (size_t j = 0; j < block->numPhis(); j++) {
37 blockInfo.phis.infallibleAppend(InstructionInfo());
38 InstructionInfo &info = blockInfo.phis[j];
39 LPhi *phi = block->getPhi(j);
40 JS_ASSERT(phi->numDefs() == 1);
41 uint32_t vreg = phi->getDef(0)->virtualRegister();
42 virtualRegisters[vreg] = phi->getDef(0);
43 if (!info.outputs.append(*phi->getDef(0)))
44 return false;
45 for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
46 if (!info.inputs.append(*phi->getOperand(k)))
47 return false;
48 }
49 }
51 for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
52 LInstruction *ins = *iter;
53 InstructionInfo &info = instructions[ins->id()];
55 for (size_t k = 0; k < ins->numTemps(); k++) {
56 uint32_t vreg = ins->getTemp(k)->virtualRegister();
57 virtualRegisters[vreg] = ins->getTemp(k);
58 if (!info.temps.append(*ins->getTemp(k)))
59 return false;
60 }
61 for (size_t k = 0; k < ins->numDefs(); k++) {
62 uint32_t vreg = ins->getDef(k)->virtualRegister();
63 virtualRegisters[vreg] = ins->getDef(k);
64 if (!info.outputs.append(*ins->getDef(k)))
65 return false;
66 }
67 for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
68 if (!info.inputs.append(**alloc))
69 return false;
70 }
71 }
72 }
74 return seen.init();
75 }
77 bool
78 AllocationIntegrityState::check(bool populateSafepoints)
79 {
80 JS_ASSERT(!instructions.empty());
82 #ifdef DEBUG
83 if (IonSpewEnabled(IonSpew_RegAlloc))
84 dump();
86 for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
87 LBlock *block = graph.getBlock(blockIndex);
89 // Check that all instruction inputs and outputs have been assigned an allocation.
90 for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
91 LInstruction *ins = *iter;
93 for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
94 JS_ASSERT(!alloc->isUse());
96 for (size_t i = 0; i < ins->numDefs(); i++) {
97 LDefinition *def = ins->getDef(i);
98 JS_ASSERT_IF(def->policy() != LDefinition::PASSTHROUGH, !def->output()->isUse());
100 LDefinition oldDef = instructions[ins->id()].outputs[i];
101 JS_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
102 *def->output() == *ins->getOperand(oldDef.getReusedInput()));
103 }
105 for (size_t i = 0; i < ins->numTemps(); i++) {
106 LDefinition *temp = ins->getTemp(i);
107 JS_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
109 LDefinition oldTemp = instructions[ins->id()].temps[i];
110 JS_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
111 *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
112 }
113 }
114 }
115 #endif
117 // Check that the register assignment and move groups preserve the original
118 // semantics of the virtual registers. Each virtual register has a single
119 // write (owing to the SSA representation), but the allocation may move the
120 // written value around between registers and memory locations along
121 // different paths through the script.
122 //
123 // For each use of an allocation, follow the physical value which is read
124 // backward through the script, along all paths to the value's virtual
125 // register's definition.
126 for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
127 LBlock *block = graph.getBlock(blockIndex);
128 for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
129 LInstruction *ins = *iter;
130 const InstructionInfo &info = instructions[ins->id()];
132 LSafepoint *safepoint = ins->safepoint();
133 if (safepoint) {
134 for (size_t i = 0; i < ins->numTemps(); i++) {
135 uint32_t vreg = info.temps[i].virtualRegister();
136 LAllocation *alloc = ins->getTemp(i)->output();
137 if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
138 return false;
139 }
140 JS_ASSERT_IF(ins->isCall() && !populateSafepoints,
141 safepoint->liveRegs().empty(true) &&
142 safepoint->liveRegs().empty(false));
143 }
145 size_t inputIndex = 0;
146 for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
147 LAllocation oldInput = info.inputs[inputIndex++];
148 if (!oldInput.isUse())
149 continue;
151 uint32_t vreg = oldInput.toUse()->virtualRegister();
153 if (safepoint && !oldInput.toUse()->usedAtStart()) {
154 if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
155 return false;
156 }
158 // Start checking at the previous instruction, in case this
159 // instruction reuses its input register for an output.
160 LInstructionReverseIterator riter = block->rbegin(ins);
161 riter++;
162 checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints);
164 while (!worklist.empty()) {
165 IntegrityItem item = worklist.popCopy();
166 checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints);
167 }
168 }
169 }
170 }
172 return true;
173 }
// Trace the physical location 'alloc', expected to hold the value of 'vreg',
// backward from 'ins' to the vreg's definition, verifying along the way that
// no other instruction clobbers that location. Queues predecessor blocks on
// the worklist (via addPredecessor) rather than recursing. Returns false on
// OOM only; integrity violations fire assertions.
bool
AllocationIntegrityState::checkIntegrity(LBlock *block, LInstruction *ins,
                                         uint32_t vreg, LAllocation alloc, bool populateSafepoints)
{
    for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) {
        ins = *iter;

        // Follow values through assignments in move groups. All assignments in
        // a move group are considered to happen simultaneously, so stop after
        // the first matching move is found.
        if (ins->isMoveGroup()) {
            LMoveGroup *group = ins->toMoveGroup();
            // Iterate in reverse since we are scanning the code backward.
            for (int i = group->numMoves() - 1; i >= 0; i--) {
                if (*group->getMove(i).to() == alloc) {
                    alloc = *group->getMove(i).from();
                    break;
                }
            }
        }

        const InstructionInfo &info = instructions[ins->id()];

        // Make sure the physical location being tracked is not clobbered by
        // another instruction, and that if the originating vreg definition is
        // found that it is writing to the tracked location.
        for (size_t i = 0; i < ins->numDefs(); i++) {
            LDefinition *def = ins->getDef(i);
            if (def->policy() == LDefinition::PASSTHROUGH)
                continue;
            if (info.outputs[i].virtualRegister() == vreg) {
                JS_ASSERT(*def->output() == alloc);

                // Found the original definition, done scanning.
                return true;
            } else {
                JS_ASSERT(*def->output() != alloc);
            }
        }

        // Temps must not overwrite the location we are tracking either.
        for (size_t i = 0; i < ins->numTemps(); i++) {
            LDefinition *temp = ins->getTemp(i);
            if (!temp->isBogusTemp())
                JS_ASSERT(*temp->output() != alloc);
        }

        // The tracked value is live across this safepoint; check (or
        // populate) the safepoint's record of it.
        if (ins->safepoint()) {
            if (!checkSafepointAllocation(ins, vreg, alloc, populateSafepoints))
                return false;
        }
    }

    // Phis are effectless, but change the vreg we are tracking. Check if there
    // is one which produced this vreg. We need to follow back through the phi
    // inputs as it is not guaranteed the register allocator filled in physical
    // allocations for the inputs and outputs of the phis.
    for (size_t i = 0; i < block->numPhis(); i++) {
        const InstructionInfo &info = blocks[block->mir()->id()].phis[i];
        LPhi *phi = block->getPhi(i);
        if (info.outputs[0].virtualRegister() == vreg) {
            for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
                uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
                LBlock *predecessor = graph.getBlock(block->mir()->getPredecessor(j)->id());
                if (!addPredecessor(predecessor, newvreg, alloc))
                    return false;
            }
            return true;
        }
    }

    // No phi which defined the vreg we are tracking, follow back through all
    // predecessors with the existing vreg.
    for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
        LBlock *predecessor = graph.getBlock(block->mir()->getPredecessor(i)->id());
        if (!addPredecessor(predecessor, vreg, alloc))
            return false;
    }

    return true;
}
// Check (or, when populateSafepoints is set, fill in) the safepoint of 'ins'
// for the value of 'vreg' held in 'alloc'. Returns false on OOM only.
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction *ins,
                                                   uint32_t vreg, LAllocation alloc,
                                                   bool populateSafepoints)
{
    LSafepoint *safepoint = ins->safepoint();
    JS_ASSERT(safepoint);

    // Registers are not preserved across calls, so a register allocation at a
    // call's safepoint needs no tracking.
    if (ins->isCall() && alloc.isRegister())
        return true;

    if (alloc.isRegister()) {
        AnyRegister reg = alloc.toRegister();
        if (populateSafepoints)
            safepoint->addLiveRegister(reg);
        JS_ASSERT(safepoint->liveRegs().has(reg));
    }

    // A null entry in virtualRegisters means no definition was recorded for
    // this vreg; treat it as GENERAL (no GC tracking needed).
    LDefinition::Type type = virtualRegisters[vreg]
                             ? virtualRegisters[vreg]->type()
                             : LDefinition::GENERAL;

    switch (type) {
      case LDefinition::OBJECT:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addGcPointer(alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasGcPointer(alloc));
        break;
      case LDefinition::SLOTS:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint slots v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addSlotsOrElementsPointer(alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
        break;
#ifdef JS_NUNBOX32
      // Do not assert that safepoint information for nunbox types is complete,
      // as if a vreg for a value's components are copied in multiple places
      // then the safepoint information may not reflect all copies. All copies
      // of payloads must be reflected, however, for generational GC.
      case LDefinition::TYPE:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxType(vreg, alloc))
                return false;
        }
        break;
      case LDefinition::PAYLOAD:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addNunboxPayload(vreg, alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasNunboxPayload(alloc));
        break;
#else
      case LDefinition::BOX:
        if (populateSafepoints) {
            IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
                    vreg, ins->id(), alloc.toString());
            if (!safepoint->addBoxedValue(alloc))
                return false;
        }
        JS_ASSERT(safepoint->hasBoxedValue(alloc));
        break;
#endif
      default:
        // GENERAL, INT32, DOUBLE, etc. hold no GC pointers: nothing to record.
        break;
    }

    return true;
}
337 bool
338 AllocationIntegrityState::addPredecessor(LBlock *block, uint32_t vreg, LAllocation alloc)
339 {
340 // There is no need to reanalyze if we have already seen this predecessor.
341 // We share the seen allocations across analysis of each use, as there will
342 // likely be common ground between different uses of the same vreg.
343 IntegrityItem item;
344 item.block = block;
345 item.vreg = vreg;
346 item.alloc = alloc;
347 item.index = seen.count();
349 IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
350 if (p)
351 return true;
352 if (!seen.add(p, item))
353 return false;
355 return worklist.append(item);
356 }
358 void
359 AllocationIntegrityState::dump()
360 {
361 #ifdef DEBUG
362 fprintf(stderr, "Register Allocation:\n");
364 for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
365 LBlock *block = graph.getBlock(blockIndex);
366 MBasicBlock *mir = block->mir();
368 fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
369 for (size_t i = 0; i < mir->numSuccessors(); i++)
370 fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
371 fprintf(stderr, "\n");
373 for (size_t i = 0; i < block->numPhis(); i++) {
374 const InstructionInfo &info = blocks[blockIndex].phis[i];
375 LPhi *phi = block->getPhi(i);
376 CodePosition output(phi->id(), CodePosition::OUTPUT);
378 // Don't print the inputOf for phi nodes, since it's never used.
379 fprintf(stderr, "[,%u Phi [def v%u %s] <-",
380 output.pos(),
381 info.outputs[0].virtualRegister(),
382 phi->getDef(0)->output()->toString());
383 for (size_t j = 0; j < phi->numOperands(); j++)
384 fprintf(stderr, " %s", info.inputs[j].toString());
385 fprintf(stderr, "]\n");
386 }
388 for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
389 LInstruction *ins = *iter;
390 const InstructionInfo &info = instructions[ins->id()];
392 CodePosition input(ins->id(), CodePosition::INPUT);
393 CodePosition output(ins->id(), CodePosition::OUTPUT);
395 fprintf(stderr, "[");
396 if (input != CodePosition::MIN)
397 fprintf(stderr, "%u,%u ", input.pos(), output.pos());
398 fprintf(stderr, "%s]", ins->opName());
400 if (ins->isMoveGroup()) {
401 LMoveGroup *group = ins->toMoveGroup();
402 for (int i = group->numMoves() - 1; i >= 0; i--) {
403 // Use two printfs, as LAllocation::toString is not reentant.
404 fprintf(stderr, " [%s", group->getMove(i).from()->toString());
405 fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
406 }
407 fprintf(stderr, "\n");
408 continue;
409 }
411 for (size_t i = 0; i < ins->numTemps(); i++) {
412 LDefinition *temp = ins->getTemp(i);
413 if (!temp->isBogusTemp())
414 fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
415 temp->output()->toString());
416 }
418 for (size_t i = 0; i < ins->numDefs(); i++) {
419 LDefinition *def = ins->getDef(i);
420 fprintf(stderr, " [def v%u %s]", info.outputs[i].virtualRegister(),
421 def->output()->toString());
422 }
424 size_t index = 0;
425 for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
426 fprintf(stderr, " [use %s", info.inputs[index++].toString());
427 fprintf(stderr, " %s]", alloc->toString());
428 }
430 fprintf(stderr, "\n");
431 }
432 }
434 fprintf(stderr, "\nIntermediate Allocations:\n\n");
436 // Print discovered allocations at the ends of blocks, in the order they
437 // were discovered.
439 Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
440 seenOrdered.appendN(IntegrityItem(), seen.count());
442 for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
443 IntegrityItem item = iter.front();
444 seenOrdered[item.index] = item;
445 }
447 for (size_t i = 0; i < seenOrdered.length(); i++) {
448 IntegrityItem item = seenOrdered[i];
449 fprintf(stderr, "block %u reg v%u alloc %s\n",
450 item.block->mir()->id(), item.vreg, item.alloc.toString());
451 }
453 fprintf(stderr, "\n");
454 #endif
455 }
// Sentinel code positions: MAX compares after every real position, MIN
// (position 0) before all of them.
const CodePosition CodePosition::MAX(UINT_MAX);
const CodePosition CodePosition::MIN(0);
460 bool
461 RegisterAllocator::init()
462 {
463 if (!insData.init(mir, graph.numInstructions()))
464 return false;
466 for (size_t i = 0; i < graph.numBlocks(); i++) {
467 LBlock *block = graph.getBlock(i);
468 for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
469 insData[*ins].init(*ins, block);
470 for (size_t j = 0; j < block->numPhis(); j++) {
471 LPhi *phi = block->getPhi(j);
472 insData[phi].init(phi, block);
473 }
474 }
476 return true;
477 }
479 LMoveGroup *
480 RegisterAllocator::getInputMoveGroup(uint32_t ins)
481 {
482 InstructionData *data = &insData[ins];
483 JS_ASSERT(!data->ins()->isPhi());
484 JS_ASSERT(!data->ins()->isLabel());
486 if (data->inputMoves())
487 return data->inputMoves();
489 LMoveGroup *moves = LMoveGroup::New(alloc());
490 data->setInputMoves(moves);
491 data->block()->insertBefore(data->ins(), moves);
493 return moves;
494 }
496 LMoveGroup *
497 RegisterAllocator::getMoveGroupAfter(uint32_t ins)
498 {
499 InstructionData *data = &insData[ins];
500 JS_ASSERT(!data->ins()->isPhi());
502 if (data->movesAfter())
503 return data->movesAfter();
505 LMoveGroup *moves = LMoveGroup::New(alloc());
506 data->setMovesAfter(moves);
508 if (data->ins()->isLabel())
509 data->block()->insertAfter(data->block()->getEntryMoveGroup(alloc()), moves);
510 else
511 data->block()->insertAfter(data->ins(), moves);
512 return moves;
513 }