Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable the double-key logic according to private browsing
mode or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when the double-key logic is disabled.
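
As a rough sketch of the intended gating (illustrative only: the helper name,
its bool parameter, and reading the preference as a boolean are assumptions,
not part of the actual patch; only the pref name comes from the message above):

#include "mozilla/Preferences.h"

// Hypothetical helper: double-key logic is active in private browsing, or
// when the user opted in via the privacy.thirdparty.isolate preference.
// Name, signature and the boolean pref read are illustrative assumptions.
static bool
ThirdPartyIsolationActive(bool aIsPrivate)
{
    return aIsPrivate ||
           mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
}

GetCookieStringCommon would test such a condition before building the double
key, and FindCookie callers would pass a nullptr key when it is false.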

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/StupidAllocator.h"

#include "jstypes.h"

using namespace js;
using namespace js::jit;
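
// Every virtual register gets its own canonical, Value-sized stack slot at a
// fixed offset; since liveness is not tracked, slots are never shared.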
static inline uint32_t
DefaultStackSlot(uint32_t vreg)
{
    return vreg * sizeof(Value);
}
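
// Return the canonical spill location for vreg, or the preset argument slot
// if its definition is pinned to one.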
LAllocation *
StupidAllocator::stackLocation(uint32_t vreg)
{
    LDefinition *def = virtualRegisters[vreg];
    if (def->policy() == LDefinition::PRESET && def->output()->isArgument())
        return def->output();

    return new(alloc()) LStackSlot(DefaultStackSlot(vreg));
}
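
// Map a physical register to its index in the registers[] array.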
StupidAllocator::RegisterIndex
StupidAllocator::registerIndex(AnyRegister reg)
{
    for (size_t i = 0; i < registerCount; i++) {
        if (reg == registers[i].reg)
            return i;
    }
    MOZ_ASSUME_UNREACHABLE("Bad register");
}
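
// Record the defining LDefinition for every virtual register and build the
// pool of allocatable physical registers.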
bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.appendN((LDefinition *)nullptr, graph.numVirtualRegisters()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH)
                    virtualRegisters[def->virtualRegister()] = def;
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            uint32_t vreg = def->virtualRegister();

            virtualRegisters[vreg] = def;
        }
    }

    // Assign physical registers to the tracked allocation.
    {
        registerCount = 0;
        RegisterSet remainingRegisters(allRegisters_);
        while (!remainingRegisters.empty(/* float = */ false))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeGeneral());
        while (!remainingRegisters.empty(/* float = */ true))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeFloat());
        JS_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}
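
// Whether alloc is the physical register reg, or is a use fixed to it.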
bool
StupidAllocator::allocationRequiresRegister(const LAllocation *alloc, AnyRegister reg)
{
    if (alloc->isRegister() && alloc->toRegister() == reg)
        return true;
    if (alloc->isUse()) {
        const LUse *use = alloc->toUse();
        if (use->policy() == LUse::FIXED) {
            AnyRegister usedReg = GetFixedRegister(virtualRegisters[use->virtualRegister()], use);
            if (usedReg == reg)
                return true;
        }
    }
    return false;
}

bool
StupidAllocator::registerIsReserved(LInstruction *ins, AnyRegister reg)
{
    // Whether reg is already reserved for an input or output of ins.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (allocationRequiresRegister(*alloc, reg))
            return true;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        if (allocationRequiresRegister(ins->getTemp(i)->output(), reg))
            return true;
    }
    for (size_t i = 0; i < ins->numDefs(); i++) {
        if (allocationRequiresRegister(ins->getDef(i)->output(), reg))
            return true;
    }
    return false;
}

AnyRegister
StupidAllocator::ensureHasRegister(LInstruction *ins, uint32_t vreg)
{
    // Ensure that vreg is held in a register before ins.

    // Check if the virtual register is already held in a physical register.
    RegisterIndex existing = findExistingRegister(vreg);
    if (existing != UINT32_MAX) {
        if (registerIsReserved(ins, registers[existing].reg)) {
            evictRegister(ins, existing);
        } else {
            registers[existing].age = ins->id();
            return registers[existing].reg;
        }
    }

    RegisterIndex best = allocateRegister(ins, vreg);
    loadRegister(ins, vreg, best, virtualRegisters[vreg]->type());

    return registers[best].reg;
}

StupidAllocator::RegisterIndex
StupidAllocator::allocateRegister(LInstruction *ins, uint32_t vreg)
{
    // Pick a register for vreg, evicting an existing register if necessary.
    // Spill code will be placed before ins, and no existing allocated input
    // for ins will be touched.
    JS_ASSERT(ins);

    LDefinition *def = virtualRegisters[vreg];
    JS_ASSERT(def);

    RegisterIndex best = UINT32_MAX;

    for (size_t i = 0; i < registerCount; i++) {
        AnyRegister reg = registers[i].reg;

        if (reg.isFloat() != def->isFloatReg())
            continue;

        // Skip the register if it is in use for an allocated input or output.
        if (registerIsReserved(ins, reg))
            continue;

        if (registers[i].vreg == MISSING_ALLOCATION ||
            best == UINT32_MAX ||
            registers[best].age > registers[i].age)
        {
            best = i;
        }
    }

    evictRegister(ins, best);
    return best;
}
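
// If the register holds a dirty value, write it back to its vreg's canonical
// stack slot.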
void
StupidAllocator::syncRegister(LInstruction *ins, RegisterIndex index)
{
    if (registers[index].dirty) {
        LMoveGroup *input = getInputMoveGroup(ins->id());
        LAllocation *source = new(alloc()) LAllocation(registers[index].reg);

        uint32_t existing = registers[index].vreg;
        LAllocation *dest = stackLocation(existing);
        input->addAfter(source, dest, registers[index].type);

        registers[index].dirty = false;
    }
}
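
// Sync a register and mark it as no longer holding any allocation.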
void
StupidAllocator::evictRegister(LInstruction *ins, RegisterIndex index)
{
    syncRegister(ins, index);
    registers[index].set(MISSING_ALLOCATION);
}

void
StupidAllocator::loadRegister(LInstruction *ins, uint32_t vreg, RegisterIndex index, LDefinition::Type type)
{
    // Load a vreg from its stack location to a register.
    LMoveGroup *input = getInputMoveGroup(ins->id());
    LAllocation *source = stackLocation(vreg);
    LAllocation *dest = new(alloc()) LAllocation(registers[index].reg);
    input->addAfter(source, dest, type);
    registers[index].set(vreg, ins);
    registers[index].type = type;
}
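
// Return the index of the register currently holding vreg, or UINT32_MAX if
// vreg is not held in any register.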
StupidAllocator::RegisterIndex
StupidAllocator::findExistingRegister(uint32_t vreg)
{
    for (size_t i = 0; i < registerCount; i++) {
        if (registers[i].vreg == vreg)
            return i;
    }
    return UINT32_MAX;
}

bool
StupidAllocator::go()
{
    // This register allocator is intended to be as simple as possible, while
    // still being complicated enough to share properties with more complicated
    // allocators. Namely, physical registers may be used to carry virtual
    // registers across LIR instructions, but not across basic blocks.
    //
    // This algorithm does not pay any attention to liveness. It is performed
    // as a single forward pass through the basic blocks in the program. As
    // virtual registers and temporaries are defined they are assigned physical
    // registers, evicting existing allocations in an LRU fashion.

    // For virtual registers not carried in a register, a canonical spill
    // location is used. Each vreg has a different spill location; since we do
    // not track liveness we cannot determine that two vregs have disjoint
    // lifetimes. Thus, the maximum stack height is the number of vregs (scaled
    // by two on 32 bit platforms to allow storing double values).
    graph.setLocalSlotCount(DefaultStackSlot(graph.numVirtualRegisters()));

    if (!init())
        return false;

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock *block = graph.getBlock(blockIndex);
        JS_ASSERT(block->mir()->id() == blockIndex);

        for (size_t i = 0; i < registerCount; i++)
            registers[i].set(MISSING_ALLOCATION);

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction *ins = *iter;

            if (ins == *block->rbegin())
                syncForBlockEnd(block, ins);

            allocateForInstruction(ins);
        }
    }

    return true;
}

void
StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = LMoveGroup::New(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest, phi->getDef(0)->type());
        }
    }
}

void
StupidAllocator::allocateForInstruction(LInstruction *ins)
{
    // Sync all registers before making a call.
    if (ins->isCall()) {
        for (size_t i = 0; i < registerCount; i++)
            syncRegister(ins, i);
    }

    // Allocate for inputs which are required to be in registers.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (!alloc->isUse())
            continue;
        LUse *use = alloc->toUse();
        uint32_t vreg = use->virtualRegister();
        if (use->policy() == LUse::REGISTER) {
            AnyRegister reg = ensureHasRegister(ins, vreg);
            alloc.replace(LAllocation(reg));
        } else if (use->policy() == LUse::FIXED) {
            AnyRegister reg = GetFixedRegister(virtualRegisters[vreg], use);
            RegisterIndex index = registerIndex(reg);
            if (registers[index].vreg != vreg) {
                evictRegister(ins, index);
                RegisterIndex existing = findExistingRegister(vreg);
                if (existing != UINT32_MAX)
                    evictRegister(ins, existing);
                loadRegister(ins, vreg, index, virtualRegisters[vreg]->type());
            }
            alloc.replace(LAllocation(reg));
        } else {
            // Inputs which are not required to be in a register are not
            // allocated until after temps/definitions, as the latter may need
            // to evict registers which hold these inputs.
        }
    }

    // Find registers to hold all temporaries and outputs of the instruction.
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition *def = ins->getTemp(i);
        if (!def->isBogusTemp())
            allocateForDefinition(ins, def);
    }
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition *def = ins->getDef(i);
        if (def->policy() != LDefinition::PASSTHROUGH)
            allocateForDefinition(ins, def);
    }

    // Allocate for remaining inputs which do not need to be in registers.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (!alloc->isUse())
            continue;
        LUse *use = alloc->toUse();
        uint32_t vreg = use->virtualRegister();
        JS_ASSERT(use->policy() != LUse::REGISTER && use->policy() != LUse::FIXED);

        RegisterIndex index = findExistingRegister(vreg);
        if (index == UINT32_MAX) {
            LAllocation *stack = stackLocation(use->virtualRegister());
            alloc.replace(*stack);
        } else {
            registers[index].age = ins->id();
            alloc.replace(LAllocation(registers[index].reg));
        }
    }

    // If this is a call, evict all registers except for those holding outputs.
    if (ins->isCall()) {
        for (size_t i = 0; i < registerCount; i++) {
            if (!registers[i].dirty)
                registers[i].set(MISSING_ALLOCATION);
        }
    }
}
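
// Pick an output location for def: a fixed or reused register, a preset
// stack location, or the least recently used available register.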
void
StupidAllocator::allocateForDefinition(LInstruction *ins, LDefinition *def)
{
    uint32_t vreg = def->virtualRegister();

    CodePosition from;
    if ((def->output()->isRegister() && def->policy() == LDefinition::PRESET) ||
        def->policy() == LDefinition::MUST_REUSE_INPUT)
    {
        // Result will be in a specific register, spill any vreg held in
        // that register before the instruction.
        RegisterIndex index =
            registerIndex(def->policy() == LDefinition::PRESET
                          ? def->output()->toRegister()
                          : ins->getOperand(def->getReusedInput())->toRegister());
        evictRegister(ins, index);
        registers[index].set(vreg, ins, true);
        registers[index].type = virtualRegisters[vreg]->type();
        def->setOutput(LAllocation(registers[index].reg));
    } else if (def->policy() == LDefinition::PRESET) {
        // The result must be a stack location.
        def->setOutput(*stackLocation(vreg));
    } else {
        // Find a register to hold the result of the instruction.
        RegisterIndex best = allocateRegister(ins, vreg);
        registers[best].set(vreg, ins, true);
        registers[best].type = virtualRegisters[vreg]->type();
        def->setOutput(LAllocation(registers[best].reg));
    }
}