Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic depending on private-browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_BacktrackingAllocator_h
8 #define jit_BacktrackingAllocator_h
10 #include "mozilla/Array.h"
12 #include "ds/PriorityQueue.h"
13 #include "ds/SplayTree.h"
14 #include "jit/LiveRangeAllocator.h"
// Backtracking, priority-queue-based register allocator, modeled on the
// approach described in the following blog post:
//
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
21 namespace js {
22 namespace jit {
24 // Information about a group of registers. Registers may be grouped together
25 // when (a) all of their lifetimes are disjoint, (b) they are of the same type
26 // (double / non-double) and (c) it is desirable that they have the same
27 // allocation.
28 struct VirtualRegisterGroup : public TempObject
29 {
30 // All virtual registers in the group.
31 Vector<uint32_t, 2, IonAllocPolicy> registers;
33 // Desired physical register to use for registers in the group.
34 LAllocation allocation;
36 // Spill location to be shared by registers in the group.
37 LAllocation spill;
39 explicit VirtualRegisterGroup(TempAllocator &alloc)
40 : registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
41 {}
43 uint32_t canonicalReg() {
44 uint32_t minimum = registers[0];
45 for (size_t i = 1; i < registers.length(); i++)
46 minimum = Min(minimum, registers[i]);
47 return minimum;
48 }
49 };
51 class BacktrackingVirtualRegister : public VirtualRegister
52 {
53 // If this register's definition is MUST_REUSE_INPUT, whether a copy must
54 // be introduced before the definition that relaxes the policy.
55 bool mustCopyInput_;
57 // Spill location to use for this register.
58 LAllocation canonicalSpill_;
60 // Code position above which the canonical spill cannot be used; such
61 // intervals may overlap other registers in the same group.
62 CodePosition canonicalSpillExclude_;
64 // If this register is associated with a group of other registers,
65 // information about the group. This structure is shared between all
66 // registers in the group.
67 VirtualRegisterGroup *group_;
69 public:
70 explicit BacktrackingVirtualRegister(TempAllocator &alloc)
71 : VirtualRegister(alloc)
72 {}
73 void setMustCopyInput() {
74 mustCopyInput_ = true;
75 }
76 bool mustCopyInput() {
77 return mustCopyInput_;
78 }
80 void setCanonicalSpill(LAllocation alloc) {
81 JS_ASSERT(!alloc.isUse());
82 canonicalSpill_ = alloc;
83 }
84 const LAllocation *canonicalSpill() const {
85 return canonicalSpill_.isUse() ? nullptr : &canonicalSpill_;
86 }
88 void setCanonicalSpillExclude(CodePosition pos) {
89 canonicalSpillExclude_ = pos;
90 }
91 bool hasCanonicalSpillExclude() const {
92 return canonicalSpillExclude_.pos() != 0;
93 }
94 CodePosition canonicalSpillExclude() const {
95 JS_ASSERT(hasCanonicalSpillExclude());
96 return canonicalSpillExclude_;
97 }
99 void setGroup(VirtualRegisterGroup *group) {
100 group_ = group;
101 }
102 VirtualRegisterGroup *group() {
103 return group_;
104 }
105 };
// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
// The backtracking register allocator. Intervals (or groups of intervals) are
// pulled from a priority queue and assigned registers; on conflict, either the
// conflicting allocation is evicted and re-queued or the current interval is
// split/spilled, guided by the heuristic methods below.
class BacktrackingAllocator
  : private LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>
{
    // Priority queue element: either an interval or group of intervals and the
    // associated priority. Exactly one of 'interval' and 'group' is non-null.
    struct QueueItem
    {
        LiveInterval *interval;
        VirtualRegisterGroup *group;

        QueueItem(LiveInterval *interval, size_t priority)
          : interval(interval), group(nullptr), priority_(priority)
        {}

        QueueItem(VirtualRegisterGroup *group, size_t priority)
          : interval(nullptr), group(group), priority_(priority)
        {}

        // Priority accessor required by PriorityQueue.
        static size_t priority(const QueueItem &v) {
            return v.priority_;
        }

      private:
        size_t priority_;
    };

    PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;

    // A subrange over which a physical register is allocated.
    struct AllocatedRange {
        LiveInterval *interval;
        const LiveInterval::Range *range;

        AllocatedRange()
          : interval(nullptr), range(nullptr)
        {}

        AllocatedRange(LiveInterval *interval, const LiveInterval::Range *range)
          : interval(interval), range(range)
        {}

        // Ordering for AllocatedRangeSet: disjoint ranges order by position,
        // and any two overlapping ranges compare as equal (return 0), so a
        // tree lookup for a range lands on one that conflicts with it.
        static int compare(const AllocatedRange &v0, const AllocatedRange &v1) {
            // LiveInterval::Range includes 'from' but excludes 'to'.
            if (v0.range->to.pos() <= v1.range->from.pos())
                return -1;
            if (v0.range->from.pos() >= v1.range->to.pos())
                return 1;
            return 0;
        }
    };

    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;

    // Each physical register is associated with the set of ranges over which
    // that register is currently allocated.
    struct PhysicalRegister {
        bool allocatable;
        AnyRegister reg;
        AllocatedRangeSet allocations;

        PhysicalRegister() : allocatable(false) {}
    };
    mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;

    // Ranges of code which are considered to be hot, for which good allocation
    // should be prioritized.
    AllocatedRangeSet hotcode;

  public:
    BacktrackingAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>(mir, lir, graph)
    { }

    // Entry point: run the allocator over the graph.
    bool go();

  private:

    typedef Vector<LiveInterval *, 4, SystemAllocPolicy> LiveIntervalVector;

    bool init();

    // Register grouping (see VirtualRegisterGroup).
    bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
    bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
    bool tryGroupReusedRegister(uint32_t def, uint32_t use);
    bool groupAndQueueRegisters();

    // Allocation of queued intervals and groups. The *success/*pfixed/
    // *pconflicting out-params report whether allocation succeeded and, if
    // not, what it conflicted with.
    bool tryAllocateFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool tryAllocateNonFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool processInterval(LiveInterval *interval);
    bool processGroup(VirtualRegisterGroup *group);
    bool setIntervalRequirement(LiveInterval *interval);
    bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
                             bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
                                  bool *psuccess, bool *pfixed, LiveInterval **pconflicting);

    // Eviction, splitting and spilling of intervals that did not fit.
    bool evictInterval(LiveInterval *interval);
    void distributeUses(LiveInterval *interval, const LiveIntervalVector &newIntervals);
    bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
    bool requeueIntervals(const LiveIntervalVector &newIntervals);
    void spill(LiveInterval *interval);

    bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy);
    bool isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy = false);
    bool isRegisterDefinition(LiveInterval *interval);
    bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
                         LiveInterval *spillInterval,
                         CodePosition from, CodePosition to);

    // Post-allocation passes.
    bool resolveControlFlow();
    bool reifyAllocations();
    bool populateSafepoints();

    // Debug dumps.
    void dumpRegisterGroups();
    void dumpLiveness();
    void dumpAllocations();

    struct PrintLiveIntervalRange;

    bool minimalDef(const LiveInterval *interval, LInstruction *ins);
    bool minimalUse(const LiveInterval *interval, LInstruction *ins);
    bool minimalInterval(const LiveInterval *interval, bool *pfixed = nullptr);

    // Heuristic methods.

    size_t computePriority(const LiveInterval *interval);
    size_t computeSpillWeight(const LiveInterval *interval);

    size_t computePriority(const VirtualRegisterGroup *group);
    size_t computeSpillWeight(const VirtualRegisterGroup *group);

    // Splitting strategies, tried in turn by chooseIntervalSplit.
    bool chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict);

    bool splitAt(LiveInterval *interval,
                 const SplitPositionVector &splitPositions);
    bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
    bool trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success);
    bool splitAtAllRegisterUses(LiveInterval *interval);
    bool splitAcrossCalls(LiveInterval *interval);
};
249 } // namespace jit
250 } // namespace js
252 #endif /* jit_BacktrackingAllocator_h */