Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_BacktrackingAllocator_h
#define jit_BacktrackingAllocator_h

#include "mozilla/Array.h"

#include "ds/PriorityQueue.h"
#include "ds/SplayTree.h"
#include "jit/LiveRangeAllocator.h"

// A backtracking, priority-queue based register allocator modeled on the
// design described in the following blog post:
//
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
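//
// At a high level the scheme works as follows (a sketch of the approach
// described in the post, not a normative account of this implementation):
// live intervals are queued by priority; the highest priority interval is
// popped and assigned a free register if possible; if every candidate
// register conflicts, an already-allocated interval with a lower spill
// weight may be evicted and requeued (the backtracking step), otherwise
// the popped interval is split into smaller pieces or spilled to memory.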

namespace js {
namespace jit {

// Information about a group of registers. Registers may be grouped together
// when (a) all of their lifetimes are disjoint, (b) they are of the same type
// (double / non-double) and (c) it is desirable that they have the same
// allocation.
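//
// For example (an illustrative case, not an exhaustive list): a definition
// with a MUST_REUSE_INPUT policy and the input it reuses have disjoint
// lifetimes and benefit from receiving the same allocation, since that
// avoids inserting a copy between them; see tryGroupReusedRegister below.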
struct VirtualRegisterGroup : public TempObject
{
    // All virtual registers in the group.
    Vector<uint32_t, 2, IonAllocPolicy> registers;

    // Desired physical register to use for registers in the group.
    LAllocation allocation;

    // Spill location to be shared by registers in the group.
    LAllocation spill;

    explicit VirtualRegisterGroup(TempAllocator &alloc)
      : registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
    {}

    uint32_t canonicalReg() {
        uint32_t minimum = registers[0];
        for (size_t i = 1; i < registers.length(); i++)
            minimum = Min(minimum, registers[i]);
        return minimum;
    }
};

class BacktrackingVirtualRegister : public VirtualRegister
{
    // If this register's definition is MUST_REUSE_INPUT, whether a copy must
    // be introduced before the definition that relaxes the policy.
    bool mustCopyInput_;

    // Spill location to use for this register.
    LAllocation canonicalSpill_;

    // Code position above which the canonical spill cannot be used; such
    // intervals may overlap other registers in the same group.
    CodePosition canonicalSpillExclude_;

    // If this register is associated with a group of other registers,
    // information about the group. This structure is shared between all
    // registers in the group.
    VirtualRegisterGroup *group_;

  public:
    explicit BacktrackingVirtualRegister(TempAllocator &alloc)
      : VirtualRegister(alloc)
    {}
    void setMustCopyInput() {
        mustCopyInput_ = true;
    }
    bool mustCopyInput() {
        return mustCopyInput_;
    }

    void setCanonicalSpill(LAllocation alloc) {
        JS_ASSERT(!alloc.isUse());
        canonicalSpill_ = alloc;
    }
    const LAllocation *canonicalSpill() const {
        return canonicalSpill_.isUse() ? nullptr : &canonicalSpill_;
    }

    void setCanonicalSpillExclude(CodePosition pos) {
        canonicalSpillExclude_ = pos;
    }
    bool hasCanonicalSpillExclude() const {
        return canonicalSpillExclude_.pos() != 0;
    }
    CodePosition canonicalSpillExclude() const {
        JS_ASSERT(hasCanonicalSpillExclude());
        return canonicalSpillExclude_;
    }

    void setGroup(VirtualRegisterGroup *group) {
        group_ = group;
    }
    VirtualRegisterGroup *group() {
        return group_;
    }
};

// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;

class BacktrackingAllocator
  : private LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>
{
    // Priority queue element: either an interval or group of intervals and the
    // associated priority.
    struct QueueItem
    {
        LiveInterval *interval;
        VirtualRegisterGroup *group;

        QueueItem(LiveInterval *interval, size_t priority)
          : interval(interval), group(nullptr), priority_(priority)
        {}

        QueueItem(VirtualRegisterGroup *group, size_t priority)
          : interval(nullptr), group(group), priority_(priority)
        {}

        static size_t priority(const QueueItem &v) {
            return v.priority_;
        }

      private:
        size_t priority_;
    };

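    // Note: QueueItem serves as both the element type and the priority
    // policy of the queue; PriorityQueue calls the static
    // QueueItem::priority() above so that the highest-priority item is
    // popped first.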
    PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;

    // A subrange over which a physical register is allocated.
    struct AllocatedRange {
        LiveInterval *interval;
        const LiveInterval::Range *range;

        AllocatedRange()
          : interval(nullptr), range(nullptr)
        {}

        AllocatedRange(LiveInterval *interval, const LiveInterval::Range *range)
          : interval(interval), range(range)
        {}

        static int compare(const AllocatedRange &v0, const AllocatedRange &v1) {
            // LiveInterval::Range includes 'from' but excludes 'to'.
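            // For example (illustrative positions): [4,8) and [8,12) do
            // not overlap and compare as -1, while [4,8) and [6,12) do
            // overlap and compare as 0, which is how lookups in the splay
            // tree below detect a conflicting allocation.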
            if (v0.range->to.pos() <= v1.range->from.pos())
                return -1;
            if (v0.range->from.pos() >= v1.range->to.pos())
                return 1;
            return 0;
        }
    };

    typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;

    // Each physical register is associated with the set of ranges over which
    // that register is currently allocated.
    struct PhysicalRegister {
        bool allocatable;
        AnyRegister reg;
        AllocatedRangeSet allocations;

        PhysicalRegister() : allocatable(false) {}
    };
    mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;

    // Ranges of code which are considered to be hot, for which good allocation
    // should be prioritized.
    AllocatedRangeSet hotcode;

  public:
    BacktrackingAllocator(MIRGenerator *mir, LIRGenerator *lir, LIRGraph &graph)
      : LiveRangeAllocator<BacktrackingVirtualRegister, /* forLSRA = */ false>(mir, lir, graph)
    { }

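    // Entry point: runs the whole allocation pipeline and returns false on
    // failure. A usage sketch (the real call site lives in Ion's compile
    // pipeline, not in this header):
    //
    //   BacktrackingAllocator regalloc(mir, lir, graph);
    //   if (!regalloc.go())
    //       return false;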
    bool go();

  private:

    typedef Vector<LiveInterval *, 4, SystemAllocPolicy> LiveIntervalVector;

    bool init();
    bool canAddToGroup(VirtualRegisterGroup *group, BacktrackingVirtualRegister *reg);
    bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
    bool tryGroupReusedRegister(uint32_t def, uint32_t use);
    bool groupAndQueueRegisters();
    bool tryAllocateFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool tryAllocateNonFixed(LiveInterval *interval, bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool processInterval(LiveInterval *interval);
    bool processGroup(VirtualRegisterGroup *group);
    bool setIntervalRequirement(LiveInterval *interval);
    bool tryAllocateRegister(PhysicalRegister &r, LiveInterval *interval,
                             bool *success, bool *pfixed, LiveInterval **pconflicting);
    bool tryAllocateGroupRegister(PhysicalRegister &r, VirtualRegisterGroup *group,
                                  bool *psuccess, bool *pfixed, LiveInterval **pconflicting);
    bool evictInterval(LiveInterval *interval);
    void distributeUses(LiveInterval *interval, const LiveIntervalVector &newIntervals);
    bool split(LiveInterval *interval, const LiveIntervalVector &newIntervals);
    bool requeueIntervals(const LiveIntervalVector &newIntervals);
    void spill(LiveInterval *interval);

    bool isReusedInput(LUse *use, LInstruction *ins, bool considerCopy);
    bool isRegisterUse(LUse *use, LInstruction *ins, bool considerCopy = false);
    bool isRegisterDefinition(LiveInterval *interval);
    bool addLiveInterval(LiveIntervalVector &intervals, uint32_t vreg,
                         LiveInterval *spillInterval,
                         CodePosition from, CodePosition to);

    bool resolveControlFlow();
    bool reifyAllocations();
    bool populateSafepoints();

    void dumpRegisterGroups();
    void dumpLiveness();
    void dumpAllocations();

    struct PrintLiveIntervalRange;

    bool minimalDef(const LiveInterval *interval, LInstruction *ins);
    bool minimalUse(const LiveInterval *interval, LInstruction *ins);
    bool minimalInterval(const LiveInterval *interval, bool *pfixed = nullptr);

    // Heuristic methods.
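    //
    // Roughly (a summary, hedged; the definitions live in the .cpp file):
    // priority reflects how much code an interval or group covers, so that
    // wider intervals get to pick registers first, while spill weight
    // estimates the cost of not keeping it in a register, based on the
    // number and kind of its register uses. Conflicts are resolved by
    // evicting whichever side has the lower spill weight.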

    size_t computePriority(const LiveInterval *interval);
    size_t computeSpillWeight(const LiveInterval *interval);

    size_t computePriority(const VirtualRegisterGroup *group);
    size_t computeSpillWeight(const VirtualRegisterGroup *group);

    bool chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict);

    bool splitAt(LiveInterval *interval,
                 const SplitPositionVector &splitPositions);
    bool trySplitAcrossHotcode(LiveInterval *interval, bool *success);
    bool trySplitAfterLastRegisterUse(LiveInterval *interval, LiveInterval *conflict, bool *success);
    bool splitAtAllRegisterUses(LiveInterval *interval);
    bool splitAcrossCalls(LiveInterval *interval);
};

} // namespace jit
} // namespace js

#endif /* jit_BacktrackingAllocator_h */