Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
Some reservations remain about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
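A rough sketch of that gating, not the actual patch: the pref read below uses the real mozilla::Preferences API, but the helper name, the boolean pref type, and the way callers obtain the private-browsing flag are assumptions for illustration only.

#include "mozilla/Preferences.h"

// Hypothetical helper: double-key (isolate) third-party cookies when in
// private browsing, or when the user has opted in via the pref. The pref
// is read as a boolean here purely for illustration.
static bool
ThirdPartyIsolationEnabled(bool aIsPrivateBrowsing)
{
    if (aIsPrivateBrowsing)
        return true;
    return mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
}

// GetCookieStringCommon would then pass the first-party key only when
// isolation is enabled, and nullptr otherwise, so every FindCookie caller
// that disables double-key logic funnels through the same convention:
//
//     const nsCString *key =
//         ThirdPartyIsolationEnabled(isPrivate) ? &firstPartyBaseDomain
//                                               : nullptr;
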
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/UnreachableCodeElimination.h"

#include "jit/AliasAnalysis.h"
#include "jit/IonAnalysis.h"
#include "jit/MIRGenerator.h"
#include "jit/ValueNumbering.h"

using namespace js;
using namespace jit;
bool
UnreachableCodeElimination::analyze()
{
    // The goal of this routine is to eliminate code that is
    // unreachable, either because there is no path from the entry
    // block to the code, or because the path traverses a conditional
    // branch where the condition is a constant (e.g., "if (false) {
    // ... }"). The latter can either appear in the source form or
    // arise due to optimizations.
    //
    // The strategy is straightforward. The pass begins with a
    // depth-first search. We set a bit on each basic block that
    // is visited. If a block terminates in a conditional branch
    // predicated on a constant, we rewrite the block to an unconditional
    // jump and do not visit the now irrelevant basic block.
    //
    // Once the initial DFS is complete, we do a second pass over the
    // blocks to find those that were not reached. Those blocks are
    // simply removed wholesale. We must also correct any phis that
    // may be affected.

    // Pass 1: Identify unreachable blocks (if any).
    if (!prunePointlessBranchesAndMarkReachableBlocks())
        return false;

    return removeUnmarkedBlocksAndCleanup();
}
bool
UnreachableCodeElimination::removeUnmarkedBlocks(size_t marked)
{
    marked_ = marked;
    return removeUnmarkedBlocksAndCleanup();
}
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndCleanup()
{
    // Everything is reachable, no work required.
    JS_ASSERT(marked_ <= graph_.numBlocks());
    if (marked_ == graph_.numBlocks()) {
        graph_.unmarkBlocks();
        return true;
    }

    // Pass 2: Remove unmarked blocks (see analyze() above).
    if (!removeUnmarkedBlocksAndClearDominators())
        return false;
    graph_.unmarkBlocks();

    AssertGraphCoherency(graph_);

    IonSpewPass("UCE-mid-point");

    // Pass 3: Recompute dominators and tweak phis.
    BuildDominatorTree(graph_);
    if (redundantPhis_ && !EliminatePhis(mir_, graph_, ConservativeObservability))
        return false;

    // Pass 4: Rerun alias analysis
    if (rerunAliasAnalysis_) {
        AliasAnalysis analysis(mir_, graph_);
        if (!analysis.analyze())
            return false;
    }

    // Pass 5: It's important for optimizations to re-run GVN (and in
    // turn alias analysis) after UCE if we eliminated branches.
    if (rerunAliasAnalysis_ && mir_->optimizationInfo().gvnEnabled()) {
        ValueNumberer gvn(mir_, graph_, mir_->optimizationInfo().gvnKind() == GVN_Optimistic);
        if (!gvn.clear() || !gvn.analyze())
            return false;
        IonSpewPass("GVN-after-UCE");
        AssertExtendedGraphCoherency(graph_);

        if (mir_->shouldCancel("GVN-after-UCE"))
            return false;
    }

    return true;
}
bool
UnreachableCodeElimination::enqueue(MBasicBlock *block, BlockList &list)
{
    if (block->isMarked())
        return true;

    block->mark();
    marked_++;
    return list.append(block);
}
MBasicBlock *
UnreachableCodeElimination::optimizableSuccessor(MBasicBlock *block)
{
    // If the last instruction in `block` is a test instruction of a
    // constant value, returns the successor that the branch will
    // always branch to at runtime. Otherwise, returns nullptr.

    MControlInstruction *ins = block->lastIns();
    if (!ins->isTest())
        return nullptr;

    MTest *testIns = ins->toTest();
    MDefinition *v = testIns->getOperand(0);
    if (!v->isConstant())
        return nullptr;

    BranchDirection bdir = v->toConstant()->valueToBoolean() ? TRUE_BRANCH : FALSE_BRANCH;
    return testIns->branchSuccessor(bdir);
}
bool
UnreachableCodeElimination::prunePointlessBranchesAndMarkReachableBlocks()
{
    BlockList worklist, optimizableBlocks;

    // Process everything reachable from the start block, ignoring any
    // OSR block.
    if (!enqueue(graph_.entryBlock(), worklist))
        return false;
    while (!worklist.empty()) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = worklist.popCopy();

        // If this block is a test on a constant operand, only enqueue
        // the relevant successor. Also, remember the block for later.
        if (MBasicBlock *succ = optimizableSuccessor(block)) {
            if (!optimizableBlocks.append(block))
                return false;
            if (!enqueue(succ, worklist))
                return false;
        } else {
            // Otherwise just visit all successors.
            for (size_t i = 0; i < block->numSuccessors(); i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (!enqueue(succ, worklist))
                    return false;
            }
        }
    }

    // Now, if there is an OSR block, check that all of its successors
    // were reachable (bug 880377). If not, we are in danger of
    // creating a CFG with two disjoint parts, so simply mark all
    // blocks as reachable. This generally occurs when the TI info for
    // stack types is incorrect or incomplete, due to operations that
    // have not yet executed in baseline.
    if (graph_.osrBlock()) {
        MBasicBlock *osrBlock = graph_.osrBlock();
        JS_ASSERT(!osrBlock->isMarked());
        if (!enqueue(osrBlock, worklist))
            return false;
        for (size_t i = 0; i < osrBlock->numSuccessors(); i++) {
            if (!osrBlock->getSuccessor(i)->isMarked()) {
                // OSR block has an otherwise unreachable successor, abort.
                for (MBasicBlockIterator iter(graph_.begin()); iter != graph_.end(); iter++)
                    iter->mark();
                marked_ = graph_.numBlocks();
                return true;
            }
        }
    }

    // Now that we know we will not abort due to OSR, go back and
    // transform any tests on constant operands into gotos.
    for (uint32_t i = 0; i < optimizableBlocks.length(); i++) {
        MBasicBlock *block = optimizableBlocks[i];
        MBasicBlock *succ = optimizableSuccessor(block);
        JS_ASSERT(succ);

        MGoto *gotoIns = MGoto::New(graph_.alloc(), succ);
        block->discardLastIns();
        block->end(gotoIns);
        MBasicBlock *successorWithPhis = block->successorWithPhis();
        if (successorWithPhis && successorWithPhis != succ)
            block->setSuccessorWithPhis(nullptr, 0);
    }

    return true;
}
void
UnreachableCodeElimination::checkDependencyAndRemoveUsesFromUnmarkedBlocks(MDefinition *instr)
{
    // When the instruction depends on a removed block, alias analysis
    // needs to be rerun to compute the right dependency.
    if (!disableAliasAnalysis_ && instr->dependency() && !instr->dependency()->block()->isMarked())
        rerunAliasAnalysis_ = true;

    for (MUseIterator iter(instr->usesBegin()); iter != instr->usesEnd(); ) {
        if (!iter->consumer()->block()->isMarked()) {
            instr->setUseRemovedUnchecked();
            iter = instr->removeUse(iter);
        } else {
            iter++;
        }
    }
}
bool
UnreachableCodeElimination::removeUnmarkedBlocksAndClearDominators()
{
    // Removes blocks that are not marked from the graph. For blocks
    // that *are* marked, clears the mark and adjusts the id to its
    // new value. Also adds blocks that are immediately reachable
    // from an unmarked block to the frontier.

    size_t id = marked_;
    for (PostorderIterator iter(graph_.poBegin()); iter != graph_.poEnd();) {
        if (mir_->shouldCancel("Eliminate Unreachable Code"))
            return false;

        MBasicBlock *block = *iter;
        iter++;

        // Unconditionally clear the dominators. It's somewhat complex to
        // adjust the values and relatively fast to just recompute.
        block->clearDominatorInfo();

        if (block->isMarked()) {
            block->setId(--id);
            for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
                checkDependencyAndRemoveUsesFromUnmarkedBlocks(*iter);
        } else {
            if (block->numPredecessors() > 1) {
                // If this block had phis, then any reachable
                // predecessors need to have the successorWithPhis
                // flag cleared.
                for (size_t i = 0; i < block->numPredecessors(); i++)
                    block->getPredecessor(i)->setSuccessorWithPhis(nullptr, 0);
            }

            if (block->isLoopBackedge()) {
                // NB. We have to update the loop header if we
                // eliminate the backedge. At first I thought this
                // check would be insufficient, because it would be
                // possible to have code like this:
                //
                //    while (true) {
                //       ...;
                //       if (1 == 1) break;
                //    }
                //
                // in which the backedge is removed as part of
                // rewriting the condition, but no actual blocks are
                // removed. However, in all such cases, the backedge
                // would be a critical edge and hence the critical
                // edge block is being removed.
                block->loopHeaderOfBackedge()->clearLoopHeader();
            }

            for (size_t i = 0, c = block->numSuccessors(); i < c; i++) {
                MBasicBlock *succ = block->getSuccessor(i);
                if (succ->isMarked()) {
                    // succ is on the frontier of blocks to be removed:
                    succ->removePredecessor(block);

                    if (!redundantPhis_) {
                        for (MPhiIterator iter(succ->phisBegin()); iter != succ->phisEnd(); iter++) {
                            if (iter->operandIfRedundant()) {
                                redundantPhis_ = true;
                                break;
                            }
                        }
                    }
                }
            }

            graph_.removeBlock(block);
        }
    }

    JS_ASSERT(id == 0);

    return true;
}
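
For context, a hedged sketch of how this pass is typically driven from Ion's optimization pipeline; the exact call site and spew label are assumptions based on how the sibling passes in this file are invoked, not a quote from Ion.cpp.

// Assumed call site: run UCE over the finished MIR graph, then spew and
// re-check graph invariants, bailing out if compilation was cancelled.
UnreachableCodeElimination uce(mir, graph);
if (!uce.analyze())
    return false;
IonSpewPass("UCE");
AssertExtendedGraphCoherency(graph);

if (mir->shouldCancel("UCE"))
    return false;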