Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic according to private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie, where it counts... Some reservations remain about how to convince
FindCookie callers to test the condition and pass a nullptr when double-key logic
is disabled.
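A minimal sketch of the gating predicate, assuming a hypothetical
ThirdPartyIsolationEnabled() helper; the actual plumbing through
GetCookieStringCommon and FindCookie (and how callers obtain the
private-browsing state) is not shown:

    #include "mozilla/Preferences.h"

    // Hypothetical helper: true when cookie lookups should be double-keyed
    // by first-party origin. FindCookie callers would test this and pass a
    // nullptr first-party key when it returns false.
    static bool
    ThirdPartyIsolationEnabled(bool aIsPrivateBrowsing)
    {
        return aIsPrivateBrowsing ||
               mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }
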
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ParallelSafetyAnalysis.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/UnreachableCodeElimination.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"

using namespace js;
using namespace jit;

using parallel::Spew;
using parallel::SpewMIR;
using parallel::SpewCompile;
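
// The macros below generate the MInstructionVisitor methods used in the
// big op table inside ParallelSafetyVisitor:
//
//   SAFE_OP(op)            -- op is always safe in parallel execution.
//   CUSTOM_OP(op)          -- op has a hand-written visit##op() below.
//   DROP_OP(op)            -- op is simply discarded from its block.
//   SPECIALIZED_OP(op, f)  -- op is safe only for the MIR types in `f`.
//   UNSAFE_OP(op)          -- op marks the block as unsafe (forces bailout).
//   WRITE_GUARDED_OP       -- op writes to an object; a write guard is inserted.
//   MAYBE_WRITE_GUARDED_OP -- as above, unless the write is marked racy().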

#define SAFE_OP(op)                             \
    virtual bool visit##op(M##op *prop) { return true; }

#define CUSTOM_OP(op)                           \
    virtual bool visit##op(M##op *prop);

#define DROP_OP(op)                             \
    virtual bool visit##op(M##op *ins) {        \
        MBasicBlock *block = ins->block();      \
        block->discard(ins);                    \
        return true;                            \
    }

#define PERMIT(T) (1 << T)

#define PERMIT_INT32 (PERMIT(MIRType_Int32))
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))

#define SPECIALIZED_OP(op, flags)                                              \
    virtual bool visit##op(M##op *ins) {                                       \
        return visitSpecializedInstruction(ins, ins->specialization(), flags); \
    }

#define UNSAFE_OP(op)                           \
    virtual bool visit##op(M##op *ins) {        \
        SpewMIR(ins, "Unsafe");                 \
        return markUnsafe();                    \
    }

#define WRITE_GUARDED_OP(op, obj)                   \
    virtual bool visit##op(M##op *prop) {           \
        return insertWriteGuard(prop, prop->obj()); \
    }

#define MAYBE_WRITE_GUARDED_OP(op, obj)             \
    virtual bool visit##op(M##op *prop) {           \
        if (prop->racy())                           \
            return true;                            \
        return insertWriteGuard(prop, prop->obj()); \
    }

class ParallelSafetyVisitor : public MInstructionVisitor
{
    MIRGraph &graph_;
    bool unsafe_;
    MDefinition *cx_;

    bool insertWriteGuard(MInstruction *writeInstruction, MDefinition *valueBeingWritten);

    bool replaceWithNewPar(MInstruction *newInstruction, JSObject *templateObject);
    bool replace(MInstruction *oldInstruction, MInstruction *replacementInstruction);

    bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);

    // Intended for use in a visitXyz() instruction like "return
    // markUnsafe()". Sets the unsafe flag and returns true (since
    // this does not indicate an unrecoverable compilation failure).
    bool markUnsafe() {
        JS_ASSERT(!unsafe_);
        unsafe_ = true;
        return true;
    }

    TempAllocator &alloc() const {
        return graph_.alloc();
    }

  public:
    ParallelSafetyVisitor(MIRGraph &graph)
      : graph_(graph),
        unsafe_(false),
        cx_(nullptr)
    { }

    void clearUnsafe() { unsafe_ = false; }
    bool unsafe() { return unsafe_; }
    MDefinition *ForkJoinContext() {
        if (!cx_)
            cx_ = graph_.forkJoinContext();
        return cx_;
    }

    bool convertToBailout(MBasicBlock *block, MInstruction *ins);

    // I am taking the policy of blacklisting everything that's not
    // obviously safe for now. We can loosen as we need.

    SAFE_OP(Constant)
    UNSAFE_OP(CloneLiteral)
    SAFE_OP(Parameter)
    SAFE_OP(Callee)
    SAFE_OP(TableSwitch)
    SAFE_OP(Goto)
    SAFE_OP(Test)
    SAFE_OP(Compare)
    SAFE_OP(Phi)
    SAFE_OP(Beta)
    UNSAFE_OP(OsrValue)
    UNSAFE_OP(OsrScopeChain)
    UNSAFE_OP(OsrReturnValue)
    UNSAFE_OP(OsrArgumentsObject)
    UNSAFE_OP(ReturnFromCtor)
    CUSTOM_OP(CheckOverRecursed)
    UNSAFE_OP(DefVar)
    UNSAFE_OP(DefFun)
    UNSAFE_OP(CreateThis)
    CUSTOM_OP(CreateThisWithTemplate)
    UNSAFE_OP(CreateThisWithProto)
    UNSAFE_OP(CreateArgumentsObject)
    UNSAFE_OP(GetArgumentsObjectArg)
    UNSAFE_OP(SetArgumentsObjectArg)
    UNSAFE_OP(ComputeThis)
    UNSAFE_OP(LoadArrowThis)
    CUSTOM_OP(Call)
    UNSAFE_OP(ApplyArgs)
    UNSAFE_OP(ArraySplice)
    UNSAFE_OP(Bail)
    UNSAFE_OP(AssertFloat32)
    UNSAFE_OP(GetDynamicName)
    UNSAFE_OP(FilterArgumentsOrEval)
    UNSAFE_OP(CallDirectEval)
    SAFE_OP(BitNot)
    SAFE_OP(TypeOf)
    UNSAFE_OP(ToId)
    SAFE_OP(BitAnd)
    SAFE_OP(BitOr)
    SAFE_OP(BitXor)
    SAFE_OP(Lsh)
    SAFE_OP(Rsh)
    SAFE_OP(Ursh)
    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
    SAFE_OP(Abs)
    SAFE_OP(Sqrt)
    UNSAFE_OP(Atan2)
    UNSAFE_OP(Hypot)
    CUSTOM_OP(MathFunction)
    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
    CUSTOM_OP(Concat)
    SAFE_OP(ConcatPar)
    UNSAFE_OP(CharCodeAt)
    UNSAFE_OP(FromCharCode)
    UNSAFE_OP(StringSplit)
    SAFE_OP(Return)
    CUSTOM_OP(Throw)
    SAFE_OP(Box) // Boxing just creates a JSVal, doesn't alloc.
    SAFE_OP(Unbox)
    SAFE_OP(GuardObject)
    SAFE_OP(ToDouble)
    SAFE_OP(ToFloat32)
    SAFE_OP(ToInt32)
    SAFE_OP(TruncateToInt32)
    SAFE_OP(MaybeToDoubleElement)
    CUSTOM_OP(ToString)
    SAFE_OP(NewSlots)
    CUSTOM_OP(NewArray)
    CUSTOM_OP(NewObject)
    CUSTOM_OP(NewCallObject)
    CUSTOM_OP(NewRunOnceCallObject)
    CUSTOM_OP(NewDerivedTypedObject)
    UNSAFE_OP(InitElem)
    UNSAFE_OP(InitElemGetterSetter)
    UNSAFE_OP(MutateProto)
    UNSAFE_OP(InitProp)
    UNSAFE_OP(InitPropGetterSetter)
    SAFE_OP(Start)
    UNSAFE_OP(OsrEntry)
    SAFE_OP(Nop)
    UNSAFE_OP(RegExp)
    CUSTOM_OP(Lambda)
    UNSAFE_OP(LambdaArrow)
    UNSAFE_OP(ImplicitThis)
    SAFE_OP(Slots)
    SAFE_OP(Elements)
    SAFE_OP(ConstantElements)
    SAFE_OP(LoadSlot)
    WRITE_GUARDED_OP(StoreSlot, slots)
    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
    SAFE_OP(FilterTypeSet)
    SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
    SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
    UNSAFE_OP(PostWriteBarrier)
    SAFE_OP(GetPropertyCache)
    SAFE_OP(GetPropertyPolymorphic)
    UNSAFE_OP(SetPropertyPolymorphic)
    SAFE_OP(GetElementCache)
    WRITE_GUARDED_OP(SetElementCache, object)
    UNSAFE_OP(BindNameCache)
    SAFE_OP(GuardShape)
    SAFE_OP(GuardObjectType)
    SAFE_OP(GuardObjectIdentity)
    SAFE_OP(GuardClass)
    SAFE_OP(AssertRange)
    SAFE_OP(ArrayLength)
    WRITE_GUARDED_OP(SetArrayLength, elements)
    SAFE_OP(TypedArrayLength)
    SAFE_OP(TypedArrayElements)
    SAFE_OP(TypedObjectElements)
    SAFE_OP(SetTypedObjectOffset)
    SAFE_OP(InitializedLength)
    WRITE_GUARDED_OP(SetInitializedLength, elements)
    SAFE_OP(Not)
    SAFE_OP(NeuterCheck)
    SAFE_OP(BoundsCheck)
    SAFE_OP(BoundsCheckLower)
    SAFE_OP(LoadElement)
    SAFE_OP(LoadElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
    WRITE_GUARDED_OP(StoreElementHole, elements)
    UNSAFE_OP(ArrayPopShift)
    UNSAFE_OP(ArrayPush)
    SAFE_OP(LoadTypedArrayElement)
    SAFE_OP(LoadTypedArrayElementHole)
    SAFE_OP(LoadTypedArrayElementStatic)
    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
    WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
    UNSAFE_OP(StoreTypedArrayElementStatic)
    UNSAFE_OP(ClampToUint8)
    SAFE_OP(LoadFixedSlot)
    WRITE_GUARDED_OP(StoreFixedSlot, object)
    UNSAFE_OP(CallGetProperty)
    UNSAFE_OP(GetNameCache)
    UNSAFE_OP(CallGetIntrinsicValue)
    UNSAFE_OP(CallsiteCloneCache)
    UNSAFE_OP(CallGetElement)
    WRITE_GUARDED_OP(CallSetElement, object)
    UNSAFE_OP(CallInitElementArray)
    WRITE_GUARDED_OP(CallSetProperty, object)
    UNSAFE_OP(DeleteProperty)
    UNSAFE_OP(DeleteElement)
    WRITE_GUARDED_OP(SetPropertyCache, object)
    UNSAFE_OP(IteratorStart)
    UNSAFE_OP(IteratorNext)
    UNSAFE_OP(IteratorMore)
    UNSAFE_OP(IteratorEnd)
    SAFE_OP(StringLength)
    SAFE_OP(ArgumentsLength)
    SAFE_OP(GetFrameArgument)
    UNSAFE_OP(SetFrameArgument)
    UNSAFE_OP(RunOncePrologue)
    CUSTOM_OP(Rest)
    SAFE_OP(RestPar)
    SAFE_OP(Floor)
    SAFE_OP(Round)
    UNSAFE_OP(InstanceOf)
    CUSTOM_OP(InterruptCheck)
    SAFE_OP(ForkJoinContext)
    SAFE_OP(ForkJoinGetSlice)
    SAFE_OP(NewPar)
    SAFE_OP(NewDenseArrayPar)
    SAFE_OP(NewCallObjectPar)
    SAFE_OP(LambdaPar)
    SAFE_OP(AbortPar)
    UNSAFE_OP(ArrayConcat)
    UNSAFE_OP(GetDOMProperty)
    UNSAFE_OP(GetDOMMember)
    UNSAFE_OP(SetDOMProperty)
    UNSAFE_OP(NewStringObject)
    UNSAFE_OP(Random)
    SAFE_OP(Pow)
    SAFE_OP(PowHalf)
    UNSAFE_OP(RegExpTest)
    UNSAFE_OP(RegExpExec)
    UNSAFE_OP(RegExpReplace)
    UNSAFE_OP(StringReplace)
    UNSAFE_OP(CallInstanceOf)
    UNSAFE_OP(ProfilerStackOp)
    UNSAFE_OP(GuardString)
    UNSAFE_OP(NewDeclEnvObject)
    UNSAFE_OP(In)
    UNSAFE_OP(InArray)
    SAFE_OP(GuardThreadExclusive)
    SAFE_OP(InterruptCheckPar)
    SAFE_OP(CheckOverRecursedPar)
    SAFE_OP(FunctionDispatch)
    SAFE_OP(TypeObjectDispatch)
    SAFE_OP(IsCallable)
    SAFE_OP(HaveSameClass)
    SAFE_OP(HasClass)
    UNSAFE_OP(EffectiveAddress)
    UNSAFE_OP(AsmJSUnsignedToDouble)
    UNSAFE_OP(AsmJSUnsignedToFloat32)
    UNSAFE_OP(AsmJSNeg)
    UNSAFE_OP(AsmJSLoadHeap)
    UNSAFE_OP(AsmJSStoreHeap)
    UNSAFE_OP(AsmJSLoadGlobalVar)
    UNSAFE_OP(AsmJSStoreGlobalVar)
    UNSAFE_OP(AsmJSLoadFuncPtr)
    UNSAFE_OP(AsmJSLoadFFIFunc)
    UNSAFE_OP(AsmJSReturn)
    UNSAFE_OP(AsmJSVoidReturn)
    UNSAFE_OP(AsmJSPassStackArg)
    UNSAFE_OP(AsmJSParameter)
    UNSAFE_OP(AsmJSCall)
    DROP_OP(RecompileCheck)

    // It looks like this could easily be made safe:
    UNSAFE_OP(ConvertElementsToDoubles)
};

bool
ParallelSafetyAnalysis::analyze()
{
    // Walk the basic blocks in a DFS. When we encounter a block with an
    // unsafe instruction, then we know that this block will bailout when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, though, because the graph is sorted
    // in RPO. Therefore, we just use the marked flags to tell us
    // when we visited some predecessor of the current block.
    ParallelSafetyVisitor visitor(graph_);
    graph_.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (mir_->shouldCancel("ParallelSafetyAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
            MInstruction *instr = nullptr;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir_->shouldCancel("ParallelSafetyAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                if (!instr->accept(&visitor)) {
                    SpewMIR(instr, "Unaccepted");
                    return false;
                }
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bailout.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bailout.
                if (*block == graph_.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement block that will bail out.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelSafetyAnalysis");

    UnreachableCodeElimination uce(mir_, graph_);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelSafetyAnalysis");
    AssertExtendedGraphCoherency(graph_);

    if (!removeResumePointOperands())
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph_);

    return true;
}

bool
ParallelSafetyAnalysis::removeResumePointOperands()
{
    // In parallel exec mode, nothing is effectful, therefore we do
    // not need to reconstruct interpreter state and can simply
    // bailout by returning a special code. Ideally we'd either
    // remove the unused resume points or else never generate them in
    // the first place, but I encountered various assertions and
    // crashes attempting to do that, so for the time being I simply
    // replace their operands with undefined. This prevents them from
    // interfering with DCE and other optimizations. It is also *necessary*
    // to handle cases like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.

    MConstant *udef = nullptr;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                JS_ASSERT(udef == nullptr);
                udef = MConstant::New(graph_.alloc(), UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}

void
ParallelSafetyAnalysis::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
                                                     MDefinition *withDef)
{
    for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++)
        resumePoint->replaceOperand(i, withDef);
}

bool
ParallelSafetyVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe());          // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here

    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();

    // This block is no longer reachable.
    block->unmark();

    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);

        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;

        // create bailout block to insert on this edge
        MBasicBlock *bailBlock = MBasicBlock::NewAbortPar(graph_, block->info(), pred,
                                                          block->pc(),
                                                          block->entryResumePoint());
        if (!bailBlock)
            return false;

        // if `block` had phis, we are replacing it with `bailBlock` which does not
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(nullptr, 0);

        // redirect the predecessor to the bailout block
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);

        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Memory allocation
//
// Simple memory allocation opcodes---those which ultimately compile
// down to a (possibly inlined) invocation of NewGCThing()---are
// replaced with MNewPar, which is supplied with the thread context.
// These allocations will take place using per-helper-thread arenas.
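//
// Schematically (MIR shorthand, not actual opcode syntax), an
// allocation such as
//
//     obj = NewObject(templateObject)
//
// is rewritten by replaceWithNewPar() below into
//
//     cx  = ForkJoinContext()
//     obj = NewPar(cx, templateObject)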

bool
ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins)
{
    return replaceWithNewPar(ins, ins->templateObject());
}

bool
ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewRunOnceCallObject(MNewRunOnceCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitLambda(MLambda *ins)
{
    if (ins->info().singletonType || ins->info().useNewTypeForClone) {
        // slow path: bail on parallel execution.
        return markUnsafe();
    }

    // fast path: replace with LambdaPar op
    replace(ins, MLambdaPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewObject(MNewObject *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewArray(MNewArray *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewDerivedTypedObject(MNewDerivedTypedObject *ins)
{
    // FIXME(Bug 984090) -- There should really be a parallel-safe
    // version of NewDerivedTypedObject. However, until that is
    // implemented, let's just ignore those with 0 uses, since they
    // will be stripped out by DCE later.
    if (ins->useCount() == 0)
        return true;

    SpewMIR(ins, "visitNewDerivedTypedObject");
    return markUnsafe();
}

bool
ParallelSafetyVisitor::visitRest(MRest *ins)
{
    return replace(ins, MRestPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitMathFunction(MMathFunction *ins)
{
    return replace(ins, MMathFunction::New(alloc(), ins->input(), ins->function(), nullptr));
}

bool
ParallelSafetyVisitor::visitConcat(MConcat *ins)
{
    return replace(ins, MConcatPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitToString(MToString *ins)
{
    MIRType inputType = ins->input()->type();
    if (inputType != MIRType_Int32 && inputType != MIRType_Double)
        return markUnsafe();
    return true;
}

bool
ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
                                         JSObject *templateObject)
{
    replace(newInstruction, MNewPar::New(alloc(), ForkJoinContext(), templateObject));
    return true;
}

bool
ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                               MInstruction *replacementInstruction)
{
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Write Guards
//
// We only want to permit writes to locally guarded objects.
// Furthermore, we want to avoid PICs and other non-thread-safe things
// (though perhaps we should support PICs at some point). If we
// cannot determine the origin of an object, we can insert a write
// guard which will check whether the object was allocated from the
// per-thread-arena or not.
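//
// Schematically (MIR shorthand), a write such as
//
//     StoreSlot(slots(obj), value)
//
// becomes
//
//     GuardThreadExclusive(cx, obj)
//     StoreSlot(slots(obj), value)
//
// where the guard bails out unless `obj` is thread-local.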

bool
ParallelSafetyVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                        MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;

      case MIRType_Slots:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;

          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      case MIRType_Elements:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;

          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;

          case MDefinition::Op_TypedObjectElements:
            object = valueBeingWritten->toTypedObjectElements()->object();
            break;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      default:
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }

    if (object->isUnbox())
        object = object->toUnbox()->input();

    switch (object->op()) {
      case MDefinition::Op_NewPar:
        // MNewPar will always be creating something thread-local, omit the guard
        SpewMIR(writeInstruction, "write to NewPar prop does not require guard");
        return true;
      default:
        break;
    }

    MBasicBlock *block = writeInstruction->block();
    MGuardThreadExclusive *writeGuard =
        MGuardThreadExclusive::New(alloc(), ForkJoinContext(), object);
    block->insertBefore(writeInstruction, writeGuard);
    writeGuard->adjustInputs(alloc(), writeGuard);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Calls
//
// We only support calls to interpreted functions that have already been
// Ion compiled. If a function has no IonScript, we bail out.
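//
// Concretely, visitCall() below rejects DOM natives, natives without a
// parallel variant, and constructing calls with an unknown target.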

bool
ParallelSafetyVisitor::visitCall(MCall *ins)
{
    // DOM? Scary.
    if (ins->isCallDOMNative()) {
        SpewMIR(ins, "call to dom function");
        return markUnsafe();
    }

    JSFunction *target = ins->getSingleTarget();
    if (target) {
        // Non-parallel native? Scary
        if (target->isNative() && !target->hasParallelNative()) {
            SpewMIR(ins, "call to non-parallel native function");
            return markUnsafe();
        }
        return true;
    }

    if (ins->isConstructing()) {
        SpewMIR(ins, "call to unknown constructor");
        return markUnsafe();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Stack limit, interrupts
//
// In sequential Ion code, the stack limit is stored in the JSRuntime.
// We store it in the thread context. We therefore need a separate
// instruction to access it, one parameterized by the thread context.
// Similar considerations apply to checking for interrupts.

bool
ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
    return replace(ins, MCheckOverRecursedPar::New(alloc(), ForkJoinContext()));
}

bool
ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
    return replace(ins, MInterruptCheckPar::New(alloc(), ForkJoinContext()));
}

/////////////////////////////////////////////////////////////////////////////
// Specialized ops
//
// Some ops, like +, can be specialized to ints/doubles. Anything
// else is terrifying.
//
// TODO---Eventually, we should probably permit arbitrary + but bail
// if the operands are not both integers/floats.
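//
// For example, Add/Sub/Mul/Div/Mod and MinMax are declared with
// SPECIALIZED_OP(op, PERMIT_NUMERIC) above, so they pass the
// (flags & (1 << spec)) test below only when specialized to
// MIRType_Int32 or MIRType_Double; any other specialization marks
// the block unsafe.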

bool
ParallelSafetyVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
                                                   uint32_t flags)
{
    uint32_t flag = 1 << spec;
    if (flags & flag)
        return true;

    SpewMIR(ins, "specialized to unacceptable type %d", spec);
    return markUnsafe();
}

/////////////////////////////////////////////////////////////////////////////
// Throw

bool
ParallelSafetyVisitor::visitThrow(MThrow *thr)
{
    MBasicBlock *block = thr->block();
    JS_ASSERT(block->lastIns() == thr);
    block->discardLastIns();
    MAbortPar *bailout = MAbortPar::New(alloc());
    if (!bailout)
        return false;
    block->end(bailout);
    return true;
}

///////////////////////////////////////////////////////////////////////////
// Callee extraction
//
// See comments in header file.

static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes, CallTargetVector &targets);

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets);

bool
jit::AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
        {
            if (!ins->isCall())
                continue;

            MCall *callIns = ins->toCall();

            RootedFunction target(cx, callIns->getSingleTarget());
            if (target) {
                JS_ASSERT_IF(!target->isInterpreted(), target->hasParallelNative());

                if (target->isInterpreted()) {
                    RootedScript script(cx, target->getOrCreateScript(cx));
                    if (!script || !AddCallTarget(script, targets))
                        return false;
                }

                continue;
            }

            types::TemporaryTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
            RootedScript script(cx, callIns->block()->info().script());
            if (!GetPossibleCallees(cx,
                                    script,
                                    callIns->resumePoint()->pc(),
                                    calleeTypes,
                                    targets))
                return false;
        }
    }

    return true;
}

static bool
GetPossibleCallees(JSContext *cx,
                   HandleScript script,
                   jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes,
                   CallTargetVector &targets)
{
    if (!calleeTypes || calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount = calleeTypes->getObjectCount();

    if (objCount == 0)
        return true;

    RootedFunction rootedFun(cx);
    RootedScript rootedScript(cx);
    for (unsigned i = 0; i < objCount; i++) {
        JSObject *obj = calleeTypes->getSingleObject(i);
        if (obj && obj->is<JSFunction>()) {
            rootedFun = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            rootedFun = typeObj->interpretedFunction;
            if (!rootedFun)
                continue;
        }

        if (!rootedFun->isInterpreted())
            continue;

        rootedScript = rootedFun->getOrCreateScript(cx);
        if (!rootedScript)
            return false;

        if (rootedScript->shouldCloneAtCallsite()) {
            rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
            if (!rootedFun)
                return false;
            rootedScript = rootedFun->nonLazyScript();
        }

        // check if this call target is already known
        if (!AddCallTarget(rootedScript, targets))
            return false;
    }

    return true;
}

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets)
{
    for (size_t i = 0; i < targets.length(); i++) {
        if (targets[i] == script)
            return true;
    }

    if (!targets.append(script))
        return false;

    return true;
}