/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ParallelSafetyAnalysis.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/UnreachableCodeElimination.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"

using namespace js;
using namespace jit;

using parallel::Spew;
using parallel::SpewMIR;
using parallel::SpewCompile;

#define SAFE_OP(op) \
    virtual bool visit##op(M##op *prop) { return true; }

#define CUSTOM_OP(op) \
    virtual bool visit##op(M##op *prop);

#define DROP_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        MBasicBlock *block = ins->block(); \
        block->discard(ins); \
        return true; \
    }

#define PERMIT(T) (1 << T)

#define PERMIT_INT32 (PERMIT(MIRType_Int32))
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))

#define SPECIALIZED_OP(op, flags) \
    virtual bool visit##op(M##op *ins) { \
        return visitSpecializedInstruction(ins, ins->specialization(), flags); \
    }

#define UNSAFE_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        SpewMIR(ins, "Unsafe"); \
        return markUnsafe(); \
    }

#define WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        return insertWriteGuard(prop, prop->obj()); \
    }

#define MAYBE_WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        if (prop->racy()) \
            return true; \
        return insertWriteGuard(prop, prop->obj()); \
    }

class ParallelSafetyVisitor : public MInstructionVisitor
{
    MIRGraph &graph_;
    bool unsafe_;
    MDefinition *cx_;

    bool insertWriteGuard(MInstruction *writeInstruction, MDefinition *valueBeingWritten);

    bool replaceWithNewPar(MInstruction *newInstruction, JSObject *templateObject);
    bool replace(MInstruction *oldInstruction, MInstruction *replacementInstruction);

    bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);

    // Intended for use in a visitXyz() method, as in "return
    // markUnsafe()". Sets the unsafe flag and returns true (since
    // this does not indicate an unrecoverable compilation failure).
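    // Returning false from a visit method is reserved for unrecoverable
    // conditions (e.g. cancellation); mere unsafety instead causes
    // analyze() to convert the offending block into a bailout block.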
    bool markUnsafe() {
        JS_ASSERT(!unsafe_);
        unsafe_ = true;
        return true;
    }

    TempAllocator &alloc() const {
        return graph_.alloc();
    }

  public:
    ParallelSafetyVisitor(MIRGraph &graph)
      : graph_(graph),
        unsafe_(false),
        cx_(nullptr)
    { }

    void clearUnsafe() { unsafe_ = false; }
    bool unsafe() { return unsafe_; }
    MDefinition *ForkJoinContext() {
        if (!cx_)
            cx_ = graph_.forkJoinContext();
        return cx_;
    }

    bool convertToBailout(MBasicBlock *block, MInstruction *ins);

    // The policy for now is to blacklist everything that is not
    // obviously safe; we can loosen this as the need arises.

    SAFE_OP(Constant)
    UNSAFE_OP(CloneLiteral)
    SAFE_OP(Parameter)
    SAFE_OP(Callee)
    SAFE_OP(TableSwitch)
    SAFE_OP(Goto)
    SAFE_OP(Test)
    SAFE_OP(Compare)
    SAFE_OP(Phi)
    SAFE_OP(Beta)
    UNSAFE_OP(OsrValue)
    UNSAFE_OP(OsrScopeChain)
    UNSAFE_OP(OsrReturnValue)
    UNSAFE_OP(OsrArgumentsObject)
    UNSAFE_OP(ReturnFromCtor)
    CUSTOM_OP(CheckOverRecursed)
    UNSAFE_OP(DefVar)
    UNSAFE_OP(DefFun)
    UNSAFE_OP(CreateThis)
    CUSTOM_OP(CreateThisWithTemplate)
    UNSAFE_OP(CreateThisWithProto)
    UNSAFE_OP(CreateArgumentsObject)
    UNSAFE_OP(GetArgumentsObjectArg)
    UNSAFE_OP(SetArgumentsObjectArg)
    UNSAFE_OP(ComputeThis)
    UNSAFE_OP(LoadArrowThis)
    CUSTOM_OP(Call)
    UNSAFE_OP(ApplyArgs)
    UNSAFE_OP(ArraySplice)
    UNSAFE_OP(Bail)
    UNSAFE_OP(AssertFloat32)
    UNSAFE_OP(GetDynamicName)
    UNSAFE_OP(FilterArgumentsOrEval)
    UNSAFE_OP(CallDirectEval)
    SAFE_OP(BitNot)
    SAFE_OP(TypeOf)
    UNSAFE_OP(ToId)
    SAFE_OP(BitAnd)
    SAFE_OP(BitOr)
    SAFE_OP(BitXor)
    SAFE_OP(Lsh)
    SAFE_OP(Rsh)
    SAFE_OP(Ursh)
    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
    SAFE_OP(Abs)
    SAFE_OP(Sqrt)
    UNSAFE_OP(Atan2)
    UNSAFE_OP(Hypot)
    CUSTOM_OP(MathFunction)
    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
    CUSTOM_OP(Concat)
    SAFE_OP(ConcatPar)
    UNSAFE_OP(CharCodeAt)
    UNSAFE_OP(FromCharCode)
    UNSAFE_OP(StringSplit)
    SAFE_OP(Return)
    CUSTOM_OP(Throw)
    SAFE_OP(Box)     // Boxing just creates a JSVal, doesn't alloc.
    SAFE_OP(Unbox)
    SAFE_OP(GuardObject)
    SAFE_OP(ToDouble)
    SAFE_OP(ToFloat32)
    SAFE_OP(ToInt32)
    SAFE_OP(TruncateToInt32)
    SAFE_OP(MaybeToDoubleElement)
    CUSTOM_OP(ToString)
    SAFE_OP(NewSlots)
    CUSTOM_OP(NewArray)
    CUSTOM_OP(NewObject)
    CUSTOM_OP(NewCallObject)
    CUSTOM_OP(NewRunOnceCallObject)
    CUSTOM_OP(NewDerivedTypedObject)
    UNSAFE_OP(InitElem)
    UNSAFE_OP(InitElemGetterSetter)
    UNSAFE_OP(MutateProto)
    UNSAFE_OP(InitProp)
    UNSAFE_OP(InitPropGetterSetter)
    SAFE_OP(Start)
    UNSAFE_OP(OsrEntry)
    SAFE_OP(Nop)
    UNSAFE_OP(RegExp)
    CUSTOM_OP(Lambda)
    UNSAFE_OP(LambdaArrow)
    UNSAFE_OP(ImplicitThis)
    SAFE_OP(Slots)
    SAFE_OP(Elements)
    SAFE_OP(ConstantElements)
    SAFE_OP(LoadSlot)
    WRITE_GUARDED_OP(StoreSlot, slots)
    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
    SAFE_OP(FilterTypeSet)
    SAFE_OP(TypeBarrier)   // causes a bailout if the type is not found: a-ok with us
    SAFE_OP(MonitorTypes)  // causes a bailout if the type is not found: a-ok with us
    UNSAFE_OP(PostWriteBarrier)
    SAFE_OP(GetPropertyCache)
    SAFE_OP(GetPropertyPolymorphic)
    UNSAFE_OP(SetPropertyPolymorphic)
    SAFE_OP(GetElementCache)
    WRITE_GUARDED_OP(SetElementCache, object)
    UNSAFE_OP(BindNameCache)
    SAFE_OP(GuardShape)
    SAFE_OP(GuardObjectType)
    SAFE_OP(GuardObjectIdentity)
    SAFE_OP(GuardClass)
    SAFE_OP(AssertRange)
    SAFE_OP(ArrayLength)
    WRITE_GUARDED_OP(SetArrayLength, elements)
    SAFE_OP(TypedArrayLength)
    SAFE_OP(TypedArrayElements)
    SAFE_OP(TypedObjectElements)
    SAFE_OP(SetTypedObjectOffset)
    SAFE_OP(InitializedLength)
    WRITE_GUARDED_OP(SetInitializedLength, elements)
    SAFE_OP(Not)
    SAFE_OP(NeuterCheck)
    SAFE_OP(BoundsCheck)
    SAFE_OP(BoundsCheckLower)
    SAFE_OP(LoadElement)
    SAFE_OP(LoadElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
    WRITE_GUARDED_OP(StoreElementHole, elements)
    UNSAFE_OP(ArrayPopShift)
    UNSAFE_OP(ArrayPush)
    SAFE_OP(LoadTypedArrayElement)
    SAFE_OP(LoadTypedArrayElementHole)
    SAFE_OP(LoadTypedArrayElementStatic)
    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
    WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
    UNSAFE_OP(StoreTypedArrayElementStatic)
    UNSAFE_OP(ClampToUint8)
    SAFE_OP(LoadFixedSlot)
    WRITE_GUARDED_OP(StoreFixedSlot, object)
    UNSAFE_OP(CallGetProperty)
    UNSAFE_OP(GetNameCache)
    UNSAFE_OP(CallGetIntrinsicValue)
    UNSAFE_OP(CallsiteCloneCache)
    UNSAFE_OP(CallGetElement)
    WRITE_GUARDED_OP(CallSetElement, object)
    UNSAFE_OP(CallInitElementArray)
    WRITE_GUARDED_OP(CallSetProperty, object)
    UNSAFE_OP(DeleteProperty)
    UNSAFE_OP(DeleteElement)
    WRITE_GUARDED_OP(SetPropertyCache, object)
    UNSAFE_OP(IteratorStart)
    UNSAFE_OP(IteratorNext)
    UNSAFE_OP(IteratorMore)
    UNSAFE_OP(IteratorEnd)
    SAFE_OP(StringLength)
    SAFE_OP(ArgumentsLength)
    SAFE_OP(GetFrameArgument)
    UNSAFE_OP(SetFrameArgument)
    UNSAFE_OP(RunOncePrologue)
    CUSTOM_OP(Rest)
    SAFE_OP(RestPar)
    SAFE_OP(Floor)
    SAFE_OP(Round)
    UNSAFE_OP(InstanceOf)
    CUSTOM_OP(InterruptCheck)
    SAFE_OP(ForkJoinContext)
    SAFE_OP(ForkJoinGetSlice)
    SAFE_OP(NewPar)
    SAFE_OP(NewDenseArrayPar)
    SAFE_OP(NewCallObjectPar)
    SAFE_OP(LambdaPar)
    SAFE_OP(AbortPar)
    UNSAFE_OP(ArrayConcat)
    UNSAFE_OP(GetDOMProperty)
    UNSAFE_OP(GetDOMMember)
    UNSAFE_OP(SetDOMProperty)
    UNSAFE_OP(NewStringObject)
    UNSAFE_OP(Random)
    SAFE_OP(Pow)
    SAFE_OP(PowHalf)
    UNSAFE_OP(RegExpTest)
    UNSAFE_OP(RegExpExec)
    UNSAFE_OP(RegExpReplace)
    UNSAFE_OP(StringReplace)
    UNSAFE_OP(CallInstanceOf)
    UNSAFE_OP(ProfilerStackOp)
    UNSAFE_OP(GuardString)
    UNSAFE_OP(NewDeclEnvObject)
    UNSAFE_OP(In)
    UNSAFE_OP(InArray)
    SAFE_OP(GuardThreadExclusive)
    SAFE_OP(InterruptCheckPar)
    SAFE_OP(CheckOverRecursedPar)
    SAFE_OP(FunctionDispatch)
    SAFE_OP(TypeObjectDispatch)
    SAFE_OP(IsCallable)
    SAFE_OP(HaveSameClass)
    SAFE_OP(HasClass)
    UNSAFE_OP(EffectiveAddress)
    UNSAFE_OP(AsmJSUnsignedToDouble)
    UNSAFE_OP(AsmJSUnsignedToFloat32)
    UNSAFE_OP(AsmJSNeg)
    UNSAFE_OP(AsmJSLoadHeap)
    UNSAFE_OP(AsmJSStoreHeap)
    UNSAFE_OP(AsmJSLoadGlobalVar)
    UNSAFE_OP(AsmJSStoreGlobalVar)
    UNSAFE_OP(AsmJSLoadFuncPtr)
    UNSAFE_OP(AsmJSLoadFFIFunc)
    UNSAFE_OP(AsmJSReturn)
    UNSAFE_OP(AsmJSVoidReturn)
    UNSAFE_OP(AsmJSPassStackArg)
    UNSAFE_OP(AsmJSParameter)
    UNSAFE_OP(AsmJSCall)
    DROP_OP(RecompileCheck)

    // It looks like this could easily be made safe:
    UNSAFE_OP(ConvertElementsToDoubles)
};

bool
ParallelSafetyAnalysis::analyze()
{
    // Walk the basic blocks in RPO. When we encounter a block with an
    // unsafe instruction, then we know that this block will bailout when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, because the graph is sorted in RPO;
    // we just use the marked flags to tell us when we have visited
    // some predecessor of the current block.
    ParallelSafetyVisitor visitor(graph_);
    graph_.entryBlock()->mark();  // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (mir_->shouldCancel("ParallelSafetyAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
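            // `instr` is hoisted out of the loop so that, if the visitor
            // flags an instruction as unsafe, convertToBailout() below can
            // be told which instruction tripped the analysis.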
            MInstruction *instr = nullptr;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir_->shouldCancel("ParallelSafetyAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                if (!instr->accept(&visitor)) {
                    SpewMIR(instr, "Unaccepted");
                    return false;
                }
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bailout.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bailout.
                if (*block == graph_.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement block that bails out.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelSafetyAnalysis");

    UnreachableCodeElimination uce(mir_, graph_);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelSafetyAnalysis");
    AssertExtendedGraphCoherency(graph_);

    if (!removeResumePointOperands())
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph_);

    return true;
}

bool
ParallelSafetyAnalysis::removeResumePointOperands()
{
    // In parallel exec mode, nothing is effectful, therefore we do
    // not need to reconstruct interpreter state and can simply
    // bailout by returning a special code. Ideally we'd either
    // remove the unused resume points or else never generate them in
    // the first place, but I encountered various assertions and
    // crashes attempting to do that, so for the time being I simply
    // replace their operands with undefined. This prevents them from
    // interfering with DCE and other optimizations. It is also *necessary*
    // to handle cases like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.
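    //
    // The loop below lazily creates a single MConstant(undefined) right
    // after the Start instruction and substitutes it for every
    // resume-point operand from that point on.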

    MConstant *udef = nullptr;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                JS_ASSERT(udef == nullptr);
                udef = MConstant::New(graph_.alloc(), UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}

void
ParallelSafetyAnalysis::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
                                                     MDefinition *withDef)
{
    for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++)
        resumePoint->replaceOperand(i, withDef);
}

bool
ParallelSafetyVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe());          // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here

    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();

    // This block is no longer reachable.
    block->unmark();

    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);

        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;

        // Create a bailout block to insert on this edge.
        MBasicBlock *bailBlock = MBasicBlock::NewAbortPar(graph_, block->info(), pred,
                                                          block->pc(),
                                                          block->entryResumePoint());
        if (!bailBlock)
            return false;

        // If `block` had phis, we are replacing it with `bailBlock`, which does not.
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(nullptr, 0);

        // Redirect the predecessor to the bailout block.
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);

        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
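        // Marking the new block below also ensures that the subsequent
        // UnreachableCodeElimination pass, which removes unmarked blocks,
        // keeps it.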
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Memory allocation
//
// Simple memory allocation opcodes---those which ultimately compile
// down to a (possibly inlined) invocation of NewGCThing()---are
// replaced with MNewPar, which is supplied with the thread context.
// These allocations will take place using per-helper-thread arenas.

bool
ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins)
{
    return replaceWithNewPar(ins, ins->templateObject());
}

bool
ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewRunOnceCallObject(MNewRunOnceCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitLambda(MLambda *ins)
{
    if (ins->info().singletonType || ins->info().useNewTypeForClone) {
        // Slow path: bail on parallel execution.
        return markUnsafe();
    }

    // Fast path: replace with the LambdaPar op.
    replace(ins, MLambdaPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewObject(MNewObject *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewArray(MNewArray *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewDerivedTypedObject(MNewDerivedTypedObject *ins)
{
    // FIXME(Bug 984090) -- There should really be a parallel-safe
    // version of NewDerivedTypedObject. However, until that is
    // implemented, let's just ignore those with 0 uses, since they
    // will be stripped out by DCE later.
    if (ins->useCount() == 0)
        return true;

    SpewMIR(ins, "visitNewDerivedTypedObject");
    return markUnsafe();
}

bool
ParallelSafetyVisitor::visitRest(MRest *ins)
{
    return replace(ins, MRestPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitMathFunction(MMathFunction *ins)
{
    return replace(ins, MMathFunction::New(alloc(), ins->input(), ins->function(), nullptr));
}

bool
ParallelSafetyVisitor::visitConcat(MConcat *ins)
{
    return replace(ins, MConcatPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitToString(MToString *ins)
{
    MIRType inputType = ins->input()->type();
    if (inputType != MIRType_Int32 && inputType != MIRType_Double)
        return markUnsafe();
    return true;
}

bool
ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
                                         JSObject *templateObject)
{
    replace(newInstruction, MNewPar::New(alloc(), ForkJoinContext(), templateObject));
    return true;
}

bool
ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                               MInstruction *replacementInstruction)
{
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Write Guards
//
// We only want to permit writes to locally guarded objects.
// Furthermore, we want to avoid PICs and other non-thread-safe things
// (though perhaps we should support PICs at some point). If we
// cannot determine the origin of an object, we can insert a write
// guard which will check whether the object was allocated from the
// per-thread-arena or not.

bool
ParallelSafetyVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                        MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;

      case MIRType_Slots:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;

          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      case MIRType_Elements:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;

          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;

          case MDefinition::Op_TypedObjectElements:
            object = valueBeingWritten->toTypedObjectElements()->object();
            break;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      default:
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }

    if (object->isUnbox())
        object = object->toUnbox()->input();

    switch (object->op()) {
      case MDefinition::Op_NewPar:
        // MNewPar always creates something thread-local; omit the guard.
        SpewMIR(writeInstruction, "write to NewPar prop does not require guard");
        return true;
      default:
        break;
    }

    MBasicBlock *block = writeInstruction->block();
    MGuardThreadExclusive *writeGuard =
        MGuardThreadExclusive::New(alloc(), ForkJoinContext(), object);
    block->insertBefore(writeInstruction, writeGuard);
    writeGuard->adjustInputs(alloc(), writeGuard);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Calls
//
// We only support calls to interpreted functions that have already been
// Ion compiled. If a function has no IonScript, we bail out.

bool
ParallelSafetyVisitor::visitCall(MCall *ins)
{
    // DOM? Scary.
    if (ins->isCallDOMNative()) {
        SpewMIR(ins, "call to dom function");
        return markUnsafe();
    }

    JSFunction *target = ins->getSingleTarget();
    if (target) {
        // Non-parallel native? Scary.
        if (target->isNative() && !target->hasParallelNative()) {
            SpewMIR(ins, "call to non-parallel native function");
            return markUnsafe();
        }
        return true;
    }

    if (ins->isConstructing()) {
        SpewMIR(ins, "call to unknown constructor");
        return markUnsafe();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Stack limit, interrupts
//
// In sequential Ion code, the stack limit is stored in the JSRuntime.
// We store it in the thread context. We therefore need a separate
// instruction to access it, one parameterized by the thread context.
// Similar considerations apply to checking for interrupts.
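//
// Both visitors below follow the same pattern: swap the sequential
// instruction for its *Par twin, passing the ForkJoinContext through
// as an extra operand.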

bool
ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
    return replace(ins, MCheckOverRecursedPar::New(alloc(), ForkJoinContext()));
}

bool
ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
    return replace(ins, MInterruptCheckPar::New(alloc(), ForkJoinContext()));
}

/////////////////////////////////////////////////////////////////////////////
// Specialized ops
//
// Some ops, like +, can be specialized to ints/doubles. Anything
// else is terrifying.
//
// TODO---Eventually, we should probably permit arbitrary + but bail
// if the operands are not both integers/floats.

bool
ParallelSafetyVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
                                                   uint32_t flags)
{
    uint32_t flag = 1 << spec;
    if (flags & flag)
        return true;

    SpewMIR(ins, "specialized to unacceptable type %d", spec);
    return markUnsafe();
}

/////////////////////////////////////////////////////////////////////////////
// Throw

bool
ParallelSafetyVisitor::visitThrow(MThrow *thr)
{
    MBasicBlock *block = thr->block();
    JS_ASSERT(block->lastIns() == thr);
    block->discardLastIns();
    MAbortPar *bailout = MAbortPar::New(alloc());
    if (!bailout)
        return false;
    block->end(bailout);
    return true;
}

///////////////////////////////////////////////////////////////////////////
// Callee extraction
//
// See comments in header file.
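//
// In brief: AddPossibleCallees walks the graph and collects, for each
// MCall, the set of scripts the call might target (cloning
// callsite-cloned functions as needed), deduplicated via AddCallTarget.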

static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes, CallTargetVector &targets);

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets);

bool
jit::AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
        {
            if (!ins->isCall())
                continue;

            MCall *callIns = ins->toCall();

            RootedFunction target(cx, callIns->getSingleTarget());
            if (target) {
                JS_ASSERT_IF(!target->isInterpreted(), target->hasParallelNative());

                if (target->isInterpreted()) {
                    RootedScript script(cx, target->getOrCreateScript(cx));
                    if (!script || !AddCallTarget(script, targets))
                        return false;
                }

                continue;
            }

            types::TemporaryTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
            RootedScript script(cx, callIns->block()->info().script());
            if (!GetPossibleCallees(cx,
                                    script,
                                    callIns->resumePoint()->pc(),
                                    calleeTypes,
                                    targets))
                return false;
        }
    }

    return true;
}

static bool
GetPossibleCallees(JSContext *cx,
                   HandleScript script,
                   jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes,
                   CallTargetVector &targets)
{
    if (!calleeTypes || calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount = calleeTypes->getObjectCount();

    if (objCount == 0)
        return true;

    RootedFunction rootedFun(cx);
    RootedScript rootedScript(cx);
    for (unsigned i = 0; i < objCount; i++) {
        JSObject *obj = calleeTypes->getSingleObject(i);
        if (obj && obj->is<JSFunction>()) {
            rootedFun = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            rootedFun = typeObj->interpretedFunction;
            if (!rootedFun)
                continue;
        }

        if (!rootedFun->isInterpreted())
            continue;

        rootedScript = rootedFun->getOrCreateScript(cx);
        if (!rootedScript)
            return false;

        if (rootedScript->shouldCloneAtCallsite()) {
            rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
            if (!rootedFun)
                return false;
            rootedScript = rootedFun->nonLazyScript();
        }

        // Check whether this call target is already known.
        if (!AddCallTarget(rootedScript, targets))
            return false;
    }

    return true;
}

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets)
{
    for (size_t i = 0; i < targets.length(); i++) {
        if (targets[i] == script)
            return true;
    }

    if (!targets.append(script))
        return false;

    return true;
}