--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,932 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/ParallelSafetyAnalysis.h"

#include "jit/Ion.h"
#include "jit/IonAnalysis.h"
#include "jit/IonSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/UnreachableCodeElimination.h"

#include "jsinferinlines.h"
#include "jsobjinlines.h"

using namespace js;
using namespace jit;

using parallel::Spew;
using parallel::SpewMIR;
using parallel::SpewCompile;

#define SAFE_OP(op) \
    virtual bool visit##op(M##op *prop) { return true; }

#define CUSTOM_OP(op) \
    virtual bool visit##op(M##op *prop);

#define DROP_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        MBasicBlock *block = ins->block(); \
        block->discard(ins); \
        return true; \
    }

#define PERMIT(T) (1 << T)

#define PERMIT_INT32 (PERMIT(MIRType_Int32))
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))

#define SPECIALIZED_OP(op, flags) \
    virtual bool visit##op(M##op *ins) { \
        return visitSpecializedInstruction(ins, ins->specialization(), flags); \
    }

#define UNSAFE_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        SpewMIR(ins, "Unsafe"); \
        return markUnsafe(); \
    }

#define WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        return insertWriteGuard(prop, prop->obj()); \
    }

#define MAYBE_WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        if (prop->racy()) \
            return true; \
        return insertWriteGuard(prop, prop->obj()); \
    }

class ParallelSafetyVisitor : public MInstructionVisitor
{
    MIRGraph &graph_;
    bool unsafe_;
    MDefinition *cx_;

    bool insertWriteGuard(MInstruction *writeInstruction, MDefinition *valueBeingWritten);

    bool replaceWithNewPar(MInstruction *newInstruction, JSObject *templateObject);
    bool replace(MInstruction *oldInstruction, MInstruction *replacementInstruction);

    bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);

    // Intended for use in a visitXyz() method like "return
    // markUnsafe()". Sets the unsafe flag and returns true (since
    // this does not indicate an unrecoverable compilation failure).
    bool markUnsafe() {
        JS_ASSERT(!unsafe_);
        unsafe_ = true;
        return true;
    }

    TempAllocator &alloc() const {
        return graph_.alloc();
    }

  public:
    ParallelSafetyVisitor(MIRGraph &graph)
      : graph_(graph),
        unsafe_(false),
        cx_(nullptr)
    { }

    void clearUnsafe() { unsafe_ = false; }
    bool unsafe() { return unsafe_; }
    MDefinition *ForkJoinContext() {
        if (!cx_)
            cx_ = graph_.forkJoinContext();
        return cx_;
    }

    bool convertToBailout(MBasicBlock *block, MInstruction *ins);

    // I am taking the policy of blacklisting everything that's not
    // obviously safe for now. We can loosen this as needed.

    SAFE_OP(Constant)
    UNSAFE_OP(CloneLiteral)
    SAFE_OP(Parameter)
    SAFE_OP(Callee)
    SAFE_OP(TableSwitch)
    SAFE_OP(Goto)
    SAFE_OP(Test)
    SAFE_OP(Compare)
    SAFE_OP(Phi)
    SAFE_OP(Beta)
    UNSAFE_OP(OsrValue)
    UNSAFE_OP(OsrScopeChain)
    UNSAFE_OP(OsrReturnValue)
    UNSAFE_OP(OsrArgumentsObject)
    UNSAFE_OP(ReturnFromCtor)
    CUSTOM_OP(CheckOverRecursed)
    UNSAFE_OP(DefVar)
    UNSAFE_OP(DefFun)
    UNSAFE_OP(CreateThis)
    CUSTOM_OP(CreateThisWithTemplate)
    UNSAFE_OP(CreateThisWithProto)
    UNSAFE_OP(CreateArgumentsObject)
    UNSAFE_OP(GetArgumentsObjectArg)
    UNSAFE_OP(SetArgumentsObjectArg)
    UNSAFE_OP(ComputeThis)
    UNSAFE_OP(LoadArrowThis)
    CUSTOM_OP(Call)
    UNSAFE_OP(ApplyArgs)
    UNSAFE_OP(ArraySplice)
    UNSAFE_OP(Bail)
    UNSAFE_OP(AssertFloat32)
    UNSAFE_OP(GetDynamicName)
    UNSAFE_OP(FilterArgumentsOrEval)
    UNSAFE_OP(CallDirectEval)
    SAFE_OP(BitNot)
    SAFE_OP(TypeOf)
    UNSAFE_OP(ToId)
    SAFE_OP(BitAnd)
    SAFE_OP(BitOr)
    SAFE_OP(BitXor)
    SAFE_OP(Lsh)
    SAFE_OP(Rsh)
    SAFE_OP(Ursh)
    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
    SAFE_OP(Abs)
    SAFE_OP(Sqrt)
    UNSAFE_OP(Atan2)
    UNSAFE_OP(Hypot)
    CUSTOM_OP(MathFunction)
    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
    CUSTOM_OP(Concat)
    SAFE_OP(ConcatPar)
    UNSAFE_OP(CharCodeAt)
    UNSAFE_OP(FromCharCode)
    UNSAFE_OP(StringSplit)
    SAFE_OP(Return)
    CUSTOM_OP(Throw)
    SAFE_OP(Box)     // Boxing just creates a JSVal, doesn't alloc.
    SAFE_OP(Unbox)
    SAFE_OP(GuardObject)
    SAFE_OP(ToDouble)
    SAFE_OP(ToFloat32)
    SAFE_OP(ToInt32)
    SAFE_OP(TruncateToInt32)
    SAFE_OP(MaybeToDoubleElement)
    CUSTOM_OP(ToString)
    SAFE_OP(NewSlots)
    CUSTOM_OP(NewArray)
    CUSTOM_OP(NewObject)
    CUSTOM_OP(NewCallObject)
    CUSTOM_OP(NewRunOnceCallObject)
    CUSTOM_OP(NewDerivedTypedObject)
    UNSAFE_OP(InitElem)
    UNSAFE_OP(InitElemGetterSetter)
    UNSAFE_OP(MutateProto)
    UNSAFE_OP(InitProp)
    UNSAFE_OP(InitPropGetterSetter)
    SAFE_OP(Start)
    UNSAFE_OP(OsrEntry)
    SAFE_OP(Nop)
    UNSAFE_OP(RegExp)
    CUSTOM_OP(Lambda)
    UNSAFE_OP(LambdaArrow)
    UNSAFE_OP(ImplicitThis)
    SAFE_OP(Slots)
    SAFE_OP(Elements)
    SAFE_OP(ConstantElements)
    SAFE_OP(LoadSlot)
    WRITE_GUARDED_OP(StoreSlot, slots)
    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
    SAFE_OP(FilterTypeSet)
    SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
    SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
    UNSAFE_OP(PostWriteBarrier)
    SAFE_OP(GetPropertyCache)
    SAFE_OP(GetPropertyPolymorphic)
    UNSAFE_OP(SetPropertyPolymorphic)
    SAFE_OP(GetElementCache)
    WRITE_GUARDED_OP(SetElementCache, object)
    UNSAFE_OP(BindNameCache)
    SAFE_OP(GuardShape)
    SAFE_OP(GuardObjectType)
    SAFE_OP(GuardObjectIdentity)
    SAFE_OP(GuardClass)
    SAFE_OP(AssertRange)
    SAFE_OP(ArrayLength)
    WRITE_GUARDED_OP(SetArrayLength, elements)
    SAFE_OP(TypedArrayLength)
    SAFE_OP(TypedArrayElements)
    SAFE_OP(TypedObjectElements)
    SAFE_OP(SetTypedObjectOffset)
    SAFE_OP(InitializedLength)
    WRITE_GUARDED_OP(SetInitializedLength, elements)
    SAFE_OP(Not)
    SAFE_OP(NeuterCheck)
    SAFE_OP(BoundsCheck)
    SAFE_OP(BoundsCheckLower)
    SAFE_OP(LoadElement)
    SAFE_OP(LoadElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
    WRITE_GUARDED_OP(StoreElementHole, elements)
    UNSAFE_OP(ArrayPopShift)
    UNSAFE_OP(ArrayPush)
    SAFE_OP(LoadTypedArrayElement)
    SAFE_OP(LoadTypedArrayElementHole)
    SAFE_OP(LoadTypedArrayElementStatic)
    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
    WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
    UNSAFE_OP(StoreTypedArrayElementStatic)
    UNSAFE_OP(ClampToUint8)
    SAFE_OP(LoadFixedSlot)
    WRITE_GUARDED_OP(StoreFixedSlot, object)
    UNSAFE_OP(CallGetProperty)
    UNSAFE_OP(GetNameCache)
    UNSAFE_OP(CallGetIntrinsicValue)
    UNSAFE_OP(CallsiteCloneCache)
    UNSAFE_OP(CallGetElement)
    WRITE_GUARDED_OP(CallSetElement, object)
    UNSAFE_OP(CallInitElementArray)
    WRITE_GUARDED_OP(CallSetProperty, object)
    UNSAFE_OP(DeleteProperty)
    UNSAFE_OP(DeleteElement)
    WRITE_GUARDED_OP(SetPropertyCache, object)
    UNSAFE_OP(IteratorStart)
    UNSAFE_OP(IteratorNext)
    UNSAFE_OP(IteratorMore)
    UNSAFE_OP(IteratorEnd)
    SAFE_OP(StringLength)
    SAFE_OP(ArgumentsLength)
    SAFE_OP(GetFrameArgument)
    UNSAFE_OP(SetFrameArgument)
    UNSAFE_OP(RunOncePrologue)
    CUSTOM_OP(Rest)
    SAFE_OP(RestPar)
    SAFE_OP(Floor)
    SAFE_OP(Round)
    UNSAFE_OP(InstanceOf)
    CUSTOM_OP(InterruptCheck)
    SAFE_OP(ForkJoinContext)
    SAFE_OP(ForkJoinGetSlice)
    SAFE_OP(NewPar)
    SAFE_OP(NewDenseArrayPar)
    SAFE_OP(NewCallObjectPar)
    SAFE_OP(LambdaPar)
    SAFE_OP(AbortPar)
    UNSAFE_OP(ArrayConcat)
    UNSAFE_OP(GetDOMProperty)
    UNSAFE_OP(GetDOMMember)
    UNSAFE_OP(SetDOMProperty)
    UNSAFE_OP(NewStringObject)
    UNSAFE_OP(Random)
    SAFE_OP(Pow)
    SAFE_OP(PowHalf)
    UNSAFE_OP(RegExpTest)
    UNSAFE_OP(RegExpExec)
    UNSAFE_OP(RegExpReplace)
    UNSAFE_OP(StringReplace)
    UNSAFE_OP(CallInstanceOf)
    UNSAFE_OP(ProfilerStackOp)
    UNSAFE_OP(GuardString)
    UNSAFE_OP(NewDeclEnvObject)
    UNSAFE_OP(In)
    UNSAFE_OP(InArray)
    SAFE_OP(GuardThreadExclusive)
    SAFE_OP(InterruptCheckPar)
    SAFE_OP(CheckOverRecursedPar)
    SAFE_OP(FunctionDispatch)
    SAFE_OP(TypeObjectDispatch)
    SAFE_OP(IsCallable)
    SAFE_OP(HaveSameClass)
    SAFE_OP(HasClass)
    UNSAFE_OP(EffectiveAddress)
    UNSAFE_OP(AsmJSUnsignedToDouble)
    UNSAFE_OP(AsmJSUnsignedToFloat32)
    UNSAFE_OP(AsmJSNeg)
    UNSAFE_OP(AsmJSLoadHeap)
    UNSAFE_OP(AsmJSStoreHeap)
    UNSAFE_OP(AsmJSLoadGlobalVar)
    UNSAFE_OP(AsmJSStoreGlobalVar)
    UNSAFE_OP(AsmJSLoadFuncPtr)
    UNSAFE_OP(AsmJSLoadFFIFunc)
    UNSAFE_OP(AsmJSReturn)
    UNSAFE_OP(AsmJSVoidReturn)
    UNSAFE_OP(AsmJSPassStackArg)
    UNSAFE_OP(AsmJSParameter)
    UNSAFE_OP(AsmJSCall)
    DROP_OP(RecompileCheck)

    // It looks like this could easily be made safe:
    UNSAFE_OP(ConvertElementsToDoubles)
};

bool
ParallelSafetyAnalysis::analyze()
{
    // Walk the basic blocks in a DFS. When we encounter a block with an
    // unsafe instruction, then we know that this block will bailout when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, though, because the graph is sorted
    // in RPO. Therefore, we just use the marked flags to tell us
    // when we visited some predecessor of the current block.
    ParallelSafetyVisitor visitor(graph_);
    graph_.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (mir_->shouldCancel("ParallelSafetyAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
            MInstruction *instr = nullptr;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir_->shouldCancel("ParallelSafetyAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                if (!instr->accept(&visitor)) {
                    SpewMIR(instr, "Unaccepted");
                    return false;
                }
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bailout.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bailout.
                if (*block == graph_.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement that will.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelSafetyAnalysis");

    UnreachableCodeElimination uce(mir_, graph_);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelSafetyAnalysis");
    AssertExtendedGraphCoherency(graph_);

    if (!removeResumePointOperands())
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph_);

    return true;
}

bool
ParallelSafetyAnalysis::removeResumePointOperands()
{
    // In parallel exec mode, nothing is effectful, so we do not need
    // to reconstruct interpreter state and can simply bailout by
    // returning a special code. Ideally we'd either remove the unused
    // resume points or else never generate them in the first place,
    // but I encountered various assertions and crashes attempting to
    // do that, so for the time being I simply replace their operands
    // with undefined. This prevents them from interfering with DCE
    // and other optimizations. It is also *necessary* to handle cases
    // like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.
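    //
    // An illustrative sketch (the operand names here are invented):
    // a resume point that captured, say,
    //
    //     resumePoint = [calleeFun, arg0, MPassArg(...)]
    //
    // is rewritten by the loop below so that every operand refers to
    // the single MConstant(UndefinedValue()) inserted just after the
    // Start instruction:
    //
    //     resumePoint = [undef, undef, undef]
    //
    // Since a parallel bailout returns a special code rather than
    // resuming in the interpreter, `undefined` is as good a value as
    // any here.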

    MConstant *udef = nullptr;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                JS_ASSERT(udef == nullptr);
                udef = MConstant::New(graph_.alloc(), UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}

void
ParallelSafetyAnalysis::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
                                                     MDefinition *withDef)
{
    for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++)
        resumePoint->replaceOperand(i, withDef);
}

bool
ParallelSafetyVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe());          // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here

    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();

    // This block is no longer reachable.
    block->unmark();

    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);

        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;

        // create bailout block to insert on this edge
        MBasicBlock *bailBlock = MBasicBlock::NewAbortPar(graph_, block->info(), pred,
                                                          block->pc(),
                                                          block->entryResumePoint());
        if (!bailBlock)
            return false;

        // if `block` had phis, we are replacing it with `bailBlock` which does not
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(nullptr, 0);

        // redirect the predecessor to the bailout block
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);

        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
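        //
        // A sketch of the rewiring for one reachable predecessor
        // (illustrative, not part of the original comments): the edge
        //
        //     pred --> block
        //
        // becomes
        //
        //     pred --> bailBlock        block [unmarked]
        //
        // where `bailBlock` is marked reachable and `block`, now
        // unmarked, is left for UnreachableCodeElimination to remove.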
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Memory allocation
//
// Simple memory allocation opcodes---those which ultimately compile
// down to a (possibly inlined) invocation of NewGCThing()---are
// replaced with MNewPar, which is supplied with the thread context.
// These allocations will take place using per-helper-thread arenas.

bool
ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins)
{
    return replaceWithNewPar(ins, ins->templateObject());
}

bool
ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewRunOnceCallObject(MNewRunOnceCallObject *ins)
{
    replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitLambda(MLambda *ins)
{
    if (ins->info().singletonType || ins->info().useNewTypeForClone) {
        // slow path: bail on parallel execution.
        return markUnsafe();
    }

    // fast path: replace with LambdaPar op
    replace(ins, MLambdaPar::New(alloc(), ForkJoinContext(), ins));
    return true;
}

bool
ParallelSafetyVisitor::visitNewObject(MNewObject *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewArray(MNewArray *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithNewPar(newInstruction, newInstruction->templateObject());
}

bool
ParallelSafetyVisitor::visitNewDerivedTypedObject(MNewDerivedTypedObject *ins)
{
    // FIXME(Bug 984090) -- There should really be a parallel-safe
    // version of NewDerivedTypedObject. However, until that is
    // implemented, let's just ignore those with 0 uses, since they
    // will be stripped out by DCE later.
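    //
    // (One hypothetical way such a use-less instruction arises:
    // evaluating a struct-typed field like `line.start` and then
    // discarding the result leaves behind an MNewDerivedTypedObject
    // that nothing consumes.)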
    if (ins->useCount() == 0)
        return true;

    SpewMIR(ins, "visitNewDerivedTypedObject");
    return markUnsafe();
}

bool
ParallelSafetyVisitor::visitRest(MRest *ins)
{
    return replace(ins, MRestPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitMathFunction(MMathFunction *ins)
{
    return replace(ins, MMathFunction::New(alloc(), ins->input(), ins->function(), nullptr));
}

bool
ParallelSafetyVisitor::visitConcat(MConcat *ins)
{
    return replace(ins, MConcatPar::New(alloc(), ForkJoinContext(), ins));
}

bool
ParallelSafetyVisitor::visitToString(MToString *ins)
{
    MIRType inputType = ins->input()->type();
    if (inputType != MIRType_Int32 && inputType != MIRType_Double)
        return markUnsafe();
    return true;
}

bool
ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
                                         JSObject *templateObject)
{
    replace(newInstruction, MNewPar::New(alloc(), ForkJoinContext(), templateObject));
    return true;
}

bool
ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                               MInstruction *replacementInstruction)
{
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Write Guards
//
// We only want to permit writes to locally guarded objects.
// Furthermore, we want to avoid PICs and other non-thread-safe things
// (though perhaps we should support PICs at some point). If we
// cannot determine the origin of an object, we can insert a write
// guard which will check whether the object was allocated from the
// per-thread-arena or not.

bool
ParallelSafetyVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                        MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;

      case MIRType_Slots:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;

          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      case MIRType_Elements:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;

          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;

          case MDefinition::Op_TypedObjectElements:
            object = valueBeingWritten->toTypedObjectElements()->object();
            break;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      default:
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }

    if (object->isUnbox())
        object = object->toUnbox()->input();

    switch (object->op()) {
      case MDefinition::Op_NewPar:
        // MNewPar will always be creating something thread-local, omit the guard
        SpewMIR(writeInstruction, "write to NewPar prop does not require guard");
        return true;
      default:
        break;
    }

    MBasicBlock *block = writeInstruction->block();
    MGuardThreadExclusive *writeGuard =
        MGuardThreadExclusive::New(alloc(), ForkJoinContext(), object);
    block->insertBefore(writeInstruction, writeGuard);
    writeGuard->adjustInputs(alloc(), writeGuard);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Calls
//
// We only support calls to interpreted functions that have already been
// Ion compiled. If a function has no IonScript, we bail out.

bool
ParallelSafetyVisitor::visitCall(MCall *ins)
{
    // DOM? Scary.
    if (ins->isCallDOMNative()) {
        SpewMIR(ins, "call to dom function");
        return markUnsafe();
    }

    JSFunction *target = ins->getSingleTarget();
    if (target) {
        // Non-parallel native? Scary.
        if (target->isNative() && !target->hasParallelNative()) {
            SpewMIR(ins, "call to non-parallel native function");
            return markUnsafe();
        }
        return true;
    }

    if (ins->isConstructing()) {
        SpewMIR(ins, "call to unknown constructor");
        return markUnsafe();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Stack limit, interrupts
//
// In sequential Ion code, the stack limit is stored in the JSRuntime.
// We store it in the thread context. We therefore need a separate
// instruction to access it, one parameterized by the thread context.
// Similar considerations apply to checking for interrupts.
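//
// Roughly, the rewrites below perform (illustrative, not verbatim MIR):
//
//     checkoverrecursed        -->  checkoverrecursedpar cx
//     interruptcheck           -->  interruptcheckpar cx
//
// where `cx` is the value produced by MForkJoinContext, so that each
// worker thread consults its own stack limit and interrupt flag.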

bool
ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
    return replace(ins, MCheckOverRecursedPar::New(alloc(), ForkJoinContext()));
}

bool
ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
    return replace(ins, MInterruptCheckPar::New(alloc(), ForkJoinContext()));
}

/////////////////////////////////////////////////////////////////////////////
// Specialized ops
//
// Some ops, like +, can be specialized to ints/doubles. Anything
// else is terrifying.
//
// TODO---Eventually, we should probably permit arbitrary + but bail
// if the operands are not both integers/floats.

bool
ParallelSafetyVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
                                                   uint32_t flags)
{
    uint32_t flag = 1 << spec;
    if (flags & flag)
        return true;

    SpewMIR(ins, "specialized to unacceptable type %d", spec);
    return markUnsafe();
}

/////////////////////////////////////////////////////////////////////////////
// Throw

bool
ParallelSafetyVisitor::visitThrow(MThrow *thr)
{
    MBasicBlock *block = thr->block();
    JS_ASSERT(block->lastIns() == thr);
    block->discardLastIns();
    MAbortPar *bailout = MAbortPar::New(alloc());
    if (!bailout)
        return false;
    block->end(bailout);
    return true;
}

///////////////////////////////////////////////////////////////////////////
// Callee extraction
//
// See comments in header file.

static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes, CallTargetVector &targets);

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets);

bool
jit::AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
        {
            if (!ins->isCall())
                continue;

            MCall *callIns = ins->toCall();

            RootedFunction target(cx, callIns->getSingleTarget());
            if (target) {
                JS_ASSERT_IF(!target->isInterpreted(), target->hasParallelNative());

                if (target->isInterpreted()) {
                    RootedScript script(cx, target->getOrCreateScript(cx));
                    if (!script || !AddCallTarget(script, targets))
                        return false;
                }

                continue;
            }

            types::TemporaryTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
            RootedScript script(cx, callIns->block()->info().script());
            if (!GetPossibleCallees(cx,
                                    script,
                                    callIns->resumePoint()->pc(),
                                    calleeTypes,
                                    targets))
                return false;
        }
    }

    return true;
}

static bool
GetPossibleCallees(JSContext *cx,
                   HandleScript script,
                   jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes,
                   CallTargetVector &targets)
{
    if (!calleeTypes || calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount =
        calleeTypes->getObjectCount();

    if (objCount == 0)
        return true;

    RootedFunction rootedFun(cx);
    RootedScript rootedScript(cx);
    for (unsigned i = 0; i < objCount; i++) {
        JSObject *obj = calleeTypes->getSingleObject(i);
        if (obj && obj->is<JSFunction>()) {
            rootedFun = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            rootedFun = typeObj->interpretedFunction;
            if (!rootedFun)
                continue;
        }

        if (!rootedFun->isInterpreted())
            continue;

        rootedScript = rootedFun->getOrCreateScript(cx);
        if (!rootedScript)
            return false;

        if (rootedScript->shouldCloneAtCallsite()) {
            rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
            if (!rootedFun)
                return false;
            rootedScript = rootedFun->nonLazyScript();
        }

        // check if this call target is already known
        if (!AddCallTarget(rootedScript, targets))
            return false;
    }

    return true;
}

static bool
AddCallTarget(HandleScript script, CallTargetVector &targets)
{
    for (size_t i = 0; i < targets.length(); i++) {
        if (targets[i] == script)
            return true;
    }

    if (!targets.append(script))
        return false;

    return true;
}
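
// Illustrative use of the callee-extraction entry point (a sketch with
// a hypothetical caller; the real drivers live elsewhere):
//
//     CallTargetVector targets(cx);
//     if (!jit::AddPossibleCallees(cx, graph, targets))
//         return false;
//     // `targets` now holds one JSScript per distinct callee that a
//     // MCall in `graph` might reach; AddCallTarget() de-duplicates.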