Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition themselves and pass a nullptr when the double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/ParallelSafetyAnalysis.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "jit/Ion.h" |
michael@0 | 10 | #include "jit/IonAnalysis.h" |
michael@0 | 11 | #include "jit/IonSpewer.h" |
michael@0 | 12 | #include "jit/MIR.h" |
michael@0 | 13 | #include "jit/MIRGenerator.h" |
michael@0 | 14 | #include "jit/MIRGraph.h" |
michael@0 | 15 | #include "jit/UnreachableCodeElimination.h" |
michael@0 | 16 | |
michael@0 | 17 | #include "jsinferinlines.h" |
michael@0 | 18 | #include "jsobjinlines.h" |
michael@0 | 19 | |
michael@0 | 20 | using namespace js; |
michael@0 | 21 | using namespace jit; |
michael@0 | 22 | |
michael@0 | 23 | using parallel::Spew; |
michael@0 | 24 | using parallel::SpewMIR; |
michael@0 | 25 | using parallel::SpewCompile; |
michael@0 | 26 | |
// Helper macros for stamping out the visit##op() overrides of
// ParallelSafetyVisitor (below).  Every MIR opcode receives exactly one
// classification.

// Opcode is unconditionally safe for parallel execution; accept it as-is.
#define SAFE_OP(op) \
    virtual bool visit##op(M##op *prop) { return true; }

// Opcode needs bespoke handling; the override is defined out of line.
#define CUSTOM_OP(op) \
    virtual bool visit##op(M##op *prop);

// Opcode is simply discarded from its block in parallel mode.
#define DROP_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        MBasicBlock *block = ins->block(); \
        block->discard(ins); \
        return true; \
    }

// Bit corresponding to MIR type T, for the SPECIALIZED_OP flag masks.
#define PERMIT(T) (1 << T)

#define PERMIT_INT32 (PERMIT(MIRType_Int32))
#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))

// Opcode is safe only when specialized to one of the types in `flags`;
// the shared check lives in visitSpecializedInstruction().
#define SPECIALIZED_OP(op, flags) \
    virtual bool visit##op(M##op *ins) { \
        return visitSpecializedInstruction(ins, ins->specialization(), flags); \
    }

// Opcode is never safe in parallel mode: latch the unsafe flag (note
// markUnsafe() returns true -- "unsafe" is not a compile failure).
#define UNSAFE_OP(op) \
    virtual bool visit##op(M##op *ins) { \
        SpewMIR(ins, "Unsafe"); \
        return markUnsafe(); \
    }

// Opcode writes through prop->obj(); insert a runtime write guard that
// checks the target is thread-local before permitting the write.
#define WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        return insertWriteGuard(prop, prop->obj()); \
    }

// As WRITE_GUARDED_OP, but writes flagged racy() are admitted without a
// guard (presumably deliberate benign races -- see the op's definition).
#define MAYBE_WRITE_GUARDED_OP(op, obj) \
    virtual bool visit##op(M##op *prop) { \
        if (prop->racy()) \
            return true; \
        return insertWriteGuard(prop, prop->obj()); \
    }
michael@0 | 67 | |
// Visitor encoding the per-opcode parallel-safety policy.  Each MIR
// opcode is either accepted (SAFE_OP), rejected by latching the unsafe
// flag (UNSAFE_OP), rewritten out of line (CUSTOM_OP), admitted only
// behind a runtime write guard (WRITE_GUARDED_OP and friends),
// restricted to certain type specializations (SPECIALIZED_OP), or
// dropped (DROP_OP).  A visit method returning false signals an
// unrecoverable error, NOT unsafety -- unsafety is reported by
// markUnsafe(), which returns true.
class ParallelSafetyVisitor : public MInstructionVisitor
{
    MIRGraph &graph_;
    bool unsafe_;       // latched by markUnsafe(); reset per block via clearUnsafe()
    MDefinition *cx_;   // cached ForkJoinContext definition, fetched lazily

    bool insertWriteGuard(MInstruction *writeInstruction, MDefinition *valueBeingWritten);

    bool replaceWithNewPar(MInstruction *newInstruction, JSObject *templateObject);
    bool replace(MInstruction *oldInstruction, MInstruction *replacementInstruction);

    bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);

    // Intended for use in a visitXyz() instruction like "return
    // markUnsafe()". Sets the unsafe flag and returns true (since
    // this does not indicate an unrecoverable compilation failure).
    bool markUnsafe() {
        JS_ASSERT(!unsafe_);
        unsafe_ = true;
        return true;
    }

    TempAllocator &alloc() const {
        return graph_.alloc();
    }

  public:
    ParallelSafetyVisitor(MIRGraph &graph)
      : graph_(graph),
        unsafe_(false),
        cx_(nullptr)
    { }

    void clearUnsafe() { unsafe_ = false; }
    bool unsafe() { return unsafe_; }

    // Returns the graph's ForkJoinContext definition, fetching it from
    // the graph on first use and caching it thereafter.
    MDefinition *ForkJoinContext() {
        if (!cx_)
            cx_ = graph_.forkJoinContext();
        return cx_;
    }

    bool convertToBailout(MBasicBlock *block, MInstruction *ins);

    // I am taking the policy of blacklisting everything that's not
    // obviously safe for now. We can loosen as we need.

    SAFE_OP(Constant)
    UNSAFE_OP(CloneLiteral)
    SAFE_OP(Parameter)
    SAFE_OP(Callee)
    SAFE_OP(TableSwitch)
    SAFE_OP(Goto)
    SAFE_OP(Test)
    SAFE_OP(Compare)
    SAFE_OP(Phi)
    SAFE_OP(Beta)
    // OSR entry points never occur in parallel execution.
    UNSAFE_OP(OsrValue)
    UNSAFE_OP(OsrScopeChain)
    UNSAFE_OP(OsrReturnValue)
    UNSAFE_OP(OsrArgumentsObject)
    UNSAFE_OP(ReturnFromCtor)
    CUSTOM_OP(CheckOverRecursed)
    UNSAFE_OP(DefVar)
    UNSAFE_OP(DefFun)
    UNSAFE_OP(CreateThis)
    CUSTOM_OP(CreateThisWithTemplate)
    UNSAFE_OP(CreateThisWithProto)
    UNSAFE_OP(CreateArgumentsObject)
    UNSAFE_OP(GetArgumentsObjectArg)
    UNSAFE_OP(SetArgumentsObjectArg)
    UNSAFE_OP(ComputeThis)
    UNSAFE_OP(LoadArrowThis)
    CUSTOM_OP(Call)
    UNSAFE_OP(ApplyArgs)
    UNSAFE_OP(ArraySplice)
    UNSAFE_OP(Bail)
    UNSAFE_OP(AssertFloat32)
    UNSAFE_OP(GetDynamicName)
    UNSAFE_OP(FilterArgumentsOrEval)
    UNSAFE_OP(CallDirectEval)
    SAFE_OP(BitNot)
    SAFE_OP(TypeOf)
    UNSAFE_OP(ToId)
    SAFE_OP(BitAnd)
    SAFE_OP(BitOr)
    SAFE_OP(BitXor)
    SAFE_OP(Lsh)
    SAFE_OP(Rsh)
    SAFE_OP(Ursh)
    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
    SAFE_OP(Abs)
    SAFE_OP(Sqrt)
    UNSAFE_OP(Atan2)
    UNSAFE_OP(Hypot)
    CUSTOM_OP(MathFunction)
    // Arithmetic is admitted only when specialized to int32/double.
    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
    CUSTOM_OP(Concat)
    SAFE_OP(ConcatPar)
    UNSAFE_OP(CharCodeAt)
    UNSAFE_OP(FromCharCode)
    UNSAFE_OP(StringSplit)
    SAFE_OP(Return)
    CUSTOM_OP(Throw)
    SAFE_OP(Box)     // Boxing just creates a JSVal, doesn't alloc.
    SAFE_OP(Unbox)
    SAFE_OP(GuardObject)
    SAFE_OP(ToDouble)
    SAFE_OP(ToFloat32)
    SAFE_OP(ToInt32)
    SAFE_OP(TruncateToInt32)
    SAFE_OP(MaybeToDoubleElement)
    CUSTOM_OP(ToString)
    SAFE_OP(NewSlots)
    // Allocations are redirected to the per-thread arena out of line.
    CUSTOM_OP(NewArray)
    CUSTOM_OP(NewObject)
    CUSTOM_OP(NewCallObject)
    CUSTOM_OP(NewRunOnceCallObject)
    CUSTOM_OP(NewDerivedTypedObject)
    UNSAFE_OP(InitElem)
    UNSAFE_OP(InitElemGetterSetter)
    UNSAFE_OP(MutateProto)
    UNSAFE_OP(InitProp)
    UNSAFE_OP(InitPropGetterSetter)
    SAFE_OP(Start)
    UNSAFE_OP(OsrEntry)
    SAFE_OP(Nop)
    UNSAFE_OP(RegExp)
    CUSTOM_OP(Lambda)
    UNSAFE_OP(LambdaArrow)
    UNSAFE_OP(ImplicitThis)
    SAFE_OP(Slots)
    SAFE_OP(Elements)
    SAFE_OP(ConstantElements)
    SAFE_OP(LoadSlot)
    WRITE_GUARDED_OP(StoreSlot, slots)
    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
    SAFE_OP(FilterTypeSet)
    SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
    SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
    UNSAFE_OP(PostWriteBarrier)
    SAFE_OP(GetPropertyCache)
    SAFE_OP(GetPropertyPolymorphic)
    UNSAFE_OP(SetPropertyPolymorphic)
    SAFE_OP(GetElementCache)
    WRITE_GUARDED_OP(SetElementCache, object)
    UNSAFE_OP(BindNameCache)
    SAFE_OP(GuardShape)
    SAFE_OP(GuardObjectType)
    SAFE_OP(GuardObjectIdentity)
    SAFE_OP(GuardClass)
    SAFE_OP(AssertRange)
    SAFE_OP(ArrayLength)
    WRITE_GUARDED_OP(SetArrayLength, elements)
    SAFE_OP(TypedArrayLength)
    SAFE_OP(TypedArrayElements)
    SAFE_OP(TypedObjectElements)
    SAFE_OP(SetTypedObjectOffset)
    SAFE_OP(InitializedLength)
    WRITE_GUARDED_OP(SetInitializedLength, elements)
    SAFE_OP(Not)
    SAFE_OP(NeuterCheck)
    SAFE_OP(BoundsCheck)
    SAFE_OP(BoundsCheckLower)
    SAFE_OP(LoadElement)
    SAFE_OP(LoadElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
    WRITE_GUARDED_OP(StoreElementHole, elements)
    UNSAFE_OP(ArrayPopShift)
    UNSAFE_OP(ArrayPush)
    SAFE_OP(LoadTypedArrayElement)
    SAFE_OP(LoadTypedArrayElementHole)
    SAFE_OP(LoadTypedArrayElementStatic)
    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
    WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
    UNSAFE_OP(StoreTypedArrayElementStatic)
    UNSAFE_OP(ClampToUint8)
    SAFE_OP(LoadFixedSlot)
    WRITE_GUARDED_OP(StoreFixedSlot, object)
    UNSAFE_OP(CallGetProperty)
    UNSAFE_OP(GetNameCache)
    UNSAFE_OP(CallGetIntrinsicValue)
    UNSAFE_OP(CallsiteCloneCache)
    UNSAFE_OP(CallGetElement)
    WRITE_GUARDED_OP(CallSetElement, object)
    UNSAFE_OP(CallInitElementArray)
    WRITE_GUARDED_OP(CallSetProperty, object)
    UNSAFE_OP(DeleteProperty)
    UNSAFE_OP(DeleteElement)
    WRITE_GUARDED_OP(SetPropertyCache, object)
    UNSAFE_OP(IteratorStart)
    UNSAFE_OP(IteratorNext)
    UNSAFE_OP(IteratorMore)
    UNSAFE_OP(IteratorEnd)
    SAFE_OP(StringLength)
    SAFE_OP(ArgumentsLength)
    SAFE_OP(GetFrameArgument)
    UNSAFE_OP(SetFrameArgument)
    UNSAFE_OP(RunOncePrologue)
    CUSTOM_OP(Rest)
    SAFE_OP(RestPar)
    SAFE_OP(Floor)
    SAFE_OP(Round)
    UNSAFE_OP(InstanceOf)
    CUSTOM_OP(InterruptCheck)
    // The *Par opcodes below are the parallel forms this pass itself
    // produces; they are safe by construction.
    SAFE_OP(ForkJoinContext)
    SAFE_OP(ForkJoinGetSlice)
    SAFE_OP(NewPar)
    SAFE_OP(NewDenseArrayPar)
    SAFE_OP(NewCallObjectPar)
    SAFE_OP(LambdaPar)
    SAFE_OP(AbortPar)
    UNSAFE_OP(ArrayConcat)
    UNSAFE_OP(GetDOMProperty)
    UNSAFE_OP(GetDOMMember)
    UNSAFE_OP(SetDOMProperty)
    UNSAFE_OP(NewStringObject)
    UNSAFE_OP(Random)
    SAFE_OP(Pow)
    SAFE_OP(PowHalf)
    UNSAFE_OP(RegExpTest)
    UNSAFE_OP(RegExpExec)
    UNSAFE_OP(RegExpReplace)
    UNSAFE_OP(StringReplace)
    UNSAFE_OP(CallInstanceOf)
    UNSAFE_OP(ProfilerStackOp)
    UNSAFE_OP(GuardString)
    UNSAFE_OP(NewDeclEnvObject)
    UNSAFE_OP(In)
    UNSAFE_OP(InArray)
    SAFE_OP(GuardThreadExclusive)
    SAFE_OP(InterruptCheckPar)
    SAFE_OP(CheckOverRecursedPar)
    SAFE_OP(FunctionDispatch)
    SAFE_OP(TypeObjectDispatch)
    SAFE_OP(IsCallable)
    SAFE_OP(HaveSameClass)
    SAFE_OP(HasClass)
    // asm.js-only opcodes never appear in parallel-executed scripts.
    UNSAFE_OP(EffectiveAddress)
    UNSAFE_OP(AsmJSUnsignedToDouble)
    UNSAFE_OP(AsmJSUnsignedToFloat32)
    UNSAFE_OP(AsmJSNeg)
    UNSAFE_OP(AsmJSLoadHeap)
    UNSAFE_OP(AsmJSStoreHeap)
    UNSAFE_OP(AsmJSLoadGlobalVar)
    UNSAFE_OP(AsmJSStoreGlobalVar)
    UNSAFE_OP(AsmJSLoadFuncPtr)
    UNSAFE_OP(AsmJSLoadFFIFunc)
    UNSAFE_OP(AsmJSReturn)
    UNSAFE_OP(AsmJSVoidReturn)
    UNSAFE_OP(AsmJSPassStackArg)
    UNSAFE_OP(AsmJSParameter)
    UNSAFE_OP(AsmJSCall)
    DROP_OP(RecompileCheck)

    // It looks like this could easily be made safe:
    UNSAFE_OP(ConvertElementsToDoubles)
};
michael@0 | 329 | |
// Entry point of the pass.  Walks the MIR graph in RPO, rewriting
// parallel-unsafe instructions into parallel forms where possible and
// converting blocks that remain unsafe into bailout blocks, then runs
// UCE and neuters resume points.  Returns false only on unrecoverable
// failure (cancellation, an unaccepted instruction, or an unsafe entry
// block); an unsafe instruction in a non-entry block is not a failure.
bool
ParallelSafetyAnalysis::analyze()
{
    // Walk the basic blocks in a DFS. When we encounter a block with an
    // unsafe instruction, then we know that this block will bailout when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, though, because the graph is sorted
    // in RPO. Therefore, we just use the marked flags to tell us
    // when we visited some predecessor of the current block.
    ParallelSafetyVisitor visitor(graph_);
    graph_.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        if (mir_->shouldCancel("ParallelSafetyAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
            MInstruction *instr = nullptr;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir_->shouldCancel("ParallelSafetyAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                // accept() dispatches to the visit##op overrides; a
                // false return here is a hard error, not "unsafe".
                if (!instr->accept(&visitor)) {
                    SpewMIR(instr, "Unaccepted");
                    return false;
                }
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bailout.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bailout.
                if (*block == graph_.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement that will.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelSafetyAnalysis");

    // Blocks never marked above are unreachable under parallel
    // execution; strip them (`marked` lets UCE cross-check the count).
    UnreachableCodeElimination uce(mir_, graph_);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelSafetyAnalysis");
    AssertExtendedGraphCoherency(graph_);

    if (!removeResumePointOperands())
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph_);

    return true;
}
michael@0 | 415 | |
// Neuters every resume point in the graph by replacing all of its
// operands with one shared `undefined` constant.  Always returns true.
bool
ParallelSafetyAnalysis::removeResumePointOperands()
{
    // In parallel exec mode, nothing is effectful, therefore we do
    // not need to reconstruct interpreter state and can simply
    // bailout by returning a special code. Ideally we'd either
    // remove the unused resume points or else never generate them in
    // the first place, but I encountered various assertions and
    // crashes attempting to do that, so for the time being I simply
    // replace their operands with undefined. This prevents them from
    // interfering with DCE and other optimizations. It is also *necessary*
    // to handle cases like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.

    // The shared undefined constant is created lazily, immediately
    // after the MStart instruction, so it dominates everything that
    // follows; until it exists (`udef` still null), resume points are
    // deliberately left untouched.
    MConstant *udef = nullptr;
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        // Entry resume points of blocks visited after MStart are
        // neutered here.
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                // Only one MStart exists in the graph (asserted below).
                JS_ASSERT(udef == nullptr);
                udef = MConstant::New(graph_.alloc(), UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}
michael@0 | 455 | |
michael@0 | 456 | void |
michael@0 | 457 | ParallelSafetyAnalysis::replaceOperandsOnResumePoint(MResumePoint *resumePoint, |
michael@0 | 458 | MDefinition *withDef) |
michael@0 | 459 | { |
michael@0 | 460 | for (size_t i = 0, e = resumePoint->numOperands(); i < e; i++) |
michael@0 | 461 | resumePoint->replaceOperand(i, withDef); |
michael@0 | 462 | } |
michael@0 | 463 | |
// Rewires the CFG so that each reachable predecessor of `block` jumps
// to a freshly created AbortPar bailout block instead of `block`
// itself, then unmarks `block` (it is now unreachable and will be
// removed by UCE).  Note `ins` -- the unsafe instruction that caused
// the conversion -- is not otherwise used in this body.  Returns false
// only if a bailout block cannot be allocated.
bool
ParallelSafetyVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe()); // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here

    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();

    // This block is no longer reachable.
    block->unmark();

    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);

        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;

        // create bailout block to insert on this edge
        MBasicBlock *bailBlock = MBasicBlock::NewAbortPar(graph_, block->info(), pred,
                                                          block->pc(),
                                                          block->entryResumePoint());
        if (!bailBlock)
            return false;

        // if `block` had phis, we are replacing it with `bailBlock` which does not
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(nullptr, 0);

        // redirect the predecessor to the bailout block
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);

        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }

    return true;
}
michael@0 | 516 | |
michael@0 | 517 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 518 | // Memory allocation |
michael@0 | 519 | // |
michael@0 | 520 | // Simple memory allocation opcodes---those which ultimately compile |
michael@0 | 521 | // down to a (possibly inlined) invocation of NewGCThing()---are |
michael@0 | 522 | // replaced with MNewPar, which is supplied with the thread context. |
michael@0 | 523 | // These allocations will take place using per-helper-thread arenas. |
michael@0 | 524 | |
michael@0 | 525 | bool |
michael@0 | 526 | ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins) |
michael@0 | 527 | { |
michael@0 | 528 | return replaceWithNewPar(ins, ins->templateObject()); |
michael@0 | 529 | } |
michael@0 | 530 | |
michael@0 | 531 | bool |
michael@0 | 532 | ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins) |
michael@0 | 533 | { |
michael@0 | 534 | replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins)); |
michael@0 | 535 | return true; |
michael@0 | 536 | } |
michael@0 | 537 | |
michael@0 | 538 | bool |
michael@0 | 539 | ParallelSafetyVisitor::visitNewRunOnceCallObject(MNewRunOnceCallObject *ins) |
michael@0 | 540 | { |
michael@0 | 541 | replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins)); |
michael@0 | 542 | return true; |
michael@0 | 543 | } |
michael@0 | 544 | |
michael@0 | 545 | bool |
michael@0 | 546 | ParallelSafetyVisitor::visitLambda(MLambda *ins) |
michael@0 | 547 | { |
michael@0 | 548 | if (ins->info().singletonType || ins->info().useNewTypeForClone) { |
michael@0 | 549 | // slow path: bail on parallel execution. |
michael@0 | 550 | return markUnsafe(); |
michael@0 | 551 | } |
michael@0 | 552 | |
michael@0 | 553 | // fast path: replace with LambdaPar op |
michael@0 | 554 | replace(ins, MLambdaPar::New(alloc(), ForkJoinContext(), ins)); |
michael@0 | 555 | return true; |
michael@0 | 556 | } |
michael@0 | 557 | |
michael@0 | 558 | bool |
michael@0 | 559 | ParallelSafetyVisitor::visitNewObject(MNewObject *newInstruction) |
michael@0 | 560 | { |
michael@0 | 561 | if (newInstruction->shouldUseVM()) { |
michael@0 | 562 | SpewMIR(newInstruction, "should use VM"); |
michael@0 | 563 | return markUnsafe(); |
michael@0 | 564 | } |
michael@0 | 565 | |
michael@0 | 566 | return replaceWithNewPar(newInstruction, newInstruction->templateObject()); |
michael@0 | 567 | } |
michael@0 | 568 | |
michael@0 | 569 | bool |
michael@0 | 570 | ParallelSafetyVisitor::visitNewArray(MNewArray *newInstruction) |
michael@0 | 571 | { |
michael@0 | 572 | if (newInstruction->shouldUseVM()) { |
michael@0 | 573 | SpewMIR(newInstruction, "should use VM"); |
michael@0 | 574 | return markUnsafe(); |
michael@0 | 575 | } |
michael@0 | 576 | |
michael@0 | 577 | return replaceWithNewPar(newInstruction, newInstruction->templateObject()); |
michael@0 | 578 | } |
michael@0 | 579 | |
michael@0 | 580 | bool |
michael@0 | 581 | ParallelSafetyVisitor::visitNewDerivedTypedObject(MNewDerivedTypedObject *ins) |
michael@0 | 582 | { |
michael@0 | 583 | // FIXME(Bug 984090) -- There should really be a parallel-safe |
michael@0 | 584 | // version of NewDerivedTypedObject. However, until that is |
michael@0 | 585 | // implemented, let's just ignore those with 0 uses, since they |
michael@0 | 586 | // will be stripped out by DCE later. |
michael@0 | 587 | if (ins->useCount() == 0) |
michael@0 | 588 | return true; |
michael@0 | 589 | |
michael@0 | 590 | SpewMIR(ins, "visitNewDerivedTypedObject"); |
michael@0 | 591 | return markUnsafe(); |
michael@0 | 592 | } |
michael@0 | 593 | |
michael@0 | 594 | bool |
michael@0 | 595 | ParallelSafetyVisitor::visitRest(MRest *ins) |
michael@0 | 596 | { |
michael@0 | 597 | return replace(ins, MRestPar::New(alloc(), ForkJoinContext(), ins)); |
michael@0 | 598 | } |
michael@0 | 599 | |
michael@0 | 600 | bool |
michael@0 | 601 | ParallelSafetyVisitor::visitMathFunction(MMathFunction *ins) |
michael@0 | 602 | { |
michael@0 | 603 | return replace(ins, MMathFunction::New(alloc(), ins->input(), ins->function(), nullptr)); |
michael@0 | 604 | } |
michael@0 | 605 | |
michael@0 | 606 | bool |
michael@0 | 607 | ParallelSafetyVisitor::visitConcat(MConcat *ins) |
michael@0 | 608 | { |
michael@0 | 609 | return replace(ins, MConcatPar::New(alloc(), ForkJoinContext(), ins)); |
michael@0 | 610 | } |
michael@0 | 611 | |
michael@0 | 612 | bool |
michael@0 | 613 | ParallelSafetyVisitor::visitToString(MToString *ins) |
michael@0 | 614 | { |
michael@0 | 615 | MIRType inputType = ins->input()->type(); |
michael@0 | 616 | if (inputType != MIRType_Int32 && inputType != MIRType_Double) |
michael@0 | 617 | return markUnsafe(); |
michael@0 | 618 | return true; |
michael@0 | 619 | } |
michael@0 | 620 | |
michael@0 | 621 | bool |
michael@0 | 622 | ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction, |
michael@0 | 623 | JSObject *templateObject) |
michael@0 | 624 | { |
michael@0 | 625 | replace(newInstruction, MNewPar::New(alloc(), ForkJoinContext(), templateObject)); |
michael@0 | 626 | return true; |
michael@0 | 627 | } |
michael@0 | 628 | |
// Replaces `oldInstruction` with `replacementInstruction` inside its
// block.  The order is significant: the replacement is inserted first,
// then all uses are redirected to it, and only then is the old
// instruction discarded.  Always returns true (callers treat a
// replacement as "safe", not as a failure).
bool
ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                               MInstruction *replacementInstruction)
{
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}
michael@0 | 639 | |
michael@0 | 640 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 641 | // Write Guards |
michael@0 | 642 | // |
michael@0 | 643 | // We only want to permit writes to locally guarded objects. |
michael@0 | 644 | // Furthermore, we want to avoid PICs and other non-thread-safe things |
michael@0 | 645 | // (though perhaps we should support PICs at some point). If we |
michael@0 | 646 | // cannot determine the origin of an object, we can insert a write |
michael@0 | 647 | // guard which will check whether the object was allocated from the |
michael@0 | 648 | // per-thread-arena or not. |
michael@0 | 649 | |
// Insert a thread-exclusivity guard before |writeInstruction|, which writes
// |valueBeingWritten|. The guard is elided when the target object is provably
// thread-local (new slots, MNewPar); if the object cannot be identified from
// the written value's MIR type/op, the whole compilation is marked unsafe.
bool
ParallelSafetyVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                        MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;

      case MIRType_Slots:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;

          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      case MIRType_Elements:
        // Elements may come from plain objects, typed arrays, or typed
        // objects; each op exposes the owning object differently.
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;

          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;

          case MDefinition::Op_TypedObjectElements:
            object = valueBeingWritten->toTypedObjectElements()->object();
            break;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      default:
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }

    // Peel off an Unbox to reach the underlying object definition.
    if (object->isUnbox())
        object = object->toUnbox()->input();

    switch (object->op()) {
      case MDefinition::Op_NewPar:
        // MNewPar will always be creating something thread-local, omit the guard
        SpewMIR(writeInstruction, "write to NewPar prop does not require guard");
        return true;
      default:
        break;
    }

    // Otherwise emit a runtime guard that checks, against the current
    // ForkJoinContext, that |object| is exclusive to this thread.
    MBasicBlock *block = writeInstruction->block();
    MGuardThreadExclusive *writeGuard =
        MGuardThreadExclusive::New(alloc(), ForkJoinContext(), object);
    block->insertBefore(writeInstruction, writeGuard);
    writeGuard->adjustInputs(alloc(), writeGuard);
    return true;
}
michael@0 | 727 | |
michael@0 | 728 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 729 | // Calls |
michael@0 | 730 | // |
// We only support calls to interpreted functions that have already been
michael@0 | 732 | // Ion compiled. If a function has no IonScript, we bail out. |
michael@0 | 733 | |
michael@0 | 734 | bool |
michael@0 | 735 | ParallelSafetyVisitor::visitCall(MCall *ins) |
michael@0 | 736 | { |
michael@0 | 737 | // DOM? Scary. |
michael@0 | 738 | if (ins->isCallDOMNative()) { |
michael@0 | 739 | SpewMIR(ins, "call to dom function"); |
michael@0 | 740 | return markUnsafe(); |
michael@0 | 741 | } |
michael@0 | 742 | |
michael@0 | 743 | JSFunction *target = ins->getSingleTarget(); |
michael@0 | 744 | if (target) { |
michael@0 | 745 | // Non-parallel native? Scary |
michael@0 | 746 | if (target->isNative() && !target->hasParallelNative()) { |
michael@0 | 747 | SpewMIR(ins, "call to non-parallel native function"); |
michael@0 | 748 | return markUnsafe(); |
michael@0 | 749 | } |
michael@0 | 750 | return true; |
michael@0 | 751 | } |
michael@0 | 752 | |
michael@0 | 753 | if (ins->isConstructing()) { |
michael@0 | 754 | SpewMIR(ins, "call to unknown constructor"); |
michael@0 | 755 | return markUnsafe(); |
michael@0 | 756 | } |
michael@0 | 757 | |
michael@0 | 758 | return true; |
michael@0 | 759 | } |
michael@0 | 760 | |
michael@0 | 761 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 762 | // Stack limit, interrupts |
michael@0 | 763 | // |
michael@0 | 764 | // In sequential Ion code, the stack limit is stored in the JSRuntime. |
michael@0 | 765 | // We store it in the thread context. We therefore need a separate |
michael@0 | 766 | // instruction to access it, one parameterized by the thread context. |
michael@0 | 767 | // Similar considerations apply to checking for interrupts. |
michael@0 | 768 | |
michael@0 | 769 | bool |
michael@0 | 770 | ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins) |
michael@0 | 771 | { |
michael@0 | 772 | return replace(ins, MCheckOverRecursedPar::New(alloc(), ForkJoinContext())); |
michael@0 | 773 | } |
michael@0 | 774 | |
michael@0 | 775 | bool |
michael@0 | 776 | ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins) |
michael@0 | 777 | { |
michael@0 | 778 | return replace(ins, MInterruptCheckPar::New(alloc(), ForkJoinContext())); |
michael@0 | 779 | } |
michael@0 | 780 | |
michael@0 | 781 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 782 | // Specialized ops |
michael@0 | 783 | // |
michael@0 | 784 | // Some ops, like +, can be specialized to ints/doubles. Anything |
michael@0 | 785 | // else is terrifying. |
michael@0 | 786 | // |
michael@0 | 787 | // TODO---Eventually, we should probably permit arbitrary + but bail |
michael@0 | 788 | // if the operands are not both integers/floats. |
michael@0 | 789 | |
michael@0 | 790 | bool |
michael@0 | 791 | ParallelSafetyVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec, |
michael@0 | 792 | uint32_t flags) |
michael@0 | 793 | { |
michael@0 | 794 | uint32_t flag = 1 << spec; |
michael@0 | 795 | if (flags & flag) |
michael@0 | 796 | return true; |
michael@0 | 797 | |
michael@0 | 798 | SpewMIR(ins, "specialized to unacceptable type %d", spec); |
michael@0 | 799 | return markUnsafe(); |
michael@0 | 800 | } |
michael@0 | 801 | |
michael@0 | 802 | ///////////////////////////////////////////////////////////////////////////// |
michael@0 | 803 | // Throw |
michael@0 | 804 | |
michael@0 | 805 | bool |
michael@0 | 806 | ParallelSafetyVisitor::visitThrow(MThrow *thr) |
michael@0 | 807 | { |
michael@0 | 808 | MBasicBlock *block = thr->block(); |
michael@0 | 809 | JS_ASSERT(block->lastIns() == thr); |
michael@0 | 810 | block->discardLastIns(); |
michael@0 | 811 | MAbortPar *bailout = MAbortPar::New(alloc()); |
michael@0 | 812 | if (!bailout) |
michael@0 | 813 | return false; |
michael@0 | 814 | block->end(bailout); |
michael@0 | 815 | return true; |
michael@0 | 816 | } |
michael@0 | 817 | |
michael@0 | 818 | /////////////////////////////////////////////////////////////////////////// |
michael@0 | 819 | // Callee extraction |
michael@0 | 820 | // |
michael@0 | 821 | // See comments in header file. |
michael@0 | 822 | |
// Forward declaration: append to |targets| every script that type
// information says a call at |pc| in |script| might reach. Defined below.
static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes, CallTargetVector &targets);

// Forward declaration: append |script| to |targets| unless it is already
// present. Defined below.
static bool
AddCallTarget(HandleScript script, CallTargetVector &targets);
michael@0 | 829 | |
// Walk every call instruction in |graph| and accumulate into |targets| the
// set of scripts each call might invoke, either from a known single target
// or from type-inference data. Returns false on failure (OOM or script
// creation failure).
bool
jit::AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
{
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
        {
            if (!ins->isCall())
                continue;

            MCall *callIns = ins->toCall();

            // Known single target: record its script directly. Native
            // targets are expected to have parallel counterparts (see the
            // assertion); they contribute no script.
            RootedFunction target(cx, callIns->getSingleTarget());
            if (target) {
                JS_ASSERT_IF(!target->isInterpreted(), target->hasParallelNative());

                if (target->isInterpreted()) {
                    RootedScript script(cx, target->getOrCreateScript(cx));
                    if (!script || !AddCallTarget(script, targets))
                        return false;
                }

                continue;
            }

            // Unknown target: ask type inference for the possible callees
            // at this call site.
            types::TemporaryTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
            RootedScript script(cx, callIns->block()->info().script());
            if (!GetPossibleCallees(cx,
                                    script,
                                    callIns->resumePoint()->pc(),
                                    calleeTypes,
                                    targets))
                return false;
        }
    }

    return true;
}
michael@0 | 867 | |
// Enumerate the interpreted functions that |calleeTypes| says the call at
// |pc| in |script| might reach, appending each target script (deduplicated
// via AddCallTarget) to |targets|. A type set with unknown/primitive flags
// or no objects yields no targets. Returns false only on failure (script
// creation or callsite cloning).
static bool
GetPossibleCallees(JSContext *cx,
                   HandleScript script,
                   jsbytecode *pc,
                   types::TemporaryTypeSet *calleeTypes,
                   CallTargetVector &targets)
{
    // Non-zero base flags mean the set may contain non-object values;
    // nothing useful can be enumerated then.
    if (!calleeTypes || calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount = calleeTypes->getObjectCount();

    if (objCount == 0)
        return true;

    RootedFunction rootedFun(cx);
    RootedScript rootedScript(cx);
    for (unsigned i = 0; i < objCount; i++) {
        // Each entry is either a singleton object or a type object; in the
        // latter case the associated interpreted function (if any) is used.
        JSObject *obj = calleeTypes->getSingleObject(i);
        if (obj && obj->is<JSFunction>()) {
            rootedFun = &obj->as<JSFunction>();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            rootedFun = typeObj->interpretedFunction;
            if (!rootedFun)
                continue;
        }

        if (!rootedFun->isInterpreted())
            continue;

        rootedScript = rootedFun->getOrCreateScript(cx);
        if (!rootedScript)
            return false;

        // Callsite cloning: compile against the per-callsite clone's
        // script rather than the original.
        if (rootedScript->shouldCloneAtCallsite()) {
            rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
            if (!rootedFun)
                return false;
            rootedScript = rootedFun->nonLazyScript();
        }

        // check if this call target is already known
        if (!AddCallTarget(rootedScript, targets))
            return false;
    }

    return true;
}
michael@0 | 919 | |
michael@0 | 920 | static bool |
michael@0 | 921 | AddCallTarget(HandleScript script, CallTargetVector &targets) |
michael@0 | 922 | { |
michael@0 | 923 | for (size_t i = 0; i < targets.length(); i++) { |
michael@0 | 924 | if (targets[i] == script) |
michael@0 | 925 | return true; |
michael@0 | 926 | } |
michael@0 | 927 | |
michael@0 | 928 | if (!targets.append(script)) |
michael@0 | 929 | return false; |
michael@0 | 930 | |
michael@0 | 931 | return true; |
michael@0 | 932 | } |