/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#if defined(XP_WIN)
# include <io.h>     // for isatty()
#else
# include <unistd.h> // for isatty()
#endif

#include "vm/ForkJoin.h"

#include "mozilla/ThreadLocal.h"

#include "jscntxt.h"
#include "jslock.h"
#include "jsprf.h"

#include "builtin/TypedObject.h"

#ifdef JS_THREADSAFE
# include "jit/BaselineJIT.h"
# include "vm/Monitor.h"
#endif

#if defined(JS_THREADSAFE) && defined(JS_ION)
# include "jit/JitCommon.h"
# ifdef DEBUG
#  include "jit/Ion.h"
#  include "jit/JitCompartment.h"
#  include "jit/MIR.h"
#  include "jit/MIRGraph.h"
# endif
#endif // THREADSAFE && ION

#include "vm/Interpreter-inl.h"

using namespace js;
using namespace js::parallel;
using namespace js::jit;

using mozilla::ThreadLocal;

///////////////////////////////////////////////////////////////////////////
// Degenerate configurations
//
// When JS_THREADSAFE or JS_ION is not defined, we simply run the
// |func| callback sequentially. We also forego the feedback
// altogether.

static bool
ExecuteSequentially(JSContext *cx_, HandleValue funVal, uint16_t *sliceStart,
                    uint16_t sliceEnd);

#if !defined(JS_THREADSAFE) || !defined(JS_ION)
bool
js::ForkJoin(JSContext *cx, CallArgs &args)
{
    RootedValue argZero(cx, args[0]);
    uint16_t sliceStart = uint16_t(args[1].toInt32());
    uint16_t sliceEnd = uint16_t(args[2].toInt32());
    if (!ExecuteSequentially(cx, argZero, &sliceStart, sliceEnd))
        return false;
    MOZ_ASSERT(sliceStart == sliceEnd);
    return true;
}

JSContext *
ForkJoinContext::acquireJSContext()
{
    return nullptr;
}

void
ForkJoinContext::releaseJSContext()
{
}

bool
ForkJoinContext::isMainThread() const
{
    return true;
}

JSRuntime *
ForkJoinContext::runtime()
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

bool
ForkJoinContext::check()
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

void
ForkJoinContext::requestGC(JS::gcreason::Reason reason)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

void
ForkJoinContext::requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

bool
ForkJoinContext::setPendingAbortFatal(ParallelBailoutCause cause)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
    return false;
}

void
ParallelBailoutRecord::setCause(ParallelBailoutCause cause,
                                JSScript *outermostScript,
                                JSScript *currentScript,
                                jsbytecode *currentPc)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

void
js::ParallelBailoutRecord::updateCause(ParallelBailoutCause cause,
                                       JSScript *outermostScript,
                                       JSScript *currentScript,
                                       jsbytecode *currentPc)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

void
ParallelBailoutRecord::addTrace(JSScript *script,
                                jsbytecode *pc)
{
    MOZ_ASSUME_UNREACHABLE("Not THREADSAFE build");
}

bool
js::InExclusiveParallelSection()
{
    return false;
}

bool
js::ParallelTestsShouldPass(JSContext *cx)
{
    return false;
}

bool
js::intrinsic_SetForkJoinTargetRegion(JSContext *cx, unsigned argc, Value *vp)
{
    return true;
}

static bool
intrinsic_SetForkJoinTargetRegionPar(ForkJoinContext *cx, unsigned argc, Value *vp)
{
    return true;
}

JS_JITINFO_NATIVE_PARALLEL(js::intrinsic_SetForkJoinTargetRegionInfo,
                           intrinsic_SetForkJoinTargetRegionPar);

bool
js::intrinsic_ClearThreadLocalArenas(JSContext *cx, unsigned argc, Value *vp)
{
    return true;
}

static bool
intrinsic_ClearThreadLocalArenasPar(ForkJoinContext *cx, unsigned argc, Value *vp)
{
    return true;
}

JS_JITINFO_NATIVE_PARALLEL(js::intrinsic_ClearThreadLocalArenasInfo,
                           intrinsic_ClearThreadLocalArenasPar);

#endif // !JS_THREADSAFE || !JS_ION

///////////////////////////////////////////////////////////////////////////
// All configurations
//
// Some code that is shared between degenerate and parallel configurations.
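//
// ExecuteSequentially() below calls into the self-hosted kernel using the
// same argument convention as the parallel path, roughly:
//
//     rval = func(0 /* worker id */, sliceStart, sliceEnd);
//
// and the return value is taken as the new |sliceStart|, i.e. the first
// slice that was not processed.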

static bool
ExecuteSequentially(JSContext *cx, HandleValue funVal, uint16_t *sliceStart,
                    uint16_t sliceEnd)
{
    FastInvokeGuard fig(cx, funVal);
    InvokeArgs &args = fig.args();
    if (!args.init(3))
        return false;
    args.setCallee(funVal);
    args.setThis(UndefinedValue());
    args[0].setInt32(0);
    args[1].setInt32(*sliceStart);
    args[2].setInt32(sliceEnd);
    if (!fig.invoke(cx))
        return false;
    *sliceStart = (uint16_t)(args.rval().toInt32());
    return true;
}

ThreadLocal<ForkJoinContext*> ForkJoinContext::tlsForkJoinContext;

/* static */ bool
ForkJoinContext::initialize()
{
    if (!tlsForkJoinContext.initialized()) {
        if (!tlsForkJoinContext.init())
            return false;
    }
    return true;
}
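// The TLS slot initialized here is what ForkJoinContext::current() (declared
// in ForkJoin.h) uses to find the context of the executing worker; it is set
// and cleared around each portion of work by AutoSetForkJoinContext below.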

///////////////////////////////////////////////////////////////////////////
// Parallel configurations
//
// The remainder of this file is specific to cases where both
// JS_THREADSAFE and JS_ION are enabled.

#if defined(JS_THREADSAFE) && defined(JS_ION)

///////////////////////////////////////////////////////////////////////////
// Class Declarations and Function Prototypes

namespace js {

// When writing tests, it is often useful to specify different modes
// of operation.
enum ForkJoinMode {
    // WARNING: If you change this enum, you MUST update
    // ForkJoinMode() in Utilities.js

    // The "normal" behavior: attempt parallel, fallback to
    // sequential. If compilation is ongoing in a helper thread, then
    // run sequential warmup iterations in the meantime. If those
    // iterations wind up completing all the work, just abort.
    ForkJoinModeNormal,

    // Like normal, except that we will keep running warmup iterations
    // until compilations are complete, even if there is no more work
    // to do. This is useful in tests as a "setup" run.
    ForkJoinModeCompile,

    // Requires that compilation has already completed. Expects parallel
    // execution to proceed without a hitch. (Reports an error otherwise)
    ForkJoinModeParallel,

    // Requires that compilation has already completed. Expects
    // parallel execution to bailout once but continue after that without
    // further bailouts. (Reports an error otherwise)
    ForkJoinModeRecover,

    // Expects all parallel executions to yield a bailout. If this is not
    // the case, reports an error.
    ForkJoinModeBailout,

    NumForkJoinModes
};
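// The mode reaches ForkJoin() from self-hosted code as its fourth argument
// (see the assertions in js::ForkJoin below); tests typically pick a mode via
// the ForkJoinMode() helper in Utilities.js mentioned above.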

class ForkJoinOperation
{
  public:
    // For tests, make sure to keep this in sync with minItemsTestingThreshold.
    static const uint32_t MAX_BAILOUTS = 3;
    uint32_t bailouts;

    // Information about the bailout:
    ParallelBailoutCause bailoutCause;
    RootedScript bailoutScript;
    jsbytecode *bailoutBytecode;

    ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
                      uint16_t sliceEnd, ForkJoinMode mode);
    ExecutionStatus apply();

  private:
    // Most of the functions involved in managing the parallel
    // compilation follow a similar control-flow. They return RedLight
    // if they have either encountered a fatal error or completed the
    // execution, such that no further work is needed. In that event,
    // they take an `ExecutionStatus*` which they use to report
    // whether execution was successful or not. If the function
    // returns `GreenLight`, then the parallel operation is not yet
    // fully completed, so the state machine should carry on.
    enum TrafficLight {
        RedLight,
        GreenLight
    };
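    // A typical step of the apply() state machine below therefore reads
    // roughly:
    //
    //     if (compileForParallelExecution(&status) == RedLight)
    //         return SpewEndOp(status); // fatal error or work already done
    //     // GreenLight: fall through to the next phase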

    struct WorklistData {
        // True if we enqueued the callees from the ion-compiled
        // version of this entry
        bool calleesEnqueued;

        // Last recorded useCount; updated after warmup
        // iterations.
        uint32_t useCount;

        // Number of consecutive "stalls" --- meaning warmups
        // where useCount did not increase.
        uint32_t stallCount;

        void reset() {
            calleesEnqueued = false;
            useCount = 0;
            stallCount = 0;
        }
    };
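    // With the stall threshold used by compileForParallelExecution() (three
    // at the time of writing), a script whose use count fails to grow for
    // three consecutive warmup iterations is treated as unlikely to be a
    // real callee and no longer holds up compilation.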

    JSContext *cx_;
    HandleFunction fun_;
    uint16_t sliceStart_;
    uint16_t sliceEnd_;
    Vector<ParallelBailoutRecord, 16> bailoutRecords_;
    AutoScriptVector worklist_;
    Vector<WorklistData, 16> worklistData_;
    ForkJoinMode mode_;

    TrafficLight enqueueInitialScript(ExecutionStatus *status);
    TrafficLight compileForParallelExecution(ExecutionStatus *status);
    TrafficLight warmupExecution(bool stopIfComplete, ExecutionStatus *status);
    TrafficLight parallelExecution(ExecutionStatus *status);
    TrafficLight sequentialExecution(bool disqualified, ExecutionStatus *status);
    TrafficLight recoverFromBailout(ExecutionStatus *status);
    TrafficLight fatalError(ExecutionStatus *status);
    bool isInitialScript(HandleScript script);
    void determineBailoutCause();
    bool invalidateBailedOutScripts();
    ExecutionStatus sequentialExecution(bool disqualified);

    TrafficLight appendCallTargetsToWorklist(uint32_t index, ExecutionStatus *status);
    TrafficLight appendCallTargetToWorklist(HandleScript script, ExecutionStatus *status);
    bool addToWorklist(HandleScript script);
    inline bool hasScript(Vector<types::RecompileInfo> &scripts, JSScript *script);
}; // class ForkJoinOperation

class ForkJoinShared : public ParallelJob, public Monitor
{
    /////////////////////////////////////////////////////////////////////////
    // Constant fields

    JSContext *const cx_;                  // Current context
    ThreadPool *const threadPool_;         // The thread pool
    HandleFunction fun_;                   // The JavaScript function to execute
    uint16_t sliceStart_;                  // The starting slice id.
    uint16_t sliceEnd_;                    // The ending slice id + 1.
    PRLock *cxLock_;                       // Locks cx_ for parallel VM calls
    ParallelBailoutRecord *const records_; // Bailout records for each worker

    /////////////////////////////////////////////////////////////////////////
    // Per-thread arenas
    //
    // Each worker thread gets an arena to use when allocating.

    Vector<Allocator *, 16> allocators_;

    /////////////////////////////////////////////////////////////////////////
    // Locked Fields
    //
    // Only to be accessed while holding the lock.

    bool gcRequested_;              // True if a worker requested a GC
    JS::gcreason::Reason gcReason_; // Reason given to request GC
    Zone *gcZone_;                  // Zone for GC, or nullptr for full

    /////////////////////////////////////////////////////////////////////////
    // Asynchronous Flags
    //
    // These can be accessed without the lock and are thus atomic.

    // Set to true when parallel execution should abort.
    mozilla::Atomic<bool, mozilla::ReleaseAcquire> abort_;

    // Set to true when a worker bails for a fatal reason.
    mozilla::Atomic<bool, mozilla::ReleaseAcquire> fatal_;

  public:
    ForkJoinShared(JSContext *cx,
                   ThreadPool *threadPool,
                   HandleFunction fun,
                   uint16_t sliceStart,
                   uint16_t sliceEnd,
                   ParallelBailoutRecord *records);
    ~ForkJoinShared();

    bool init();

    ParallelResult execute();

    // Invoked from parallel worker threads:
    virtual bool executeFromWorker(ThreadPoolWorker *worker, uintptr_t stackLimit) MOZ_OVERRIDE;

    // Invoked only from the main thread:
    virtual bool executeFromMainThread(ThreadPoolWorker *worker) MOZ_OVERRIDE;

    // Executes the user-supplied function on a worker or the main thread.
    void executePortion(PerThreadData *perThread, ThreadPoolWorker *worker);

    // Moves all the per-thread arenas into the main compartment and processes
    // any pending requests for a GC. This can only safely be invoked on the
    // main thread after the workers have completed.
    void transferArenasToCompartmentAndProcessGCRequests();

    // Requests a GC, either full or specific to a zone.
    void requestGC(JS::gcreason::Reason reason);
    void requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason);

    // Requests that computation abort.
    void setAbortFlagDueToInterrupt(ForkJoinContext &cx);
    void setAbortFlagAndRequestInterrupt(bool fatal);

    // Set the fatal flag for the next abort.
    void setPendingAbortFatal() { fatal_ = true; }

    JSRuntime *runtime() { return cx_->runtime(); }
    JS::Zone *zone() { return cx_->zone(); }
    JSCompartment *compartment() { return cx_->compartment(); }

    JSContext *acquireJSContext() { PR_Lock(cxLock_); return cx_; }
    void releaseJSContext() { PR_Unlock(cxLock_); }
};
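// Note on cxLock_: worker threads that must perform an operation requiring a
// JSContext go through ForkJoinContext::acquireJSContext(), which forwards to
// acquireJSContext() above and thus serializes all such VM calls on the one
// main-thread context.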

class AutoEnterWarmup
{
    JSRuntime *runtime_;

  public:
    AutoEnterWarmup(JSRuntime *runtime) : runtime_(runtime) { runtime_->forkJoinWarmup++; }
    ~AutoEnterWarmup() { runtime_->forkJoinWarmup--; }
};

class AutoSetForkJoinContext
{
  public:
    AutoSetForkJoinContext(ForkJoinContext *threadCx) {
        ForkJoinContext::tlsForkJoinContext.set(threadCx);
    }

    ~AutoSetForkJoinContext() {
        ForkJoinContext::tlsForkJoinContext.set(nullptr);
    }
};

} // namespace js

///////////////////////////////////////////////////////////////////////////
// ForkJoinActivation
//
// Takes care of tidying up GC before we enter a fork join section. Also
// pauses the barrier verifier, as we cannot enter fork join with the runtime
// or the zone needing barriers.

ForkJoinActivation::ForkJoinActivation(JSContext *cx)
  : Activation(cx, ForkJoin),
    prevIonTop_(cx->mainThread().ionTop),
    av_(cx->runtime(), false)
{
    // Note: we do not allow GC during parallel sections.
    // Moreover, we do not wish to worry about making
    // write barriers thread-safe. Therefore, we guarantee
    // that there is no incremental GC in progress and force
    // a minor GC to ensure no cross-generation pointers get
    // created:

    if (JS::IsIncrementalGCInProgress(cx->runtime())) {
        JS::PrepareForIncrementalGC(cx->runtime());
        JS::FinishIncrementalGC(cx->runtime(), JS::gcreason::API);
    }

    MinorGC(cx->runtime(), JS::gcreason::API);

    cx->runtime()->gcHelperThread.waitBackgroundSweepEnd();

    JS_ASSERT(!cx->runtime()->needsBarrier());
    JS_ASSERT(!cx->zone()->needsBarrier());
}

ForkJoinActivation::~ForkJoinActivation()
{
    cx_->mainThread().ionTop = prevIonTop_;
}

///////////////////////////////////////////////////////////////////////////
// js::ForkJoin() and ForkJoinOperation class
//
// These are the top-level objects that manage the parallel execution.
// They handle parallel compilation (if necessary), triggering
// parallel execution, and recovering from bailouts.

static const char *ForkJoinModeString(ForkJoinMode mode);

bool
js::ForkJoin(JSContext *cx, CallArgs &args)
{
    JS_ASSERT(args.length() == 4); // else the self-hosted code is wrong
    JS_ASSERT(args[0].isObject());
    JS_ASSERT(args[0].toObject().is<JSFunction>());
    JS_ASSERT(args[1].isInt32());
    JS_ASSERT(args[2].isInt32());
    JS_ASSERT(args[3].isInt32());
    JS_ASSERT(args[3].toInt32() < NumForkJoinModes);

    RootedFunction fun(cx, &args[0].toObject().as<JSFunction>());
    uint16_t sliceStart = (uint16_t)(args[1].toInt32());
    uint16_t sliceEnd = (uint16_t)(args[2].toInt32());
    ForkJoinMode mode = (ForkJoinMode)(args[3].toInt32());

    MOZ_ASSERT(sliceStart == args[1].toInt32());
    MOZ_ASSERT(sliceEnd == args[2].toInt32());
    MOZ_ASSERT(sliceStart <= sliceEnd);

    ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode);
    ExecutionStatus status = op.apply();
    if (status == ExecutionFatal)
        return false;

    switch (mode) {
      case ForkJoinModeNormal:
      case ForkJoinModeCompile:
        return true;

      case ForkJoinModeParallel:
        if (status == ExecutionParallel && op.bailouts == 0)
            return true;
        break;

      case ForkJoinModeRecover:
        if (status != ExecutionSequential && op.bailouts > 0)
            return true;
        break;

      case ForkJoinModeBailout:
        if (status != ExecutionParallel)
            return true;
        break;

      case NumForkJoinModes:
        break;
    }

    const char *statusString = "?";
    switch (status) {
      case ExecutionSequential: statusString = "seq"; break;
      case ExecutionParallel: statusString = "par"; break;
      case ExecutionWarmup: statusString = "warmup"; break;
      case ExecutionFatal: statusString = "fatal"; break;
    }

    if (ParallelTestsShouldPass(cx)) {
        JS_ReportError(cx, "ForkJoin: mode=%s status=%s bailouts=%d",
                       ForkJoinModeString(mode), statusString, op.bailouts);
        return false;
    }
    return true;
}

static const char *
ForkJoinModeString(ForkJoinMode mode) {
    switch (mode) {
      case ForkJoinModeNormal: return "normal";
      case ForkJoinModeCompile: return "compile";
      case ForkJoinModeParallel: return "parallel";
      case ForkJoinModeRecover: return "recover";
      case ForkJoinModeBailout: return "bailout";
      case NumForkJoinModes: return "max";
    }
    return "???";
}

ForkJoinOperation::ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
                                     uint16_t sliceEnd, ForkJoinMode mode)
  : bailouts(0),
    bailoutCause(ParallelBailoutNone),
    bailoutScript(cx),
    bailoutBytecode(nullptr),
    cx_(cx),
    fun_(fun),
    sliceStart_(sliceStart),
    sliceEnd_(sliceEnd),
    bailoutRecords_(cx),
    worklist_(cx),
    worklistData_(cx),
    mode_(mode)
{ }

ExecutionStatus
ForkJoinOperation::apply()
{
    ExecutionStatus status;

    // High level outline of the procedure:
    //
    // - As we enter, we check for parallel script without "uncompiled" flag.
    // - If present, skip initial enqueue.
    // - While not too many bailouts:
    //   - While all scripts in worklist are not compiled:
    //     - For each script S in worklist:
    //       - Compile S if not compiled
    //         -> Error: fallback
    //       - If compiled, add call targets to worklist w/o checking uncompiled
    //         flag
    //     - If some compilations pending, run warmup iteration
    //     - Otherwise, clear "uncompiled targets" flag on main script and
    //       break from loop
    //   - Attempt parallel execution
    //     - If successful: return happily
    //     - If error: abort sadly
    //     - If bailout:
    //       - Invalidate any scripts that may need to be invalidated
    //       - Re-enqueue main script and any uncompiled scripts that were called
    // - Too many bailouts: Fallback to sequential
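    //
    // For example, a ForkJoinModeNormal run that never bails out goes
    // enqueueInitialScript -> compileForParallelExecution ->
    // parallelExecution, with the last step returning RedLight and an
    // ExecutionParallel status.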

    JS_ASSERT_IF(!jit::IsBaselineEnabled(cx_), !jit::IsIonEnabled(cx_));
    if (!jit::IsBaselineEnabled(cx_) || !jit::IsIonEnabled(cx_))
        return sequentialExecution(true);

    SpewBeginOp(cx_, "ForkJoinOperation");

    // How many workers do we have, counting the main thread.
    unsigned numWorkers = cx_->runtime()->threadPool.numWorkers();

    if (!bailoutRecords_.resize(numWorkers))
        return SpewEndOp(ExecutionFatal);

    for (uint32_t i = 0; i < numWorkers; i++)
        bailoutRecords_[i].init(cx_);

    if (enqueueInitialScript(&status) == RedLight)
        return SpewEndOp(status);

    Spew(SpewOps, "Execution mode: %s", ForkJoinModeString(mode_));
    switch (mode_) {
      case ForkJoinModeNormal:
      case ForkJoinModeCompile:
      case ForkJoinModeBailout:
        break;

      case ForkJoinModeParallel:
      case ForkJoinModeRecover:
        // These two modes are used to check that every iteration can
        // be executed in parallel. They expect compilation to have
        // been done. But, when using gc zeal, it's possible that
        // compiled scripts were collected.
        if (ParallelTestsShouldPass(cx_) && worklist_.length() != 0) {
            JS_ReportError(cx_, "ForkJoin: compilation required in par or bailout mode");
            return SpewEndOp(ExecutionFatal);
        }
        break;

      case NumForkJoinModes:
        MOZ_ASSUME_UNREACHABLE("Invalid mode");
    }

    while (bailouts < MAX_BAILOUTS) {
        for (uint32_t i = 0; i < numWorkers; i++)
            bailoutRecords_[i].reset(cx_);

        if (compileForParallelExecution(&status) == RedLight)
            return SpewEndOp(status);

        JS_ASSERT(worklist_.length() == 0);
        if (parallelExecution(&status) == RedLight)
            return SpewEndOp(status);

        if (recoverFromBailout(&status) == RedLight)
            return SpewEndOp(status);
    }

    // After enough tries, just execute sequentially.
    return SpewEndOp(sequentialExecution(true));
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::enqueueInitialScript(ExecutionStatus *status)
{
    // GreenLight: script successfully enqueued if necessary
    // RedLight: fatal error or fell back to sequential

    // The kernel should be a self-hosted function.
    if (!fun_->is<JSFunction>())
        return sequentialExecution(true, status);

    RootedFunction callee(cx_, &fun_->as<JSFunction>());

    if (!callee->isInterpreted() || !callee->isSelfHostedBuiltin())
        return sequentialExecution(true, status);

    // If the main script is already compiled, and we have no reason
    // to suspect any of its callees are not compiled, then we can
    // just skip the compilation step.
    RootedScript script(cx_, callee->getOrCreateScript(cx_));
    if (!script)
        return RedLight;

    if (script->hasParallelIonScript()) {
        // Notify that there's been activity on the entry script.
        JitCompartment *jitComp = cx_->compartment()->jitCompartment();
        if (!jitComp->notifyOfActiveParallelEntryScript(cx_, script)) {
            *status = ExecutionFatal;
            return RedLight;
        }

        if (!script->parallelIonScript()->hasUncompiledCallTarget()) {
            Spew(SpewOps, "Script %p:%s:%d already compiled, no uncompiled callees",
                 script.get(), script->filename(), script->lineno());
            return GreenLight;
        }

        Spew(SpewOps, "Script %p:%s:%d already compiled, may have uncompiled callees",
             script.get(), script->filename(), script->lineno());
    }

    // Otherwise, add to the worklist of scripts to process.
    if (!addToWorklist(script))
        return fatalError(status);
    return GreenLight;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::compileForParallelExecution(ExecutionStatus *status)
{
    // GreenLight: all scripts compiled
    // RedLight: fatal error or completed work via warmups or fallback

    // This routine attempts to do whatever compilation is necessary
    // to execute a single parallel attempt. When it returns, either
    // (1) we have fallen back to sequential; (2) we have run enough
    // warmup runs to complete all the work; or (3) we have compiled
    // all scripts we think likely to be executed during a parallel
    // execution.

    RootedFunction fun(cx_);
    RootedScript script(cx_);

    // After 3 stalls, we stop waiting for a script to gather type
    // info and move on with execution.
    const uint32_t stallThreshold = 3;

    // This loop continues to iterate until the full contents of
    // `worklist` have been successfully compiled for parallel
    // execution. The compilations themselves typically occur on
    // helper threads. While we wait for the compilations to complete,
    // or for sufficient type information to be gathered, we execute
    // warmup iterations.
    while (true) {
        bool offMainThreadCompilationsInProgress = false;
        bool gatheringTypeInformation = false;

        // Walk over the worklist to check on the status of each entry.
        for (uint32_t i = 0; i < worklist_.length(); i++) {
            script = worklist_[i];
            script->ensureNonLazyCanonicalFunction(cx_);
            fun = script->functionNonDelazifying();

            // No baseline script means no type information, hence we
            // will not be able to compile very well. In such cases,
            // we continue to run baseline iterations until either (1)
            // the potential callee *has* a baseline script or (2) the
            // potential callee's use count stops increasing,
            // indicating that they are not in fact a callee.
            if (!script->hasBaselineScript()) {
                uint32_t previousUseCount = worklistData_[i].useCount;
                uint32_t currentUseCount = script->getUseCount();
                if (previousUseCount < currentUseCount) {
                    worklistData_[i].useCount = currentUseCount;
                    worklistData_[i].stallCount = 0;
                    gatheringTypeInformation = true;

                    Spew(SpewCompile,
                         "Script %p:%s:%d has no baseline script, "
                         "but use count grew from %d to %d",
                         script.get(), script->filename(), script->lineno(),
                         previousUseCount, currentUseCount);
                } else {
                    uint32_t stallCount = ++worklistData_[i].stallCount;
                    if (stallCount < stallThreshold) {
                        gatheringTypeInformation = true;
                    }

                    Spew(SpewCompile,
                         "Script %p:%s:%d has no baseline script, "
                         "and use count has %u stalls at %d",
                         script.get(), script->filename(), script->lineno(),
                         stallCount, previousUseCount);
                }
                continue;
            }

            if (!script->hasParallelIonScript()) {
                // Script has not yet been compiled. Attempt to compile it.
                SpewBeginCompile(script);
                MethodStatus mstatus = jit::CanEnterInParallel(cx_, script);
                SpewEndCompile(mstatus);

                switch (mstatus) {
                  case Method_Error:
                    return fatalError(status);

                  case Method_CantCompile:
                    Spew(SpewCompile,
                         "Script %p:%s:%d cannot be compiled, "
                         "falling back to sequential execution",
                         script.get(), script->filename(), script->lineno());
                    return sequentialExecution(true, status);

                  case Method_Skipped:
                    // A "skipped" result either means that we are compiling
                    // in parallel OR some other transient error occurred.
                    if (script->isParallelIonCompilingOffThread()) {
                        Spew(SpewCompile,
                             "Script %p:%s:%d compiling off-thread",
                             script.get(), script->filename(), script->lineno());
                        offMainThreadCompilationsInProgress = true;
                        continue;
                    }
                    return sequentialExecution(false, status);

                  case Method_Compiled:
                    Spew(SpewCompile,
                         "Script %p:%s:%d compiled",
                         script.get(), script->filename(), script->lineno());
                    JS_ASSERT(script->hasParallelIonScript());

                    if (isInitialScript(script)) {
                        JitCompartment *jitComp = cx_->compartment()->jitCompartment();
                        if (!jitComp->notifyOfActiveParallelEntryScript(cx_, script)) {
                            *status = ExecutionFatal;
                            return RedLight;
                        }
                    }

                    break;
                }
            }

            // At this point, either the script was already compiled
            // or we just compiled it. Check whether its "uncompiled
            // call target" flag is set and add the targets to our
            // worklist if so. Clear the flag after that, since we
            // will be compiling the call targets.
            JS_ASSERT(script->hasParallelIonScript());
            if (appendCallTargetsToWorklist(i, status) == RedLight)
                return RedLight;
        }

        // If there is compilation occurring in a helper thread, then
        // run warmup iterations on the main thread while we wait.
        // There is a chance that this warmup will finish all the work
        // we have to do, so we should stop then, unless we are in
        // compile mode, in which case we'll continue to block.
        //
        // Note that even in compile mode, we can't block *forever*:
        // - OMTC compiles will finish;
        // - no work is being done, so use counts on not-yet-baselined
        //   scripts will not increase.
        if (offMainThreadCompilationsInProgress || gatheringTypeInformation) {
            bool stopIfComplete = (mode_ != ForkJoinModeCompile);
            if (warmupExecution(stopIfComplete, status) == RedLight)
                return RedLight;
            continue;
        }

        // All compilations are complete. However, be careful: it is
        // possible that a garbage collection occurred while we were
        // iterating and caused some of the scripts we thought we had
        // compiled to be collected. In that case, we will just have
        // to begin again.
        bool allScriptsPresent = true;
        for (uint32_t i = 0; i < worklist_.length(); i++) {
            if (!worklist_[i]->hasParallelIonScript()) {
                if (worklistData_[i].stallCount < stallThreshold) {
                    worklistData_[i].reset();
                    allScriptsPresent = false;

                    Spew(SpewCompile,
                         "Script %p:%s:%d is not stalled, "
                         "but no parallel ion script found, "
                         "restarting loop",
                         script.get(), script->filename(), script->lineno());
                }
            }
        }

        if (allScriptsPresent)
            break;
    }

    Spew(SpewCompile, "Compilation complete (final worklist length %d)",
         worklist_.length());

    // At this point, all scripts and their transitive callees are
    // either stalled (indicating they are unlikely to be called) or
    // in a compiled state. Therefore we can clear the
    // "hasUncompiledCallTarget" flag on them and then clear the
    // worklist.
    for (uint32_t i = 0; i < worklist_.length(); i++) {
        if (worklist_[i]->hasParallelIonScript()) {
            JS_ASSERT(worklistData_[i].calleesEnqueued);
            worklist_[i]->parallelIonScript()->clearHasUncompiledCallTarget();
        } else {
            JS_ASSERT(worklistData_[i].stallCount >= stallThreshold);
        }
    }
    worklist_.clear();
    worklistData_.clear();
    return GreenLight;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::appendCallTargetsToWorklist(uint32_t index, ExecutionStatus *status)
{
    // GreenLight: call targets appended
    // RedLight: fatal error or completed work via warmups or fallback

    JS_ASSERT(worklist_[index]->hasParallelIonScript());

    // Check whether we have already enqueued the targets for
    // this entry and avoid doing it again if so.
    if (worklistData_[index].calleesEnqueued)
        return GreenLight;
    worklistData_[index].calleesEnqueued = true;

    // Iterate through the callees and enqueue them.
    RootedScript target(cx_);
    IonScript *ion = worklist_[index]->parallelIonScript();
    for (uint32_t i = 0; i < ion->callTargetEntries(); i++) {
        target = ion->callTargetList()[i];
        parallel::Spew(parallel::SpewCompile,
                       "Adding call target %s:%u",
                       target->filename(), target->lineno());
        if (appendCallTargetToWorklist(target, status) == RedLight)
            return RedLight;
    }

    return GreenLight;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::appendCallTargetToWorklist(HandleScript script, ExecutionStatus *status)
{
    // GreenLight: call target appended if necessary
    // RedLight: fatal error or completed work via warmups or fallback

    JS_ASSERT(script);

    // Fallback to sequential if disabled.
    if (!script->canParallelIonCompile()) {
        Spew(SpewCompile, "Skipping %p:%s:%u, canParallelIonCompile() is false",
             script.get(), script->filename(), script->lineno());
        return sequentialExecution(true, status);
    }

    if (script->hasParallelIonScript()) {
        // Skip if the code is expected to result in a bailout.
        if (script->parallelIonScript()->bailoutExpected()) {
            Spew(SpewCompile, "Skipping %p:%s:%u, bailout expected",
                 script.get(), script->filename(), script->lineno());
            return sequentialExecution(false, status);
        }
    }

    if (!addToWorklist(script))
        return fatalError(status);

    return GreenLight;
}

bool
ForkJoinOperation::addToWorklist(HandleScript script)
{
    for (uint32_t i = 0; i < worklist_.length(); i++) {
        if (worklist_[i] == script) {
            Spew(SpewCompile, "Skipping %p:%s:%u, already in worklist",
                 script.get(), script->filename(), script->lineno());
            return true;
        }
    }

    Spew(SpewCompile, "Enqueued %p:%s:%u",
         script.get(), script->filename(), script->lineno());

    // Note that we add all possibly compilable functions to the worklist,
    // even if they're already compiled. This is so that we can return
    // Method_Compiled and not Method_Skipped if we have a worklist full of
    // already-compiled functions.
    if (!worklist_.append(script))
        return false;

    // we have not yet enqueued the callees of this script
    if (!worklistData_.append(WorklistData()))
        return false;
    worklistData_[worklistData_.length() - 1].reset();

    return true;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::sequentialExecution(bool disqualified, ExecutionStatus *status)
{
    // RedLight: fatal error or completed work

    *status = sequentialExecution(disqualified);
    return RedLight;
}

ExecutionStatus
ForkJoinOperation::sequentialExecution(bool disqualified)
{
    // XXX use disqualified to set parallelIon to ION_DISABLED_SCRIPT?

    Spew(SpewOps, "Executing sequential execution (disqualified=%d).",
         disqualified);

    if (sliceStart_ == sliceEnd_)
        return ExecutionSequential;

    RootedValue funVal(cx_, ObjectValue(*fun_));
    if (!ExecuteSequentially(cx_, funVal, &sliceStart_, sliceEnd_))
        return ExecutionFatal;
    MOZ_ASSERT(sliceStart_ == sliceEnd_);
    return ExecutionSequential;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::fatalError(ExecutionStatus *status)
{
    // RedLight: fatal error

    *status = ExecutionFatal;
    return RedLight;
}

static const char *
BailoutExplanation(ParallelBailoutCause cause)
{
    switch (cause) {
      case ParallelBailoutNone:
        return "no particular reason";
      case ParallelBailoutCompilationSkipped:
        return "compilation failed (method skipped)";
      case ParallelBailoutCompilationFailure:
        return "compilation failed";
      case ParallelBailoutInterrupt:
        return "interrupted";
      case ParallelBailoutFailedIC:
        return "failed to attach stub to IC";
      case ParallelBailoutHeapBusy:
        return "heap busy flag set during interrupt";
      case ParallelBailoutMainScriptNotPresent:
        return "main script not present";
      case ParallelBailoutCalledToUncompiledScript:
        return "called to uncompiled script";
      case ParallelBailoutIllegalWrite:
        return "illegal write";
      case ParallelBailoutAccessToIntrinsic:
        return "access to intrinsic";
      case ParallelBailoutOverRecursed:
        return "over recursed";
      case ParallelBailoutOutOfMemory:
        return "out of memory";
      case ParallelBailoutUnsupported:
        return "unsupported";
      case ParallelBailoutUnsupportedVM:
        return "unsupported operation in VM call";
      case ParallelBailoutUnsupportedStringComparison:
        return "unsupported string comparison";
      case ParallelBailoutRequestedGC:
        return "requested GC";
      case ParallelBailoutRequestedZoneGC:
        return "requested zone GC";
      default:
        return "no known reason";
    }
}

bool
ForkJoinOperation::isInitialScript(HandleScript script)
{
    return fun_->is<JSFunction>() && (fun_->as<JSFunction>().nonLazyScript() == script);
}

void
ForkJoinOperation::determineBailoutCause()
{
    bailoutCause = ParallelBailoutNone;
    for (uint32_t i = 0; i < bailoutRecords_.length(); i++) {
        if (bailoutRecords_[i].cause == ParallelBailoutNone)
            continue;

        if (bailoutRecords_[i].cause == ParallelBailoutInterrupt)
            continue;

        bailoutCause = bailoutRecords_[i].cause;
        const char *causeStr = BailoutExplanation(bailoutCause);
        if (bailoutRecords_[i].depth) {
            bailoutScript = bailoutRecords_[i].trace[0].script;
            bailoutBytecode = bailoutRecords_[i].trace[0].bytecode;

            const char *filename = bailoutScript->filename();
            int line = JS_PCToLineNumber(cx_, bailoutScript, bailoutBytecode);
            JS_ReportWarning(cx_, "Bailed out of parallel operation: %s at %s:%d",
                             causeStr, filename, line);

            Spew(SpewBailouts, "Bailout from thread %d: cause %d at loc %s:%d",
                 i,
                 bailoutCause,
                 bailoutScript->filename(),
                 PCToLineNumber(bailoutScript, bailoutBytecode));
        } else {
            JS_ReportWarning(cx_, "Bailed out of parallel operation: %s",
                             causeStr);

            Spew(SpewBailouts, "Bailout from thread %d: cause %d, unknown loc",
                 i,
                 bailoutCause);
        }
    }
}

bool
ForkJoinOperation::invalidateBailedOutScripts()
{
    Vector<types::RecompileInfo> invalid(cx_);
    for (uint32_t i = 0; i < bailoutRecords_.length(); i++) {
        RootedScript script(cx_, bailoutRecords_[i].topScript);

        // No script to invalidate.
        if (!script || !script->hasParallelIonScript())
            continue;

        Spew(SpewBailouts,
             "Bailout from thread %d: cause %d, topScript %p:%s:%d",
             i,
             bailoutRecords_[i].cause,
             script.get(), script->filename(), script->lineno());

        switch (bailoutRecords_[i].cause) {
          // An interrupt is not the fault of the script, so don't
          // invalidate it.
          case ParallelBailoutInterrupt: continue;

          // An illegal write will not be made legal by invalidation.
          case ParallelBailoutIllegalWrite: continue;

          // For other cases, consider invalidation.
          default: break;
        }

        // Already invalidated.
        if (hasScript(invalid, script))
            continue;

        Spew(SpewBailouts, "Invalidating script %p:%s:%d due to cause %d",
             script.get(), script->filename(), script->lineno(),
             bailoutRecords_[i].cause);

        types::RecompileInfo co = script->parallelIonScript()->recompileInfo();

        if (!invalid.append(co))
            return false;

        // any script that we have marked for invalidation will need
        // to be recompiled
        if (!addToWorklist(script))
            return false;
    }

    Invalidate(cx_, invalid);

    return true;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::warmupExecution(bool stopIfComplete, ExecutionStatus *status)
{
    // GreenLight: warmup succeeded, still more work to do
    // RedLight: fatal error or warmup completed all work (check status)

    if (sliceStart_ == sliceEnd_) {
        Spew(SpewOps, "Warmup execution finished all the work.");

        if (stopIfComplete) {
            *status = ExecutionWarmup;
            return RedLight;
        }

        // If we finished all slices in warmup, be sure to check the
        // interrupt flag. This is because we won't be running more JS
        // code, and thus no more automatic checking of the interrupt
        // flag.
        if (!CheckForInterrupt(cx_)) {
            *status = ExecutionFatal;
            return RedLight;
        }

        return GreenLight;
    }

    Spew(SpewOps, "Executing warmup from slice %d.", sliceStart_);

    AutoEnterWarmup warmup(cx_->runtime());
    RootedValue funVal(cx_, ObjectValue(*fun_));
    if (!ExecuteSequentially(cx_, funVal, &sliceStart_, sliceStart_ + 1)) {
        *status = ExecutionFatal;
        return RedLight;
    }

    return GreenLight;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::parallelExecution(ExecutionStatus *status)
{
    // GreenLight: bailout occurred, keep trying
    // RedLight: fatal error or all work completed

    // Recursive use of the ThreadPool is not supported. Right now we
    // cannot get here because parallel code cannot invoke native
    // functions such as ForkJoin().
    JS_ASSERT(ForkJoinContext::current() == nullptr);

    if (sliceStart_ == sliceEnd_) {
        Spew(SpewOps, "Warmup execution finished all the work.");
        *status = ExecutionWarmup;
        return RedLight;
    }

    ForkJoinActivation activation(cx_);
    ThreadPool *threadPool = &cx_->runtime()->threadPool;
    ForkJoinShared shared(cx_, threadPool, fun_, sliceStart_, sliceEnd_, &bailoutRecords_[0]);
    if (!shared.init()) {
        *status = ExecutionFatal;
        return RedLight;
    }

    switch (shared.execute()) {
      case TP_SUCCESS:
        *status = ExecutionParallel;
        return RedLight;

      case TP_FATAL:
        *status = ExecutionFatal;
        return RedLight;

      case TP_RETRY_SEQUENTIALLY:
      case TP_RETRY_AFTER_GC:
        break; // bailout
    }

    return GreenLight;
}

ForkJoinOperation::TrafficLight
ForkJoinOperation::recoverFromBailout(ExecutionStatus *status)
{
    // GreenLight: bailout recovered, try to compile-and-run again
    // RedLight: fatal error

    bailouts += 1;
    determineBailoutCause();

    SpewBailout(bailouts, bailoutScript, bailoutBytecode, bailoutCause);

    // After any bailout, we always scan over callee list of main
    // function, if nothing else
    RootedScript mainScript(cx_, fun_->nonLazyScript());
    if (!addToWorklist(mainScript))
        return fatalError(status);

    // Also invalidate and recompile any callees that were implicated
    // by the bailout
    if (!invalidateBailedOutScripts())
        return fatalError(status);

    if (warmupExecution(/*stopIfComplete:*/true, status) == RedLight)
        return RedLight;

    return GreenLight;
}

bool
ForkJoinOperation::hasScript(Vector<types::RecompileInfo> &scripts, JSScript *script)
{
    for (uint32_t i = 0; i < scripts.length(); i++) {
        if (scripts[i] == script->parallelIonScript()->recompileInfo())
            return true;
    }
    return false;
}

// Can only enter callees with a valid IonScript.
template <uint32_t maxArgc>
class ParallelIonInvoke
{
    EnterJitCode enter_;
    void *jitcode_;
    void *calleeToken_;
    Value argv_[maxArgc + 2];
    uint32_t argc_;

  public:
    Value *args;

    ParallelIonInvoke(JSRuntime *rt,
                      HandleFunction callee,
                      uint32_t argc)
      : argc_(argc),
        args(argv_ + 2)
    {
        JS_ASSERT(argc <= maxArgc + 2);

        // Set 'callee' and 'this'.
        argv_[0] = ObjectValue(*callee);
        argv_[1] = UndefinedValue();

        // Find JIT code pointer.
        IonScript *ion = callee->nonLazyScript()->parallelIonScript();
        JitCode *code = ion->method();
        jitcode_ = code->raw();
        enter_ = rt->jitRuntime()->enterIon();
        calleeToken_ = CalleeToToken(callee);
    }

    bool invoke(PerThreadData *perThread) {
        RootedValue result(perThread);
        CALL_GENERATED_CODE(enter_, jitcode_, argc_ + 1, argv_ + 1, nullptr, calleeToken_,
                            nullptr, 0, result.address());
        return !result.isMagic();
    }
};
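// ParallelIonInvoke is used by ForkJoinShared::executePortion() below, along
// the lines of:
//
//     ParallelIonInvoke<3> fii(runtime, fun, 3);
//     fii.args[0] = Int32Value(worker->id());
//     fii.args[1] = Int32Value(sliceStart);
//     fii.args[2] = Int32Value(sliceEnd);
//     bool ok = fii.invoke(perThread);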

/////////////////////////////////////////////////////////////////////////////
// ForkJoinShared
//

ForkJoinShared::ForkJoinShared(JSContext *cx,
                               ThreadPool *threadPool,
                               HandleFunction fun,
                               uint16_t sliceStart,
                               uint16_t sliceEnd,
                               ParallelBailoutRecord *records)
  : cx_(cx),
    threadPool_(threadPool),
    fun_(fun),
    sliceStart_(sliceStart),
    sliceEnd_(sliceEnd),
    cxLock_(nullptr),
    records_(records),
    allocators_(cx),
    gcRequested_(false),
    gcReason_(JS::gcreason::NUM_REASONS),
    gcZone_(nullptr),
    abort_(false),
    fatal_(false)
{
}

bool
ForkJoinShared::init()
{
    // Create temporary arenas to hold the data allocated during the
    // parallel code.
    //
    // Note: you might think (as I did, initially) that we could use
    // compartment |Allocator| for the main thread. This is not true,
    // because when executing parallel code we sometimes check what
    // arena list an object is in to decide if it is writable. If we
    // used the compartment |Allocator| for the main thread, then the
    // main thread would be permitted to write to any object it wants.

    if (!Monitor::init())
        return false;

    cxLock_ = PR_NewLock();
    if (!cxLock_)
        return false;

    for (unsigned i = 0; i < threadPool_->numWorkers(); i++) {
        Allocator *allocator = cx_->new_<Allocator>(cx_->zone());
        if (!allocator)
            return false;

        if (!allocators_.append(allocator)) {
            js_delete(allocator);
            return false;
        }
    }

    return true;
}

ForkJoinShared::~ForkJoinShared()
{
    PR_DestroyLock(cxLock_);

    while (allocators_.length() > 0)
        js_delete(allocators_.popCopy());
}

ParallelResult
ForkJoinShared::execute()
{
    // Sometimes a GC request occurs *just before* we enter into the
    // parallel section. Rather than enter into the parallel section
    // and then abort, we just check here and abort early.
    if (cx_->runtime()->interruptPar)
        return TP_RETRY_SEQUENTIALLY;

    AutoLockMonitor lock(*this);

    ParallelResult jobResult = TP_SUCCESS;
    {
        AutoUnlockMonitor unlock(*this);

        // Push parallel tasks and wait until they're all done.
        jobResult = threadPool_->executeJob(cx_, this, sliceStart_, sliceEnd_);
        if (jobResult == TP_FATAL)
            return TP_FATAL;
    }

    transferArenasToCompartmentAndProcessGCRequests();

    // Check if any of the workers failed.
    if (abort_) {
        if (fatal_)
            return TP_FATAL;
        return TP_RETRY_SEQUENTIALLY;
    }

#ifdef DEBUG
    Spew(SpewOps, "Completed parallel job [slices: %d, threads: %d, stolen: %d (work stealing:%s)]",
         sliceEnd_ - sliceStart_,
         threadPool_->numWorkers(),
         threadPool_->stolenSlices(),
         threadPool_->workStealing() ? "ON" : "OFF");
#endif

    // Everything went swimmingly. Give yourself a pat on the back.
    return jobResult;
}
|
1451 |
|
1452 void |
|
1453 ForkJoinShared::transferArenasToCompartmentAndProcessGCRequests() |
|
1454 { |
|
1455 JSCompartment *comp = cx_->compartment(); |
|
1456 for (unsigned i = 0; i < threadPool_->numWorkers(); i++) |
|
1457 comp->adoptWorkerAllocator(allocators_[i]); |
|
1458 |
|
1459 if (gcRequested_) { |
|
1460 if (!gcZone_) |
|
1461 TriggerGC(cx_->runtime(), gcReason_); |
|
1462 else |
|
1463 TriggerZoneGC(gcZone_, gcReason_); |
|
1464 gcRequested_ = false; |
|
1465 gcZone_ = nullptr; |
|
1466 } |
|
1467 } |
|
1468 |
|
1469 bool |
|
1470 ForkJoinShared::executeFromWorker(ThreadPoolWorker *worker, uintptr_t stackLimit) |
|
1471 { |
|
1472 PerThreadData thisThread(cx_->runtime()); |
|
1473 if (!thisThread.init()) { |
|
1474 setAbortFlagAndRequestInterrupt(true); |
|
1475 return false; |
|
1476 } |
|
1477 TlsPerThreadData.set(&thisThread); |
|
1478 |
|
1479 #ifdef JS_ARM_SIMULATOR |
|
1480 stackLimit = Simulator::StackLimit(); |
|
1481 #endif |
|
1482 |
|
1483 // Don't use setIonStackLimit() because that acquires the ionStackLimitLock, and the |
|
1484 // lock has not been initialized in these cases. |
|
1485 thisThread.jitStackLimit = stackLimit; |
|
1486 executePortion(&thisThread, worker); |
|
1487 TlsPerThreadData.set(nullptr); |
|
1488 |
|
1489 return !abort_; |
|
1490 } |
|
1491 |
|
1492 bool |
|
1493 ForkJoinShared::executeFromMainThread(ThreadPoolWorker *worker) |
|
1494 { |
|
1495 executePortion(&cx_->mainThread(), worker); |
|
1496 return !abort_; |
|
1497 } |
|
1498 |
|
1499 void |
|
1500 ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worker) |
|
1501 { |
|
1502 // WARNING: This code runs ON THE PARALLEL WORKER THREAD. |
|
1503 // Be careful when accessing cx_. |
|
1504 |
|
1505 // ForkJoinContext already contains an AutoAssertNoGC; however, the analysis |
|
1506 // does not propagate this type information. We duplicate the assertion |
|
1507 // here for maximum clarity. |
|
1508 JS::AutoAssertNoGC nogc(runtime()); |
|
1509 |
|
1510 Allocator *allocator = allocators_[worker->id()]; |
|
1511 ForkJoinContext cx(perThread, worker, allocator, this, &records_[worker->id()]); |
|
1512 AutoSetForkJoinContext autoContext(&cx); |
|
1513 |
|
1514 #ifdef DEBUG |
|
1515 // Set the maximum worker and slice number for prettier spewing. |
|
1516 cx.maxWorkerId = threadPool_->numWorkers(); |
|
1517 #endif |
|
1518 |
|
1519 Spew(SpewOps, "Up"); |
|
1520 |
|
1521 // Make a new IonContext for the slice, which is needed if we need to |
|
1522 // re-enter the VM. |
|
1523 IonContext icx(CompileRuntime::get(cx_->runtime()), |
|
1524 CompileCompartment::get(cx_->compartment()), |
|
1525 nullptr); |
|
1526 |
|
1527 JS_ASSERT(cx.bailoutRecord->topScript == nullptr); |
|
1528 |
|
1529 if (!fun_->nonLazyScript()->hasParallelIonScript()) { |
|
1530 // Sometimes, particularly with GCZeal, the parallel ion |
|
1531 // script can be collected between starting the parallel |
|
1532 // op and reaching this point. In that case, we just fail |
|
1533 // and fallback. |
|
1534 Spew(SpewOps, "Down (Script no longer present)"); |
|
1535 cx.bailoutRecord->setCause(ParallelBailoutMainScriptNotPresent); |
|
1536 setAbortFlagAndRequestInterrupt(false); |
|
1537 } else { |
|
1538 ParallelIonInvoke<3> fii(cx_->runtime(), fun_, 3); |
|
1539 |
|
1540 fii.args[0] = Int32Value(worker->id()); |
|
1541 fii.args[1] = Int32Value(sliceStart_); |
|
1542 fii.args[2] = Int32Value(sliceEnd_); |
|
1543 |
|
1544 bool ok = fii.invoke(perThread); |
|
1545 JS_ASSERT(ok == !cx.bailoutRecord->topScript); |
|
1546 if (!ok) |
|
1547 setAbortFlagAndRequestInterrupt(false); |
|
1548 } |
|
1549 |
|
1550 Spew(SpewOps, "Down"); |
|
1551 } |
|
1552 |
|
1553 void |
|
1554 ForkJoinShared::setAbortFlagDueToInterrupt(ForkJoinContext &cx) |
|
1555 { |
|
1556 JS_ASSERT(cx_->runtime()->interruptPar); |
|
1557 // The GC Needed flag should not be set during parallel |
|
1558 // execution. Instead, one of the requestGC() or |
|
1559 // requestZoneGC() methods should be invoked. |
|
1560 JS_ASSERT(!cx_->runtime()->gcIsNeeded); |
|
1561 |
|
1562 if (!abort_) { |
|
1563 cx.bailoutRecord->setCause(ParallelBailoutInterrupt); |
|
1564 setAbortFlagAndRequestInterrupt(false); |
|
1565 } |
|
1566 } |
|
1567 |
|
1568 void |
|
1569 ForkJoinShared::setAbortFlagAndRequestInterrupt(bool fatal) |
|
1570 { |
|
1571 AutoLockMonitor lock(*this); |
|
1572 |
|
1573 abort_ = true; |
|
1574 fatal_ = fatal_ || fatal; |
|
1575 |
|
1576 // Note: The ForkJoin trigger here avoids the expensive memory protection needed to |
|
1577 // interrupt Ion code compiled for sequential execution. |
|
1578 cx_->runtime()->requestInterrupt(JSRuntime::RequestInterruptAnyThreadForkJoin); |
|
1579 } |
|
1580 |
|
void
ForkJoinShared::requestGC(JS::gcreason::Reason reason)
{
    AutoLockMonitor lock(*this);

    gcZone_ = nullptr;
    gcReason_ = reason;
    gcRequested_ = true;
}

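// Records a request for a zone GC. If a GC has already been requested for a
// different zone (or for the whole runtime), the request is widened to a
// full GC, since only one pending request is tracked.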
void
ForkJoinShared::requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason)
{
    AutoLockMonitor lock(*this);

    if (gcRequested_ && gcZone_ != zone) {
        // If a full GC has been requested, or a GC for another zone,
        // issue a request for a full GC.
        gcZone_ = nullptr;
        gcReason_ = reason;
        gcRequested_ = true;
    } else {
        // Otherwise, just GC this zone.
        gcZone_ = zone;
        gcReason_ = reason;
        gcRequested_ = true;
    }
}

/////////////////////////////////////////////////////////////////////////////
// ForkJoinContext
//

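// A ForkJoinContext lives on a worker's stack (see executePortion() above)
// and is published via AutoSetForkJoinContext, so parallel code can retrieve
// it with ForkJoinContext::current(). A minimal usage sketch, illustrative
// only (ExampleParallelNative is hypothetical and not part of this file):
//
//   static bool
//   ExampleParallelNative(ForkJoinContext *cx, unsigned argc, Value *vp)
//   {
//       if (!cx->check())   // cooperatively honor pending interrupts/aborts
//           return false;
//       // ... thread-safe work using cx->allocator(), etc. ...
//       return true;
//   }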
ForkJoinContext::ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
                                 Allocator *allocator, ForkJoinShared *shared,
                                 ParallelBailoutRecord *bailoutRecord)
  : ThreadSafeContext(shared->runtime(), perThreadData, Context_ForkJoin),
    bailoutRecord(bailoutRecord),
    targetRegionStart(nullptr),
    targetRegionEnd(nullptr),
    shared_(shared),
    worker_(worker),
    acquiredJSContext_(false),
    nogc_(shared->runtime())
{
    /*
     * Unsafely set the zone. This is used to track malloc counters and to
     * trigger GCs and is otherwise not thread-safe to access.
     */
    zone_ = shared->zone();

    /*
     * Unsafely set the compartment. This is used to get read-only access to
     * shared tables.
     */
    compartment_ = shared->compartment();

    allocator_ = allocator;
}

bool
ForkJoinContext::isMainThread() const
{
    return perThreadData == &shared_->runtime()->mainThread;
}

JSRuntime *
ForkJoinContext::runtime()
{
    return shared_->runtime();
}

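// acquireJSContext()/releaseJSContext() let a worker temporarily take the
// shared JSContext for operations that need one; the calls must be strictly
// paired, as the assertions below enforce. For example:
//
//   JSContext *jscx = cx->acquireJSContext();
//   // ... operations requiring a real JSContext ...
//   cx->releaseJSContext();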
JSContext *
ForkJoinContext::acquireJSContext()
{
    JSContext *cx = shared_->acquireJSContext();
    JS_ASSERT(!acquiredJSContext_);
    acquiredJSContext_ = true;
    return cx;
}

void
ForkJoinContext::releaseJSContext()
{
    JS_ASSERT(acquiredJSContext_);
    acquiredJSContext_ = false;
    return shared_->releaseJSContext();
}

bool
ForkJoinContext::hasAcquiredJSContext() const
{
    return acquiredJSContext_;
}

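// check() is the cooperative interrupt point for parallel code: if the
// runtime's parallel interrupt flag is set, the shared state records the
// abort and the caller is expected to bail out of the parallel section.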
bool
ForkJoinContext::check()
{
    if (runtime()->interruptPar) {
        shared_->setAbortFlagDueToInterrupt(*this);
        return false;
    }
    return true;
}

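// GC requests made from within a parallel section cannot be serviced
// immediately: they are recorded on the shared state, the bailout cause is
// noted, and the parallel section is aborted.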
void
ForkJoinContext::requestGC(JS::gcreason::Reason reason)
{
    shared_->requestGC(reason);
    bailoutRecord->setCause(ParallelBailoutRequestedGC);
    shared_->setAbortFlagAndRequestInterrupt(false);
}

void
ForkJoinContext::requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason)
{
    shared_->requestZoneGC(zone, reason);
    bailoutRecord->setCause(ParallelBailoutRequestedZoneGC);
    shared_->setAbortFlagAndRequestInterrupt(false);
}

bool
ForkJoinContext::setPendingAbortFatal(ParallelBailoutCause cause)
{
    shared_->setPendingAbortFatal();
    bailoutRecord->setCause(cause);
    return false;
}

//////////////////////////////////////////////////////////////////////////////
// ParallelBailoutRecord

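// One ParallelBailoutRecord exists per worker (see records_[worker->id()] in
// executePortion() above). reset() clears it before each parallel attempt;
// setCause()/updateCause() keep only the first recorded cause; addTrace()
// captures up to MaxDepth script/bytecode pairs for later diagnostics.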
void
js::ParallelBailoutRecord::init(JSContext *cx)
{
    reset(cx);
}

void
js::ParallelBailoutRecord::reset(JSContext *cx)
{
    topScript = nullptr;
    cause = ParallelBailoutNone;
    depth = 0;
}

void
js::ParallelBailoutRecord::setCause(ParallelBailoutCause cause,
                                    JSScript *outermostScript,
                                    JSScript *currentScript,
                                    jsbytecode *currentPc)
{
    this->cause = cause;
    updateCause(cause, outermostScript, currentScript, currentPc);
}

void
js::ParallelBailoutRecord::updateCause(ParallelBailoutCause cause,
                                       JSScript *outermostScript,
                                       JSScript *currentScript,
                                       jsbytecode *currentPc)
{
    JS_ASSERT_IF(outermostScript, currentScript);
    JS_ASSERT_IF(outermostScript, outermostScript->hasParallelIonScript());
    JS_ASSERT_IF(currentScript, outermostScript);
    JS_ASSERT_IF(!currentScript, !currentPc);

    if (this->cause == ParallelBailoutNone)
        this->cause = cause;

    if (outermostScript)
        this->topScript = outermostScript;

    if (currentScript)
        addTrace(currentScript, currentPc);
}

void
js::ParallelBailoutRecord::addTrace(JSScript *script,
                                    jsbytecode *pc)
{
    // Ideally, this should never occur, because we should always have
    // a script when we invoke setCause, but I haven't fully
    // refactored things to that point yet:
    if (topScript == nullptr && script != nullptr)
        topScript = script;

    if (depth < MaxDepth) {
        trace[depth].script = script;
        trace[depth].bytecode = pc;
        depth += 1;
    }
}

//////////////////////////////////////////////////////////////////////////////

//
// Debug spew
//

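// Spewing is controlled by the PAFLAGS environment variable: it may contain
// "ops", "compile", and/or "bailouts" to enable individual channels, or
// "full" to enable all of them (e.g. PAFLAGS=full). Output is colorized when
// stderr is a terminal and TERM is xterm-color or xterm-256color.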
#ifdef DEBUG

static const char *
ExecutionStatusToString(ExecutionStatus status)
{
    switch (status) {
      case ExecutionFatal:
        return "fatal";
      case ExecutionSequential:
        return "sequential";
      case ExecutionWarmup:
        return "warmup";
      case ExecutionParallel:
        return "parallel";
    }
    return "(unknown status)";
}

static const char *
MethodStatusToString(MethodStatus status)
{
    switch (status) {
      case Method_Error:
        return "error";
      case Method_CantCompile:
        return "can't compile";
      case Method_Skipped:
        return "skipped";
      case Method_Compiled:
        return "compiled";
    }
    return "(unknown status)";
}

static unsigned
NumberOfDigits(unsigned n)
{
    if (n == 0)
        return 1;
    unsigned d = 0;
    while (n != 0) {
        d++;
        n /= 10;
    }
    return d;
}

static const size_t BufferSize = 4096;

class ParallelSpewer
{
    uint32_t depth;
    bool colorable;
    bool active[NumSpewChannels];

    const char *color(const char *colorCode) {
        if (!colorable)
            return "";
        return colorCode;
    }

    const char *reset() { return color("\x1b[0m"); }
    const char *bold() { return color("\x1b[1m"); }
    const char *red() { return color("\x1b[31m"); }
    const char *green() { return color("\x1b[32m"); }
    const char *yellow() { return color("\x1b[33m"); }
    const char *cyan() { return color("\x1b[36m"); }
    const char *workerColor(uint32_t id) {
        static const char *colors[] = {
            "\x1b[7m\x1b[31m", "\x1b[7m\x1b[32m", "\x1b[7m\x1b[33m",
            "\x1b[7m\x1b[34m", "\x1b[7m\x1b[35m", "\x1b[7m\x1b[36m",
            "\x1b[7m\x1b[37m",
            "\x1b[31m", "\x1b[32m", "\x1b[33m",
            "\x1b[34m", "\x1b[35m", "\x1b[36m",
            "\x1b[37m"
        };
        return color(colors[id % 14]);
    }

  public:
    ParallelSpewer()
      : depth(0), colorable(false)
    {
        const char *env;

        mozilla::PodArrayZero(active);
        env = getenv("PAFLAGS");
        if (env) {
            if (strstr(env, "ops"))
                active[SpewOps] = true;
            if (strstr(env, "compile"))
                active[SpewCompile] = true;
            if (strstr(env, "bailouts"))
                active[SpewBailouts] = true;
            if (strstr(env, "full")) {
                for (uint32_t i = 0; i < NumSpewChannels; i++)
                    active[i] = true;
            }
        }

        env = getenv("TERM");
        if (env && isatty(fileno(stderr))) {
            if (strcmp(env, "xterm-color") == 0 || strcmp(env, "xterm-256color") == 0)
                colorable = true;
        }
    }

    bool isActive(js::parallel::SpewChannel channel) {
        return active[channel];
    }

    void spewVA(js::parallel::SpewChannel channel, const char *fmt, va_list ap) {
        if (!active[channel])
            return;

        // Print into a buffer first so we use one fprintf, which usually
        // doesn't get interrupted when running with multiple threads.
        char buf[BufferSize];

        if (ForkJoinContext *cx = ForkJoinContext::current()) {
            // Print the format first into a buffer to right-justify the
            // worker ids.
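            // For example, with maxWorkerId = 12 the prefix for worker 3
            // comes out as "[Parallel:03] " (ignoring the color escapes).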
            char bufbuf[BufferSize];
            JS_snprintf(bufbuf, BufferSize, "[%%sParallel:%%0%du%%s] ",
                        NumberOfDigits(cx->maxWorkerId));
            JS_snprintf(buf, BufferSize, bufbuf, workerColor(cx->workerId()),
                        cx->workerId(), reset());
        } else {
            JS_snprintf(buf, BufferSize, "[Parallel:M] ");
        }

        for (uint32_t i = 0; i < depth; i++)
            JS_snprintf(buf + strlen(buf), BufferSize - strlen(buf), "  ");

        JS_vsnprintf(buf + strlen(buf), BufferSize - strlen(buf), fmt, ap);
        JS_snprintf(buf + strlen(buf), BufferSize - strlen(buf), "\n");

        fprintf(stderr, "%s", buf);
    }

    void spew(js::parallel::SpewChannel channel, const char *fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        spewVA(channel, fmt, ap);
        va_end(ap);
    }

    void beginOp(JSContext *cx, const char *name) {
        if (!active[SpewOps])
            return;

        if (cx) {
            jsbytecode *pc;
            RootedScript script(cx, cx->currentScript(&pc));
            if (script && pc) {
                NonBuiltinScriptFrameIter iter(cx);
                if (iter.done()) {
                    spew(SpewOps, "%sBEGIN %s%s (%s:%u)", bold(), name, reset(),
                         script->filename(), PCToLineNumber(script, pc));
                } else {
                    spew(SpewOps, "%sBEGIN %s%s (%s:%u -> %s:%u)", bold(), name, reset(),
                         iter.script()->filename(), PCToLineNumber(iter.script(), iter.pc()),
                         script->filename(), PCToLineNumber(script, pc));
                }
            } else {
                spew(SpewOps, "%sBEGIN %s%s", bold(), name, reset());
            }
        } else {
            spew(SpewOps, "%sBEGIN %s%s", bold(), name, reset());
        }

        depth++;
    }

    void endOp(ExecutionStatus status) {
        if (!active[SpewOps])
            return;

        JS_ASSERT(depth > 0);
        depth--;

        const char *statusColor;
        switch (status) {
          case ExecutionFatal:
            statusColor = red();
            break;
          case ExecutionSequential:
            statusColor = yellow();
            break;
          case ExecutionParallel:
            statusColor = green();
            break;
          default:
            statusColor = reset();
            break;
        }

        spew(SpewOps, "%sEND %s%s%s", bold(),
             statusColor, ExecutionStatusToString(status), reset());
    }

    void bailout(uint32_t count, HandleScript script,
                 jsbytecode *pc, ParallelBailoutCause cause) {
        if (!active[SpewOps])
            return;

        const char *filename = "";
        unsigned line = 0, column = 0;
        if (script) {
            line = PCToLineNumber(script, pc, &column);
            filename = script->filename();
        }

        spew(SpewOps, "%s%sBAILOUT %d%s: %s (%d) at %s:%d:%d", bold(), yellow(), count, reset(),
             BailoutExplanation(cause), cause, filename, line, column);
    }

    void beginCompile(HandleScript script) {
        if (!active[SpewCompile])
            return;

        spew(SpewCompile, "COMPILE %p:%s:%u", script.get(), script->filename(), script->lineno());
        depth++;
    }

    void endCompile(MethodStatus status) {
        if (!active[SpewCompile])
            return;

        JS_ASSERT(depth > 0);
        depth--;

        const char *statusColor;
        switch (status) {
          case Method_Error:
          case Method_CantCompile:
            statusColor = red();
            break;
          case Method_Skipped:
            statusColor = yellow();
            break;
          case Method_Compiled:
            statusColor = green();
            break;
          default:
            statusColor = reset();
            break;
        }

        spew(SpewCompile, "END %s%s%s", statusColor, MethodStatusToString(status), reset());
    }

    void spewMIR(MDefinition *mir, const char *fmt, va_list ap) {
        if (!active[SpewCompile])
            return;

        char buf[BufferSize];
        JS_vsnprintf(buf, BufferSize, fmt, ap);

        JSScript *script = mir->block()->info().script();
        spew(SpewCompile, "%s%s%s: %s (%s:%u)", cyan(), mir->opName(), reset(), buf,
             script->filename(), PCToLineNumber(script, mir->trackedPc()));
    }

    void spewBailoutIR(IonLIRTraceData *data) {
        if (!active[SpewBailouts])
            return;

        // If we didn't bail from a LIR/MIR but from a propagated parallel
        // bailout, don't bother printing anything since we've printed it
        // elsewhere.
        if (data->mirOpName && data->script) {
            spew(SpewBailouts, "%sBailout%s: %s / %s%s%s (block %d lir %d) (%s:%u)", yellow(), reset(),
                 data->lirOpName, cyan(), data->mirOpName, reset(),
                 data->blockIndex, data->lirIndex, data->script->filename(),
                 PCToLineNumber(data->script, data->pc));
        }
    }
};

// Singleton instance of the spewer.
static ParallelSpewer spewer;

bool
parallel::SpewEnabled(SpewChannel channel)
{
    return spewer.isActive(channel);
}

void
parallel::Spew(SpewChannel channel, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    spewer.spewVA(channel, fmt, ap);
    va_end(ap);
}

void
parallel::SpewBeginOp(JSContext *cx, const char *name)
{
    spewer.beginOp(cx, name);
}

ExecutionStatus
parallel::SpewEndOp(ExecutionStatus status)
{
    spewer.endOp(status);
    return status;
}

void
parallel::SpewBailout(uint32_t count, HandleScript script,
                      jsbytecode *pc, ParallelBailoutCause cause)
{
    spewer.bailout(count, script, pc, cause);
}

void
parallel::SpewBeginCompile(HandleScript script)
{
    spewer.beginCompile(script);
}

MethodStatus
parallel::SpewEndCompile(MethodStatus status)
{
    spewer.endCompile(status);
    return status;
}

void
parallel::SpewMIR(MDefinition *mir, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    spewer.spewMIR(mir, fmt, ap);
    va_end(ap);
}

void
parallel::SpewBailoutIR(IonLIRTraceData *data)
{
    spewer.spewBailoutIR(data);
}

#endif // DEBUG

bool
js::InExclusiveParallelSection()
{
    return InParallelSection() && ForkJoinContext::current()->hasAcquiredJSContext();
}

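// Parallel tests are only expected to reach parallel execution when both Ion
// and Baseline are enabled, eager compilation is off, the Baseline use-count
// threshold is nonzero, and GC zeal is not active.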
bool
js::ParallelTestsShouldPass(JSContext *cx)
{
    return jit::IsIonEnabled(cx) &&
           jit::IsBaselineEnabled(cx) &&
           !jit::js_JitOptions.eagerCompilation &&
           jit::js_JitOptions.baselineUsesBeforeCompile != 0 &&
           cx->runtime()->gcZeal() == 0;
}

void
js::RequestInterruptForForkJoin(JSRuntime *rt, JSRuntime::InterruptMode mode)
{
    if (mode != JSRuntime::RequestInterruptAnyThreadDontStopIon)
        rt->interruptPar = true;
}

bool
js::intrinsic_SetForkJoinTargetRegion(JSContext *cx, unsigned argc, Value *vp)
{
    // This version of SetForkJoinTargetRegion is called during
    // sequential execution. It is a no-op. The parallel version
    // is intrinsic_SetForkJoinTargetRegionPar(), below.
    return true;
}

static bool
intrinsic_SetForkJoinTargetRegionPar(ForkJoinContext *cx, unsigned argc, Value *vp)
{
    // Sets the *target region*, which is the portion of the output
    // buffer that the current iteration is permitted to write to.
    //
    // Note: it is important that the target region be an entire
    // element (or several elements) of the output array and not some
    // region that spans from the middle of one element into the
    // middle of another. This is because the guarding code assumes
    // that handles, which never straddle elements, will either be
    // contained entirely within the target region or lie entirely
    // outside of it, neither straddling its boundary nor
    // encompassing it.
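    // For example, if the output were an array of 8-byte elements, a write
    // to element i would use the region [mem + 8*i, mem + 8*(i+1))
    // (the 8-byte element size is purely illustrative).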

    CallArgs args = CallArgsFromVp(argc, vp);
    JS_ASSERT(argc == 3);
    JS_ASSERT(args[0].isObject() && args[0].toObject().is<TypedObject>());
    JS_ASSERT(args[1].isInt32());
    JS_ASSERT(args[2].isInt32());

    uint8_t *mem = args[0].toObject().as<TypedObject>().typedMem();
    int32_t start = args[1].toInt32();
    int32_t end = args[2].toInt32();

    cx->targetRegionStart = mem + start;
    cx->targetRegionEnd = mem + end;
    return true;
}

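// JS_JITINFO_NATIVE_PARALLEL pairs the sequential intrinsic's jit info with
// the *Par implementation above, so that calls made from parallel-compiled
// code dispatch to the parallel-safe variant.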
JS_JITINFO_NATIVE_PARALLEL(js::intrinsic_SetForkJoinTargetRegionInfo,
                           intrinsic_SetForkJoinTargetRegionPar);

bool
js::intrinsic_ClearThreadLocalArenas(JSContext *cx, unsigned argc, Value *vp)
{
    return true;
}

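// The sequential version above has nothing to clear; the parallel version
// wipes the calling worker's thread-local arenas via its Allocator.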
static bool
intrinsic_ClearThreadLocalArenasPar(ForkJoinContext *cx, unsigned argc, Value *vp)
{
    cx->allocator()->arenas.wipeDuringParallelExecution(cx->runtime());
    return true;
}

JS_JITINFO_NATIVE_PARALLEL(js::intrinsic_ClearThreadLocalArenasInfo,
                           intrinsic_ClearThreadLocalArenasPar);

#endif // JS_THREADSAFE && JS_ION