/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef vm_ForkJoin_h
#define vm_ForkJoin_h

#include "mozilla/ThreadLocal.h"

#include "jscntxt.h"

#include "gc/GCInternals.h"

#include "jit/Ion.h"

///////////////////////////////////////////////////////////////////////////
// Read Me First
//
// The ForkJoin abstraction:
// -------------------------
//
// This is the building block for executing multi-threaded JavaScript with
// shared memory (as distinct from Web Workers). The idea is that you have
// some (typically data-parallel) operation which you wish to execute in
// parallel across as many threads as you have available.
//
// The ForkJoin abstraction is intended to be used by self-hosted code
// to enable parallel execution. At the top level, it consists of a native
// function (exposed as the ForkJoin intrinsic) that is used like so:
//
//     ForkJoin(func, sliceStart, sliceEnd, mode)
//
// The intention of this statement is to start some number (usually the
// number of hardware threads) of copies of |func()| running in parallel.
// Each copy will then do a portion of the total work, depending on
// workstealing-based load balancing.
//
// Typically, each of the N slices runs in a different worker thread, but
// that is not something you should rely upon; if work-stealing is enabled,
// a single worker thread may wind up handling multiple slices.
//
// The second and third arguments, |sliceStart| and |sliceEnd|, are the slice
// boundaries. These numbers must each fit inside a uint16_t.
//
// The fourth argument, |mode|, is an internal mode integer giving finer
// control over the behavior of ForkJoin. See the |ForkJoinMode| enum.
//
// func() should expect the following arguments:
//
//     func(workerId, sliceStart, sliceEnd)
//
// The |workerId| parameter is the id of the worker executing the function.
// It is 0 in sequential mode.
//
// The |sliceStart| and |sliceEnd| parameters are the current bounds that
// the worker is handling. In parallel execution, these parameters are not
// used. In sequential execution, they tell the worker which slices should
// be processed. During the warm-up phase, sliceEnd == sliceStart + 1.
//
// |func| can keep asking for more work from the scheduler by calling the
// intrinsic |GetForkJoinSlice(sliceStart, sliceEnd, id)|. When there are no
// more slices to hand out, ThreadPool::MAX_SLICE_ID is returned as a
// sentinel value. By exposing this function as an intrinsic, we reduce the
// number of JS-C++ boundary crossings incurred by workstealing, which may
// have many slices.
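//
// For illustration only: the C++-level counterpart of that slice-draining
// loop goes through |ForkJoinContext::getSlice()|, declared later in this
// file. |processSlice| below is a hypothetical helper, not part of this
// API, and the loop is only a sketch:
//
//     uint16_t sliceId;
//     while (cx->getSlice(&sliceId))    // cx is the current ForkJoinContext*
//         processSlice(cx, sliceId);    // do the work for one slice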
//
// In sequential execution, |func| should return the maximum computed slice id
// S for which all slices with id < S have already been processed. This is so
// ThreadPool can track the leftmost completed slice id to maintain
// determinism. Slices which have been completed in sequential execution
// cannot be re-run in parallel execution.
//
// In parallel execution, |func| MUST PROCESS ALL SLICES BEFORE RETURNING!
// Not doing so is an error and is protected by debug asserts in ThreadPool.
//
// Warmups and Sequential Fallbacks
// --------------------------------
//
// ForkJoin can only execute code in parallel when it has been
// ion-compiled in Parallel Execution Mode. ForkJoin handles this part
// for you. However, because ion relies on having decent type
// information available, it is necessary to run the code sequentially
// for a few iterations first to get the various type sets "primed"
// with reasonable information. We try to make do with just a few
// runs, under the hypothesis that parallel execution code reaches
// type stability relatively quickly.
//
// The general strategy of ForkJoin is as follows (a rough pseudocode
// sketch appears after this list):
//
// - If the code has not yet been run, invoke `func` sequentially with
//   warmup set to true. When warmup is true, `func` should try to
//   do less work than normal, just enough to prime type sets. (See
//   ParallelArray.js for a discussion of how specifically we do this
//   in the case of ParallelArray.)
//
// - Try to execute the code in parallel. Parallel execution mode has
//   three possible results: success, fatal error, or bailout. If a
//   bailout occurs, it means that the code attempted some action
//   which is not possible in parallel mode. This might be a
//   modification to shared state, but it might also be that it
//   attempted to take some theoretically pure action that has not been
//   made threadsafe (yet?).
//
// - If parallel execution is successful, ForkJoin returns true.
//
// - If parallel execution results in a fatal error, ForkJoin returns false.
//
// - If parallel execution results in a *bailout*, this is when things
//   get interesting. In that case, the semantics of parallel
//   execution guarantee us that no visible side effects have occurred
//   (unless they were performed with the intrinsic
//   |UnsafePutElements()|, which can only be used in self-hosted
//   code). We therefore reinvoke |func()| but with warmup set to
//   true. The idea here is that often parallel bailouts result from
//   a failed type guard or other similar assumption, so rerunning the
//   warmup sequentially gives us a chance to recompile with more
//   data. Because warmup is true, we do not expect this sequential
//   call to process all remaining data, just a chunk. After this
//   recovery execution is complete, we again attempt parallel
//   execution.
//
// - If more than a fixed number of bailouts occur, we give up on
//   parallelization and just invoke |func()| N times in a row (once
//   for each worker) but with |warmup| set to false.
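//
// A rough pseudocode sketch of that strategy (illustrative only; the
// helpers and the constant below are invented for exposition and are not
// part of this API):
//
//     if (CodeHasNotBeenRunYet(func))
//         RunSequentialWarmup(func);            // prime type sets
//     for (int bailouts = 0; bailouts < MAX_BAILOUTS; bailouts++) {
//         switch (TryParallelExecution(func)) {
//           case Success:    return true;
//           case FatalError: return false;
//           case Bailout:
//             RunSequentialWarmup(func);        // recover; recompile with more data
//             break;
//         }
//     }
//     RunSequentiallyOncePerWorker(func);       // give up on parallelization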
//
// Interrupts:
//
// During parallel execution, |cx.check()| must be periodically invoked to
// check for interrupts. This is automatically done by the Ion-generated
// code. If an interrupt has been requested, |cx.check()| aborts parallel
// execution.
//
// Transitive compilation:
//
// One of the challenges for parallel compilation is that we
// (currently) have to abort when we encounter an uncompiled script.
// Therefore, we try to compile everything that might be needed
// beforehand. The exact strategy is described in `ParallelDo::apply()`
// in ForkJoin.cpp, but at the highest level the idea is:
//
// 1. We maintain a flag on every script telling us if that script and
//    its transitive callees are believed to be compiled. If that flag
//    is set, we can skip the initial compilation.
// 2. Otherwise, we maintain a worklist that begins with the main
//    script. We compile it and then examine the generated parallel
//    IonScript, which will have a list of callees. We enqueue those.
//    Some of these compilations may take place off the main thread,
//    in which case we will run warmup iterations while we wait for
//    them to complete.
// 3. If the warmup iterations finish all the work, we're done.
// 4. If compilations fail, we fall back to sequential execution.
// 5. Otherwise, we will try running in parallel once we're all done.
//
// Bailout tracing and recording:
//
// When a bailout occurs, we record a bit of state so that we can
// recover with grace. Each |ForkJoinContext| has a pointer to a
// |ParallelBailoutRecord| pre-allocated for this purpose. This
// structure is used to record the cause of the bailout, the JSScript
// which was executing, as well as the location in the source where
// the bailout occurred (in principle, we can record a full stack
// trace, but right now we only record the top-most frame). Note that
// the error location might not be in the same JSScript as the one
// which was executing, due to inlining.
//
// Garbage collection and allocation:
//
// Code which executes on these parallel threads must be very careful
// with respect to garbage collection and allocation. The typical
// allocation paths are UNSAFE in parallel code because they access
// shared state (the compartment's arena lists and so forth) without
// any synchronization. They can also trigger GC in an ad-hoc way.
//
// To deal with this, the forkjoin code creates a distinct |Allocator|
// object for each slice. You can access the appropriate object via
// the |ForkJoinContext| object that is provided to the callbacks. Once
// the execution is complete, all the objects found in these distinct
// |Allocator|s are merged back into the main compartment lists and
// things proceed normally.
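//
// For example (illustrative only; |reason| is just a placeholder variable),
// VM-level code running on a parallel thread reaches its per-slice state
// through TLS and routes GC requests through it (see |requestGC| below)
// rather than triggering a GC directly:
//
//     js::ForkJoinContext *cx = js::ForkJoinContext::current();
//     if (cx)                       // non-null only inside a parallel section
//         cx->requestGC(reason);    // deferred until the section completes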
//
// In Ion-generated code, we will do allocation through the
// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
// Also, no write barriers are emitted. Conceptually, we should never
// need a write barrier because we only permit writes to objects that
// are newly allocated, and such objects are always black (to use
// incremental GC terminology). However, to be safe, we also block
// upon entering a parallel section to ensure that any concurrent
// marking or incremental GC has completed.
//
// In the future, it should be possible to lift the restriction that
// we must block until incremental GC has completed and also to permit
// GC during parallel execution. But we're not there yet.
//
// Load balancing (work stealing):
//
// The ForkJoin job is dynamically divided into a fixed number of slices,
// and is submitted for parallel execution in the pool. When the number
// of slices is large enough (typically greater than the number of workers
// in the pool) and the workload is unbalanced, each worker thread
// will perform load balancing through work stealing. The number
// of slices is computed by the self-hosted function |ComputeNumSlices|
// and can be used to determine how many slices the runtime will execute
// for an array of the given size.
//
// Current Limitations:
//
// - The API does not support recursive or nested use. That is, the
//   JavaScript function given to |ForkJoin| should not itself invoke
//   |ForkJoin()|. Instead, use the intrinsic |InParallelSection()| to
//   check for recursive use and execute a sequential fallback (see the
//   note below).
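//
// For VM-level (C++) code, the analogous check is |js::InParallelSection()|,
// defined at the end of this file. Illustrative only:
//
//     if (js::InParallelSection()) {
//         // Nested parallel execution is not supported: take a sequential
//         // path here instead of calling ForkJoin again.
//     }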
//
///////////////////////////////////////////////////////////////////////////

namespace js {

class ForkJoinActivation : public Activation
{
    uint8_t *prevIonTop_;

    // We ensure that incremental GC is finished before we enter into a fork
    // join section, but the runtime/zone might still be marked as needing
    // barriers due to being in the middle of verifying barriers. Pause
    // verification during the fork join section.
    gc::AutoStopVerifyingBarriers av_;

  public:
    ForkJoinActivation(JSContext *cx);
    ~ForkJoinActivation();
};

class ForkJoinContext;

bool ForkJoin(JSContext *cx, CallArgs &args);

struct IonLIRTraceData {
    uint32_t blockIndex;
    uint32_t lirIndex;
    uint32_t execModeInt;
    const char *lirOpName;
    const char *mirOpName;
    JSScript *script;
    jsbytecode *pc;
};

///////////////////////////////////////////////////////////////////////////
// Bailout tracking

enum ParallelBailoutCause {
    ParallelBailoutNone,

    // Compiler returned Method_Skipped
    ParallelBailoutCompilationSkipped,

    // Compiler returned Method_CantCompile
    ParallelBailoutCompilationFailure,

    // The periodic interrupt failed, which can mean that another thread
    // canceled, the user interrupted us, etc.
    ParallelBailoutInterrupt,

    // An IC update failed
    ParallelBailoutFailedIC,

    // Heap busy flag was set during interrupt
    ParallelBailoutHeapBusy,

    ParallelBailoutMainScriptNotPresent,
    ParallelBailoutCalledToUncompiledScript,
    ParallelBailoutIllegalWrite,
    ParallelBailoutAccessToIntrinsic,
    ParallelBailoutOverRecursed,
    ParallelBailoutOutOfMemory,
    ParallelBailoutUnsupported,
    ParallelBailoutUnsupportedVM,
    ParallelBailoutUnsupportedStringComparison,
    ParallelBailoutRequestedGC,
    ParallelBailoutRequestedZoneGC,
};

struct ParallelBailoutTrace {
    JSScript *script;
    jsbytecode *bytecode;
};

// See the "Bailout tracing and recording" section in the comment above.
struct ParallelBailoutRecord {
    JSScript *topScript;
    ParallelBailoutCause cause;

    // Eventually we will support deeper traces,
    // but for now we gather at most a single frame.
    static const uint32_t MaxDepth = 1;
    uint32_t depth;
    ParallelBailoutTrace trace[MaxDepth];

    void init(JSContext *cx);
    void reset(JSContext *cx);
    void setCause(ParallelBailoutCause cause,
                  JSScript *outermostScript = nullptr,  // inliner (if applicable)
                  JSScript *currentScript = nullptr,    // inlinee (if applicable)
                  jsbytecode *currentPc = nullptr);
    void updateCause(ParallelBailoutCause cause,
                     JSScript *outermostScript,
                     JSScript *currentScript,
                     jsbytecode *currentPc);
    void addTrace(JSScript *script,
                  jsbytecode *pc);
};
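
// Illustrative sketch only (|record|, |script|, and |pc| are hypothetical
// locals): a VM routine that hits an unsupported operation typically records
// the cause in the bailout record before unwinding, along the lines of:
//
//     record->setCause(ParallelBailoutUnsupportedVM,
//                      nullptr,        // outermost (inliner) script, if known
//                      script, pc);    // where the bailout occurred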

struct ForkJoinShared;

class ForkJoinContext : public ThreadSafeContext
{
  public:
    // Bailout record used to record the reason this thread stopped executing.
    ParallelBailoutRecord *const bailoutRecord;

#ifdef DEBUG
    // Records the last instruction to execute on this thread.
    IonLIRTraceData traceData;

    // The maximum worker id.
    uint32_t maxWorkerId;
#endif

    // When we run a par operation like mapPar, we create an out pointer
    // into a specific region of the destination buffer. Even though the
    // destination buffer is not thread-local, it is permissible to write into
    // it via the handles provided. These two fields identify the memory
    // region where writes are allowed so that the write guards can test for
    // it.
    //
    // Note: we only permit writes into the *specific region* that the user
    // is supposed to write. Normally, they only have access to this region
    // anyhow. But due to sequential fallback it is possible for handles into
    // other regions to escape into global variables in the sequential
    // execution and then get accessed by later parallel sections. Thus we
    // must be careful and ensure that the write is going through a handle
    // into the correct *region* of the buffer.
    uint8_t *targetRegionStart;
    uint8_t *targetRegionEnd;

    ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
                    Allocator *allocator, ForkJoinShared *shared,
                    ParallelBailoutRecord *bailoutRecord);

    // Get the worker id. The main thread by convention has the id of the
    // maximum worker thread id + 1.
    uint32_t workerId() const { return worker_->id(); }

    // Get a slice of work for the worker associated with the context.
    bool getSlice(uint16_t *sliceId) { return worker_->getSlice(this, sliceId); }

    // True if this is the main thread, false if it is one of the parallel workers.
    bool isMainThread() const;

    // When the code would normally trigger a GC, we don't trigger it
    // immediately but instead record that request here. This will
    // cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or
    // |TriggerCompartmentGC()| as appropriate once the parallel
    // section is complete. This is done because those routines do
    // various preparations that are not thread-safe, and because the
    // full set of arenas is not available until the end of the
    // parallel section.
    void requestGC(JS::gcreason::Reason reason);
    void requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason);

    // Set the fatal flag for the next abort. Used to distinguish retry or
    // fatal aborts from VM functions.
    bool setPendingAbortFatal(ParallelBailoutCause cause);

    // Reports an unsupported operation, returning false if we are reporting
    // an error; otherwise the warning is dropped on the floor.
    bool reportError(ParallelBailoutCause cause, unsigned report) {
        if (report & JSREPORT_ERROR)
            return setPendingAbortFatal(cause);
        return true;
    }

    // During the parallel phase, this method should be invoked
    // periodically, for example on every backedge, similar to the
    // interrupt check. If it returns false, then the parallel phase
    // has been aborted and so you should bail out. The function may
    // also rendezvous to perform GC or do other similar things.
    //
    // This function is guaranteed to have no effect if
    // runtime()->interruptPar is zero. Ion-generated code takes
    // advantage of this by inlining the check on that flag before
    // actually calling this function. If this function ends up
    // getting called a lot from outside ion code, we can refactor
    // it into an inlined version with this check that calls a slower
    // version.
    bool check();
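
    // Illustrative only: a VM-level loop running under ForkJoin would
    // typically poll |check()| once per iteration and unwind when it fails,
    // roughly like so (|moreWorkToDo| and the loop body are hypothetical):
    //
    //     while (moreWorkToDo) {
    //         if (!check())
    //             return false;    // parallel phase aborted: bail out
    //         ...                  // do one unit of work
    //     }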

    // Be wary, the runtime is shared between all threads!
    JSRuntime *runtime();

    // Acquire and release the JSContext from the runtime.
    JSContext *acquireJSContext();
    void releaseJSContext();
    bool hasAcquiredJSContext() const;

    // Get the ForkJoinContext for the current thread, or null if we are not
    // inside a parallel section.
    static inline ForkJoinContext *current();

    // Initializes the thread-local state.
    static bool initialize();

    // Used in inlining GetForkJoinSlice.
    static size_t offsetOfWorker() {
        return offsetof(ForkJoinContext, worker_);
    }

  private:
    friend class AutoSetForkJoinContext;

    // Initialized by initialize().
    static mozilla::ThreadLocal<ForkJoinContext*> tlsForkJoinContext;

    ForkJoinShared *const shared_;

    ThreadPoolWorker *worker_;

    bool acquiredJSContext_;

    // ForkJoinContext is allocated on the stack. It would be dangerous to GC
    // with it live because of the GC pointer fields stored in the context.
    JS::AutoAssertNoGC nogc_;
};
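
// Illustrative only: VM code that needs the shared JSContext briefly can
// pair |acquireJSContext()| / |releaseJSContext()| by hand, though the RAII
// helper |LockedJSContext| below is the usual way to do this:
//
//     JSContext *jscx = cx->acquireJSContext();   // cx is a ForkJoinContext*
//     ...                                         // use jscx while the lock is held
//     cx->releaseJSContext();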

// Locks a JSContext for its scope. Be very careful, because locking a
// JSContext does *not* allow you to safely mutate the data in the
// JSContext unless you can guarantee that any of the other threads
// that want to access that data will also acquire the lock, which is
// generally not the case. For example, the lock is used in the IC
// code to allow us to atomically patch up the dispatch table, but we
// must be aware that other threads may be reading from the table even
// as we write to it (though they cannot be writing, since they must
// hold the lock to write).
class LockedJSContext
{
#if defined(JS_THREADSAFE) && defined(JS_ION)
    ForkJoinContext *cx_;
#endif
    JSContext *jscx_;

  public:
    LockedJSContext(ForkJoinContext *cx)
#if defined(JS_THREADSAFE) && defined(JS_ION)
      : cx_(cx),
        jscx_(cx->acquireJSContext())
#else
      : jscx_(nullptr)
#endif
    { }

    ~LockedJSContext() {
#if defined(JS_THREADSAFE) && defined(JS_ION)
        cx_->releaseJSContext();
#endif
    }

    operator JSContext *() { return jscx_; }
    JSContext *operator->() { return jscx_; }
};

bool InExclusiveParallelSection();

bool ParallelTestsShouldPass(JSContext *cx);

void RequestInterruptForForkJoin(JSRuntime *rt, JSRuntime::InterruptMode mode);

bool intrinsic_SetForkJoinTargetRegion(JSContext *cx, unsigned argc, Value *vp);
extern const JSJitInfo intrinsic_SetForkJoinTargetRegionInfo;

bool intrinsic_ClearThreadLocalArenas(JSContext *cx, unsigned argc, Value *vp);
extern const JSJitInfo intrinsic_ClearThreadLocalArenasInfo;

///////////////////////////////////////////////////////////////////////////
// Debug Spew

namespace jit {
class MDefinition;
}

namespace parallel {

enum ExecutionStatus {
    // Parallel or seq execution terminated in a fatal way, operation failed
    ExecutionFatal,

    // Parallel exec failed and so we fell back to sequential
    ExecutionSequential,

    // We completed the work in seq mode before parallel compilation completed
    ExecutionWarmup,

    // Parallel exec was successful after some number of bailouts
    ExecutionParallel
};

enum SpewChannel {
    SpewOps,
    SpewCompile,
    SpewBailouts,
    NumSpewChannels
};

#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)

bool SpewEnabled(SpewChannel channel);
void Spew(SpewChannel channel, const char *fmt, ...);
void SpewBeginOp(JSContext *cx, const char *name);
void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
                 ParallelBailoutCause cause);
ExecutionStatus SpewEndOp(ExecutionStatus status);
void SpewBeginCompile(HandleScript script);
jit::MethodStatus SpewEndCompile(jit::MethodStatus status);
void SpewMIR(jit::MDefinition *mir, const char *fmt, ...);
void SpewBailoutIR(IonLIRTraceData *data);

#else

static inline bool SpewEnabled(SpewChannel channel) { return false; }
static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
static inline void SpewBeginOp(JSContext *cx, const char *name) { }
static inline void SpewBailout(uint32_t count, HandleScript script,
                               jsbytecode *pc, ParallelBailoutCause cause) {}
static inline ExecutionStatus SpewEndOp(ExecutionStatus status) { return status; }
static inline void SpewBeginCompile(HandleScript script) { }
#ifdef JS_ION
static inline jit::MethodStatus SpewEndCompile(jit::MethodStatus status) { return status; }
static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
#endif
static inline void SpewBailoutIR(IonLIRTraceData *data) { }

#endif // DEBUG && JS_THREADSAFE && JS_ION

} // namespace parallel
} // namespace js

/* static */ inline js::ForkJoinContext *
js::ForkJoinContext::current()
{
    return tlsForkJoinContext.get();
}

namespace js {

static inline bool
InParallelSection()
{
    return ForkJoinContext::current() != nullptr;
}

} // namespace js

#endif /* vm_ForkJoin_h */