js/src/vm/ForkJoin.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/vm/ForkJoin.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,559 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef vm_ForkJoin_h
    1.11 +#define vm_ForkJoin_h
    1.12 +
    1.13 +#include "mozilla/ThreadLocal.h"
    1.14 +
    1.15 +#include "jscntxt.h"
    1.16 +
    1.17 +#include "gc/GCInternals.h"
    1.18 +
    1.19 +#include "jit/Ion.h"
    1.20 +
    1.21 +///////////////////////////////////////////////////////////////////////////
    1.22 +// Read Me First
    1.23 +//
    1.24 +// The ForkJoin abstraction:
    1.25 +// -------------------------
    1.26 +//
    1.27 +// This is the building block for executing multi-threaded JavaScript with
    1.28 +// shared memory (as distinct from Web Workers).  The idea is that you have
    1.29 +// some (typically data-parallel) operation which you wish to execute in
    1.30 +// parallel across as many threads as you have available.
    1.31 +//
    1.32 +// The ForkJoin abstraction is intended to be used by self-hosted code
    1.33 +// to enable parallel execution.  At the top-level, it consists of a native
    1.34 +// function (exposed as the ForkJoin intrinsic) that is used like so:
    1.35 +//
    1.36 +//     ForkJoin(func, sliceStart, sliceEnd, mode)
    1.37 +//
     1.38 +// The intention of this statement is to start some number (usually the
     1.39 +// number of hardware threads) of copies of |func()| running in parallel. Each
     1.40 +// copy will then do a portion of the total work, as determined by
     1.41 +// work-stealing-based load balancing.
    1.42 +//
    1.43 +// Typically, each of the N slices runs in a different worker thread, but that
    1.44 +// is not something you should rely upon---if work-stealing is enabled it
    1.45 +// could be that a single worker thread winds up handling multiple slices.
    1.46 +//
    1.47 +// The second and third arguments, |sliceStart| and |sliceEnd|, are the slice
     1.48 +// boundaries. These numbers must each fit inside a uint16_t.
    1.49 +//
    1.50 +// The fourth argument, |mode|, is an internal mode integer giving finer
    1.51 +// control over the behavior of ForkJoin. See the |ForkJoinMode| enum.
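//
// Purely as an illustrative sketch (this is not the actual ForkJoin.cpp
// code), the native entry point declared below as
// |bool ForkJoin(JSContext *cx, CallArgs &args)| might unpack these four
// arguments roughly like so:
//
//     bool ForkJoin(JSContext *cx, CallArgs &args) {
//         RootedObject func(cx, &args[0].toObject());         // |func| callee
//         uint16_t sliceStart = uint16_t(args[1].toInt32());  // fits in uint16_t
//         uint16_t sliceEnd = uint16_t(args[2].toInt32());    // fits in uint16_t
//         ForkJoinMode mode = ForkJoinMode(args[3].toInt32());
//         // ... warm up, compile for parallel execution, dispatch to the pool ...
//         return true;
//     }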
    1.52 +//
    1.53 +// func() should expect the following arguments:
    1.54 +//
    1.55 +//     func(workerId, sliceStart, sliceEnd)
    1.56 +//
    1.57 +// The |workerId| parameter is the id of the worker executing the function. It
    1.58 +// is 0 in sequential mode.
    1.59 +//
     1.60 +// The |sliceStart| and |sliceEnd| parameters are the current bounds that
    1.61 +// the worker is handling. In parallel execution, these parameters are not
    1.62 +// used. In sequential execution, they tell the worker what slices should be
    1.63 +// processed. During the warm up phase, sliceEnd == sliceStart + 1.
    1.64 +//
    1.65 +// |func| can keep asking for more work from the scheduler by calling the
    1.66 +// intrinsic |GetForkJoinSlice(sliceStart, sliceEnd, id)|. When there are no
    1.67 +// more slices to hand out, ThreadPool::MAX_SLICE_ID is returned as a sentinel
     1.68 +// value. By exposing this function as an intrinsic, we reduce the number of
     1.69 +// JS-C++ boundary crossings incurred by work stealing, which may involve many
     1.70 +// slices.
    1.71 +//
    1.72 +// In sequential execution, |func| should return the maximum computed slice id
    1.73 +// S for which all slices with id < S have already been processed. This is so
    1.74 +// ThreadPool can track the leftmost completed slice id to maintain
    1.75 +// determinism. Slices which have been completed in sequential execution
    1.76 +// cannot be re-run in parallel execution.
    1.77 +//
    1.78 +// In parallel execution, |func| MUST PROCESS ALL SLICES BEFORE RETURNING!
    1.79 +// Not doing so is an error and is protected by debug asserts in ThreadPool.
    1.80 +//
    1.81 +// Warmups and Sequential Fallbacks
    1.82 +// --------------------------------
    1.83 +//
    1.84 +// ForkJoin can only execute code in parallel when it has been
    1.85 +// ion-compiled in Parallel Execution Mode. ForkJoin handles this part
    1.86 +// for you. However, because ion relies on having decent type
    1.87 +// information available, it is necessary to run the code sequentially
    1.88 +// for a few iterations first to get the various type sets "primed"
    1.89 +// with reasonable information.  We try to make do with just a few
     1.90 +// runs, under the hypothesis that parallel execution code reaches
     1.91 +// type stability relatively quickly.
    1.92 +//
    1.93 +// The general strategy of ForkJoin is as follows:
    1.94 +//
    1.95 +// - If the code has not yet been run, invoke `func` sequentially with
     1.96 +//   warmup set to true.  When warmup is true, `func` should try to
    1.97 +//   do less work than normal---just enough to prime type sets. (See
    1.98 +//   ParallelArray.js for a discussion of specifically how we do this
    1.99 +//   in the case of ParallelArray).
   1.100 +//
   1.101 +// - Try to execute the code in parallel.  Parallel execution mode has
   1.102 +//   three possible results: success, fatal error, or bailout.  If a
   1.103 +//   bailout occurs, it means that the code attempted some action
   1.104 +//   which is not possible in parallel mode.  This might be a
   1.105 +//   modification to shared state, but it might also be that it
    1.106 +//   attempted to take some theoretically pure action that has not been
   1.107 +//   made threadsafe (yet?).
   1.108 +//
   1.109 +// - If parallel execution is successful, ForkJoin returns true.
   1.110 +//
   1.111 +// - If parallel execution results in a fatal error, ForkJoin returns false.
   1.112 +//
   1.113 +// - If parallel execution results in a *bailout*, this is when things
   1.114 +//   get interesting.  In that case, the semantics of parallel
   1.115 +//   execution guarantee us that no visible side effects have occurred
   1.116 +//   (unless they were performed with the intrinsic
   1.117 +//   |UnsafePutElements()|, which can only be used in self-hosted
   1.118 +//   code).  We therefore reinvoke |func()| but with warmup set to
   1.119 +//   true.  The idea here is that often parallel bailouts result from
   1.120 +//   a failed type guard or other similar assumption, so rerunning the
   1.121 +//   warmup sequentially gives us a chance to recompile with more
   1.122 +//   data.  Because warmup is true, we do not expect this sequential
   1.123 +//   call to process all remaining data, just a chunk.  After this
   1.124 +//   recovery execution is complete, we again attempt parallel
   1.125 +//   execution.
   1.126 +//
   1.127 +// - If more than a fixed number of bailouts occur, we give up on
   1.128 +//   parallelization and just invoke |func()| N times in a row (once
   1.129 +//   for each worker) but with |warmup| set to false.
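//
// In schematic form (illustrative only: the helper names below are
// hypothetical, and the real logic lives in ForkJoin.cpp), the strategy
// above is roughly:
//
//     warmupOnce();                         // sequential run, warmup = true
//     for (int bailouts = 0; bailouts < MAX_BAILOUTS; bailouts++) {
//         Result r = attemptParallel();     // compile + run in parallel mode
//         if (r == Success || r == FatalError)
//             return r;                     // done, or report the failure
//         warmupOnce();                     // bailout: re-prime types, retry
//     }
//     return runSequentially();             // give up on parallelization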
   1.130 +//
   1.131 +// Interrupts:
   1.132 +//
   1.133 +// During parallel execution, |cx.check()| must be periodically invoked to
   1.134 +// check for interrupts. This is automatically done by the Ion-generated
    1.135 +// code. If an interrupt has been requested, |cx.check()| aborts parallel
   1.136 +// execution.
   1.137 +//
   1.138 +// Transitive compilation:
   1.139 +//
   1.140 +// One of the challenges for parallel compilation is that we
   1.141 +// (currently) have to abort when we encounter an uncompiled script.
   1.142 +// Therefore, we try to compile everything that might be needed
   1.143 +// beforehand. The exact strategy is described in `ParallelDo::apply()`
   1.144 +// in ForkJoin.cpp, but at the highest level the idea is:
   1.145 +//
   1.146 +// 1. We maintain a flag on every script telling us if that script and
   1.147 +//    its transitive callees are believed to be compiled. If that flag
   1.148 +//    is set, we can skip the initial compilation.
   1.149 +// 2. Otherwise, we maintain a worklist that begins with the main
   1.150 +//    script. We compile it and then examine the generated parallel IonScript,
   1.151 +//    which will have a list of callees. We enqueue those. Some of these
   1.152 +//    compilations may take place off the main thread, in which case
   1.153 +//    we will run warmup iterations while we wait for them to complete.
   1.154 +// 3. If the warmup iterations finish all the work, we're done.
    1.155 +// 4. If compilations fail, we fall back to sequential execution.
   1.156 +// 5. Otherwise, we will try running in parallel once we're all done.
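//
// A rough sketch of step 2 (helper names are hypothetical; see
// |ParallelDo::apply()| in ForkJoin.cpp for the real logic):
//
//     worklist.append(mainScript);
//     while (!worklist.empty()) {
//         JSScript *script = worklist.popCopy();
//         enqueueParallelCompilation(script);       // possibly off the main thread
//         appendParallelCallees(script, &worklist); // from its parallel IonScript
//         runWarmupIterationWhileWaiting();
//     }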
   1.157 +//
   1.158 +// Bailout tracing and recording:
   1.159 +//
   1.160 +// When a bailout occurs, we record a bit of state so that we can
   1.161 +// recover with grace. Each |ForkJoinContext| has a pointer to a
   1.162 +// |ParallelBailoutRecord| pre-allocated for this purpose. This
   1.163 +// structure is used to record the cause of the bailout, the JSScript
   1.164 +// which was executing, as well as the location in the source where
   1.165 +// the bailout occurred (in principle, we can record a full stack
   1.166 +// trace, but right now we only record the top-most frame). Note that
   1.167 +// the error location might not be in the same JSScript as the one
   1.168 +// which was executing due to inlining.
   1.169 +//
   1.170 +// Garbage collection and allocation:
   1.171 +//
   1.172 +// Code which executes on these parallel threads must be very careful
   1.173 +// with respect to garbage collection and allocation.  The typical
   1.174 +// allocation paths are UNSAFE in parallel code because they access
   1.175 +// shared state (the compartment's arena lists and so forth) without
   1.176 +// any synchronization.  They can also trigger GC in an ad-hoc way.
   1.177 +//
   1.178 +// To deal with this, the forkjoin code creates a distinct |Allocator|
   1.179 +// object for each slice.  You can access the appropriate object via
   1.180 +// the |ForkJoinContext| object that is provided to the callbacks.  Once
   1.181 +// the execution is complete, all the objects found in these distinct
    1.182 +// |Allocator| objects are merged back into the main compartment lists and
   1.183 +// things proceed normally.
   1.184 +//
   1.185 +// In Ion-generated code, we will do allocation through the
   1.186 +// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
   1.187 +// Also, no write barriers are emitted.  Conceptually, we should never
   1.188 +// need a write barrier because we only permit writes to objects that
   1.189 +// are newly allocated, and such objects are always black (to use
   1.190 +// incremental GC terminology).  However, to be safe, we also block
   1.191 +// upon entering a parallel section to ensure that any concurrent
   1.192 +// marking or incremental GC has completed.
   1.193 +//
   1.194 +// In the future, it should be possible to lift the restriction that
   1.195 +// we must block until inc. GC has completed and also to permit GC
    1.196 +// during parallel execution. But we're not there yet.
   1.197 +//
   1.198 +// Load balancing (work stealing):
   1.199 +//
   1.200 +// The ForkJoin job is dynamically divided into a fixed number of slices,
   1.201 +// and is submitted for parallel execution in the pool. When the number
   1.202 +// of slices is big enough (typically greater than the number of workers
    1.203 +// in the pool) and the workload is unbalanced, each worker thread
    1.204 +// will perform load balancing through work stealing. The number
    1.205 +// of slices is computed by the self-hosted function |ComputeNumSlices|,
    1.206 +// which can be used to determine how many slices will be executed by the
   1.207 +// runtime for an array of the given size.
   1.208 +//
   1.209 +// Current Limitations:
   1.210 +//
   1.211 +// - The API does not support recursive or nested use.  That is, the
   1.212 +//   JavaScript function given to |ForkJoin| should not itself invoke
   1.213 +//   |ForkJoin()|. Instead, use the intrinsic |InParallelSection()| to
   1.214 +//   check for recursive use and execute a sequential fallback.
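//
//   For example (illustrative only), code that may run either inside or
//   outside a parallel section can guard itself using the C++ helper of the
//   same name defined at the bottom of this header:
//
//       if (InParallelSection()) {
//           // already inside ForkJoin: run a sequential fallback here
//       } else {
//           // not nested: it is safe to invoke ForkJoin() here
//       }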
   1.215 +//
   1.216 +///////////////////////////////////////////////////////////////////////////
   1.217 +
   1.218 +namespace js {
   1.219 +
   1.220 +class ForkJoinActivation : public Activation
   1.221 +{
   1.222 +    uint8_t *prevIonTop_;
   1.223 +
    1.224 +    // We ensure that incremental GC is finished before we enter a fork
    1.225 +    // join section, but the runtime/zone might still be marked as needing
   1.226 +    // barriers due to being in the middle of verifying barriers. Pause
   1.227 +    // verification during the fork join section.
   1.228 +    gc::AutoStopVerifyingBarriers av_;
   1.229 +
   1.230 +  public:
   1.231 +    ForkJoinActivation(JSContext *cx);
   1.232 +    ~ForkJoinActivation();
   1.233 +};
   1.234 +
   1.235 +class ForkJoinContext;
   1.236 +
   1.237 +bool ForkJoin(JSContext *cx, CallArgs &args);
   1.238 +
   1.239 +struct IonLIRTraceData {
   1.240 +    uint32_t blockIndex;
   1.241 +    uint32_t lirIndex;
   1.242 +    uint32_t execModeInt;
   1.243 +    const char *lirOpName;
   1.244 +    const char *mirOpName;
   1.245 +    JSScript *script;
   1.246 +    jsbytecode *pc;
   1.247 +};
   1.248 +
   1.249 +///////////////////////////////////////////////////////////////////////////
   1.250 +// Bailout tracking
   1.251 +
   1.252 +enum ParallelBailoutCause {
   1.253 +    ParallelBailoutNone,
   1.254 +
   1.255 +    // Compiler returned Method_Skipped
   1.256 +    ParallelBailoutCompilationSkipped,
   1.257 +
   1.258 +    // Compiler returned Method_CantCompile
   1.259 +    ParallelBailoutCompilationFailure,
   1.260 +
    1.261 +    // The periodic interrupt failed, which can mean that another thread
    1.262 +    // canceled, the user interrupted us, etc.
   1.263 +    ParallelBailoutInterrupt,
   1.264 +
   1.265 +    // An IC update failed
   1.266 +    ParallelBailoutFailedIC,
   1.267 +
   1.268 +    // Heap busy flag was set during interrupt
   1.269 +    ParallelBailoutHeapBusy,
   1.270 +
   1.271 +    ParallelBailoutMainScriptNotPresent,
   1.272 +    ParallelBailoutCalledToUncompiledScript,
   1.273 +    ParallelBailoutIllegalWrite,
   1.274 +    ParallelBailoutAccessToIntrinsic,
   1.275 +    ParallelBailoutOverRecursed,
   1.276 +    ParallelBailoutOutOfMemory,
   1.277 +    ParallelBailoutUnsupported,
   1.278 +    ParallelBailoutUnsupportedVM,
   1.279 +    ParallelBailoutUnsupportedStringComparison,
   1.280 +    ParallelBailoutRequestedGC,
   1.281 +    ParallelBailoutRequestedZoneGC,
   1.282 +};
   1.283 +
   1.284 +struct ParallelBailoutTrace {
   1.285 +    JSScript *script;
   1.286 +    jsbytecode *bytecode;
   1.287 +};
   1.288 +
   1.289 +// See "Bailouts" section in comment above.
   1.290 +struct ParallelBailoutRecord {
   1.291 +    JSScript *topScript;
   1.292 +    ParallelBailoutCause cause;
   1.293 +
   1.294 +    // Eventually we will support deeper traces,
   1.295 +    // but for now we gather at most a single frame.
   1.296 +    static const uint32_t MaxDepth = 1;
   1.297 +    uint32_t depth;
   1.298 +    ParallelBailoutTrace trace[MaxDepth];
   1.299 +
   1.300 +    void init(JSContext *cx);
   1.301 +    void reset(JSContext *cx);
   1.302 +    void setCause(ParallelBailoutCause cause,
   1.303 +                  JSScript *outermostScript = nullptr,   // inliner (if applicable)
   1.304 +                  JSScript *currentScript = nullptr,     // inlinee (if applicable)
   1.305 +                  jsbytecode *currentPc = nullptr);
   1.306 +    void updateCause(ParallelBailoutCause cause,
   1.307 +                     JSScript *outermostScript,
   1.308 +                     JSScript *currentScript,
   1.309 +                     jsbytecode *currentPc);
   1.310 +    void addTrace(JSScript *script,
   1.311 +                  jsbytecode *pc);
   1.312 +};
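
// Illustrative only: a VM-level operation that cannot proceed in parallel
// might record its bailout cause through the context's record roughly as
// follows (|currentScript| and |currentPc| stand in for call-site locals):
//
//     cx->bailoutRecord->setCause(ParallelBailoutUnsupportedVM,
//                                 nullptr,        // outermost script, if known
//                                 currentScript,
//                                 currentPc);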
   1.313 +
   1.314 +struct ForkJoinShared;
   1.315 +
   1.316 +class ForkJoinContext : public ThreadSafeContext
   1.317 +{
   1.318 +  public:
   1.319 +    // Bailout record used to record the reason this thread stopped executing
   1.320 +    ParallelBailoutRecord *const bailoutRecord;
   1.321 +
   1.322 +#ifdef DEBUG
   1.323 +    // Records the last instr. to execute on this thread.
   1.324 +    IonLIRTraceData traceData;
   1.325 +
   1.326 +    // The maximum worker id.
   1.327 +    uint32_t maxWorkerId;
   1.328 +#endif
   1.329 +
   1.330 +    // When we run a par operation like mapPar, we create an out pointer
   1.331 +    // into a specific region of the destination buffer. Even though the
   1.332 +    // destination buffer is not thread-local, it is permissible to write into
   1.333 +    // it via the handles provided. These two fields identify the memory
   1.334 +    // region where writes are allowed so that the write guards can test for
   1.335 +    // it.
   1.336 +    //
   1.337 +    // Note: we only permit writes into the *specific region* that the user
   1.338 +    // is supposed to write. Normally, they only have access to this region
   1.339 +    // anyhow. But due to sequential fallback it is possible for handles into
   1.340 +    // other regions to escape into global variables in the sequential
   1.341 +    // execution and then get accessed by later parallel sections. Thus we
   1.342 +    // must be careful and ensure that the write is going through a handle
   1.343 +    // into the correct *region* of the buffer.
   1.344 +    uint8_t *targetRegionStart;
   1.345 +    uint8_t *targetRegionEnd;
   1.346 +
   1.347 +    ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
   1.348 +                    Allocator *allocator, ForkJoinShared *shared,
   1.349 +                    ParallelBailoutRecord *bailoutRecord);
   1.350 +
    1.351 +    // Get the worker id. By convention, the main thread's id is the maximum
    1.352 +    // worker thread id + 1.
   1.353 +    uint32_t workerId() const { return worker_->id(); }
   1.354 +
   1.355 +    // Get a slice of work for the worker associated with the context.
   1.356 +    bool getSlice(uint16_t *sliceId) { return worker_->getSlice(this, sliceId); }
   1.357 +
   1.358 +    // True if this is the main thread, false if it is one of the parallel workers.
   1.359 +    bool isMainThread() const;
   1.360 +
   1.361 +    // When the code would normally trigger a GC, we don't trigger it
   1.362 +    // immediately but instead record that request here.  This will
   1.363 +    // cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or
   1.364 +    // |TriggerCompartmentGC()| as appropriate once the parallel
   1.365 +    // section is complete. This is done because those routines do
   1.366 +    // various preparations that are not thread-safe, and because the
   1.367 +    // full set of arenas is not available until the end of the
   1.368 +    // parallel section.
   1.369 +    void requestGC(JS::gcreason::Reason reason);
   1.370 +    void requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason);
   1.371 +
   1.372 +    // Set the fatal flag for the next abort. Used to distinguish retry or
   1.373 +    // fatal aborts from VM functions.
   1.374 +    bool setPendingAbortFatal(ParallelBailoutCause cause);
   1.375 +
   1.376 +    // Reports an unsupported operation, returning false if we are reporting
    1.377 +    // an error. Otherwise drops the warning on the floor.
   1.378 +    bool reportError(ParallelBailoutCause cause, unsigned report) {
   1.379 +        if (report & JSREPORT_ERROR)
   1.380 +            return setPendingAbortFatal(cause);
   1.381 +        return true;
   1.382 +    }
   1.383 +
   1.384 +    // During the parallel phase, this method should be invoked
   1.385 +    // periodically, for example on every backedge, similar to the
   1.386 +    // interrupt check.  If it returns false, then the parallel phase
   1.387 +    // has been aborted and so you should bailout.  The function may
    1.388 +    // also rendezvous to perform GC or do other similar things.
   1.389 +    //
    1.390 +    // This function is guaranteed to have no effect if
    1.391 +    // runtime()->interruptPar is zero.  Ion-generated code takes
    1.392 +    // advantage of this by inlining a check on that flag before
    1.393 +    // actually calling this function.  If this function ends up
    1.394 +    // getting called a lot from outside ion code, we can refactor it
    1.395 +    // into an inlined fast-path check that calls a slower out-of-line
    1.396 +    // version.
   1.397 +    bool check();
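
    // Schematically, Ion-generated code guards the call to check() with a
    // fast path like the following (illustrative only; |bailout()| stands in
    // for the generated bailout path):
    //
    //     if (cx->runtime()->interruptPar)
    //         if (!cx->check())
    //             bailout();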
   1.398 +
   1.399 +    // Be wary, the runtime is shared between all threads!
   1.400 +    JSRuntime *runtime();
   1.401 +
   1.402 +    // Acquire and release the JSContext from the runtime.
   1.403 +    JSContext *acquireJSContext();
   1.404 +    void releaseJSContext();
   1.405 +    bool hasAcquiredJSContext() const;
   1.406 +
   1.407 +    // Check the current state of parallel execution.
   1.408 +    static inline ForkJoinContext *current();
   1.409 +
   1.410 +    // Initializes the thread-local state.
   1.411 +    static bool initialize();
   1.412 +
   1.413 +    // Used in inlining GetForkJoinSlice.
   1.414 +    static size_t offsetOfWorker() {
   1.415 +        return offsetof(ForkJoinContext, worker_);
   1.416 +    }
   1.417 +
   1.418 +  private:
   1.419 +    friend class AutoSetForkJoinContext;
   1.420 +
   1.421 +    // Initialized by initialize()
   1.422 +    static mozilla::ThreadLocal<ForkJoinContext*> tlsForkJoinContext;
   1.423 +
   1.424 +    ForkJoinShared *const shared_;
   1.425 +
   1.426 +    ThreadPoolWorker *worker_;
   1.427 +
   1.428 +    bool acquiredJSContext_;
   1.429 +
   1.430 +    // ForkJoinContext is allocated on the stack. It would be dangerous to GC
   1.431 +    // with it live because of the GC pointer fields stored in the context.
   1.432 +    JS::AutoAssertNoGC nogc_;
   1.433 +};
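
// A schematic worker body (illustrative only) that drains the context's
// slices while checking for interrupts:
//
//     static bool
//     processAllSlices(ForkJoinContext *cx)
//     {
//         uint16_t sliceId;
//         while (cx->getSlice(&sliceId)) {
//             if (!cx->check())
//                 return false;            // parallel phase aborted; bail out
//             // ... process slice |sliceId| ...
//         }
//         return true;                     // no more slices to hand out
//     }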
   1.434 +
   1.435 +// Locks a JSContext for its scope. Be very careful, because locking a
   1.436 +// JSContext does *not* allow you to safely mutate the data in the
   1.437 +// JSContext unless you can guarantee that any of the other threads
   1.438 +// that want to access that data will also acquire the lock, which is
   1.439 +// generally not the case. For example, the lock is used in the IC
   1.440 +// code to allow us to atomically patch up the dispatch table, but we
   1.441 +// must be aware that other threads may be reading from the table even
   1.442 +// as we write to it (though they cannot be writing, since they must
   1.443 +// hold the lock to write).
   1.444 +class LockedJSContext
   1.445 +{
   1.446 +#if defined(JS_THREADSAFE) && defined(JS_ION)
   1.447 +    ForkJoinContext *cx_;
   1.448 +#endif
   1.449 +    JSContext *jscx_;
   1.450 +
   1.451 +  public:
   1.452 +    LockedJSContext(ForkJoinContext *cx)
   1.453 +#if defined(JS_THREADSAFE) && defined(JS_ION)
   1.454 +      : cx_(cx),
   1.455 +        jscx_(cx->acquireJSContext())
   1.456 +#else
   1.457 +      : jscx_(nullptr)
   1.458 +#endif
   1.459 +    { }
   1.460 +
   1.461 +    ~LockedJSContext() {
   1.462 +#if defined(JS_THREADSAFE) && defined(JS_ION)
   1.463 +        cx_->releaseJSContext();
   1.464 +#endif
   1.465 +    }
   1.466 +
   1.467 +    operator JSContext *() { return jscx_; }
   1.468 +    JSContext *operator->() { return jscx_; }
   1.469 +};
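
// Typical usage (illustrative): hold the runtime's JSContext for the duration
// of a scope, e.g. while atomically patching an IC from parallel code:
//
//     {
//         LockedJSContext ncx(cx);         // cx is the current ForkJoinContext
//         // ... use |ncx| as a JSContext * while the lock is held ...
//     }                                    // lock released on scope exit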
   1.470 +
   1.471 +bool InExclusiveParallelSection();
   1.472 +
   1.473 +bool ParallelTestsShouldPass(JSContext *cx);
   1.474 +
   1.475 +void RequestInterruptForForkJoin(JSRuntime *rt, JSRuntime::InterruptMode mode);
   1.476 +
   1.477 +bool intrinsic_SetForkJoinTargetRegion(JSContext *cx, unsigned argc, Value *vp);
   1.478 +extern const JSJitInfo intrinsic_SetForkJoinTargetRegionInfo;
   1.479 +
   1.480 +bool intrinsic_ClearThreadLocalArenas(JSContext *cx, unsigned argc, Value *vp);
   1.481 +extern const JSJitInfo intrinsic_ClearThreadLocalArenasInfo;
   1.482 +
   1.483 +///////////////////////////////////////////////////////////////////////////
   1.484 +// Debug Spew
   1.485 +
   1.486 +namespace jit {
   1.487 +    class MDefinition;
   1.488 +}
   1.489 +
   1.490 +namespace parallel {
   1.491 +
   1.492 +enum ExecutionStatus {
   1.493 +    // Parallel or seq execution terminated in a fatal way, operation failed
   1.494 +    ExecutionFatal,
   1.495 +
   1.496 +    // Parallel exec failed and so we fell back to sequential
   1.497 +    ExecutionSequential,
   1.498 +
   1.499 +    // We completed the work in seq mode before parallel compilation completed
   1.500 +    ExecutionWarmup,
   1.501 +
   1.502 +    // Parallel exec was successful after some number of bailouts
   1.503 +    ExecutionParallel
   1.504 +};
   1.505 +
   1.506 +enum SpewChannel {
   1.507 +    SpewOps,
   1.508 +    SpewCompile,
   1.509 +    SpewBailouts,
   1.510 +    NumSpewChannels
   1.511 +};
   1.512 +
   1.513 +#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
   1.514 +
   1.515 +bool SpewEnabled(SpewChannel channel);
   1.516 +void Spew(SpewChannel channel, const char *fmt, ...);
   1.517 +void SpewBeginOp(JSContext *cx, const char *name);
   1.518 +void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
   1.519 +                 ParallelBailoutCause cause);
   1.520 +ExecutionStatus SpewEndOp(ExecutionStatus status);
   1.521 +void SpewBeginCompile(HandleScript script);
   1.522 +jit::MethodStatus SpewEndCompile(jit::MethodStatus status);
   1.523 +void SpewMIR(jit::MDefinition *mir, const char *fmt, ...);
   1.524 +void SpewBailoutIR(IonLIRTraceData *data);
   1.525 +
   1.526 +#else
   1.527 +
   1.528 +static inline bool SpewEnabled(SpewChannel channel) { return false; }
   1.529 +static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
   1.530 +static inline void SpewBeginOp(JSContext *cx, const char *name) { }
   1.531 +static inline void SpewBailout(uint32_t count, HandleScript script,
   1.532 +                               jsbytecode *pc, ParallelBailoutCause cause) {}
   1.533 +static inline ExecutionStatus SpewEndOp(ExecutionStatus status) { return status; }
   1.534 +static inline void SpewBeginCompile(HandleScript script) { }
   1.535 +#ifdef JS_ION
   1.536 +static inline jit::MethodStatus SpewEndCompile(jit::MethodStatus status) { return status; }
   1.537 +static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
   1.538 +#endif
   1.539 +static inline void SpewBailoutIR(IonLIRTraceData *data) { }
   1.540 +
   1.541 +#endif // DEBUG && JS_THREADSAFE && JS_ION
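
// Example usage (illustrative; in builds without spew support the calls above
// compile to no-ops), with |name| and |cause| standing in for call-site
// locals:
//
//     if (SpewEnabled(SpewBailouts))
//         Spew(SpewBailouts, "bailout in %s: cause %d", name, cause);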
   1.542 +
   1.543 +} // namespace parallel
   1.544 +} // namespace js
   1.545 +
   1.546 +/* static */ inline js::ForkJoinContext *
   1.547 +js::ForkJoinContext::current()
   1.548 +{
   1.549 +    return tlsForkJoinContext.get();
   1.550 +}
   1.551 +
   1.552 +namespace js {
   1.553 +
   1.554 +static inline bool
   1.555 +InParallelSection()
   1.556 +{
   1.557 +    return ForkJoinContext::current() != nullptr;
   1.558 +}
   1.559 +
   1.560 +} // namespace js
   1.561 +
   1.562 +#endif /* vm_ForkJoin_h */
