Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it matters most. One open
question remains: how to get FindCookie callers to test the condition and
pass a nullptr when double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef vm_ForkJoin_h |
michael@0 | 8 | #define vm_ForkJoin_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/ThreadLocal.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "jscntxt.h" |
michael@0 | 13 | |
michael@0 | 14 | #include "gc/GCInternals.h" |
michael@0 | 15 | |
michael@0 | 16 | #include "jit/Ion.h" |
michael@0 | 17 | |
michael@0 | 18 | /////////////////////////////////////////////////////////////////////////// |
michael@0 | 19 | // Read Me First |
michael@0 | 20 | // |
michael@0 | 21 | // The ForkJoin abstraction: |
michael@0 | 22 | // ------------------------- |
michael@0 | 23 | // |
michael@0 | 24 | // This is the building block for executing multi-threaded JavaScript with |
michael@0 | 25 | // shared memory (as distinct from Web Workers). The idea is that you have |
michael@0 | 26 | // some (typically data-parallel) operation which you wish to execute in |
michael@0 | 27 | // parallel across as many threads as you have available. |
michael@0 | 28 | // |
michael@0 | 29 | // The ForkJoin abstraction is intended to be used by self-hosted code |
michael@0 | 30 | // to enable parallel execution. At the top-level, it consists of a native |
michael@0 | 31 | // function (exposed as the ForkJoin intrinsic) that is used like so: |
michael@0 | 32 | // |
michael@0 | 33 | // ForkJoin(func, sliceStart, sliceEnd, mode) |
michael@0 | 34 | // |
// The intention of this statement is to start some number (usually the
michael@0 | 36 | // number of hardware threads) of copies of |func()| running in parallel. Each |
michael@0 | 37 | // copy will then do a portion of the total work, depending on |
michael@0 | 38 | // workstealing-based load balancing. |
michael@0 | 39 | // |
michael@0 | 40 | // Typically, each of the N slices runs in a different worker thread, but that |
michael@0 | 41 | // is not something you should rely upon---if work-stealing is enabled it |
michael@0 | 42 | // could be that a single worker thread winds up handling multiple slices. |
michael@0 | 43 | // |
michael@0 | 44 | // The second and third arguments, |sliceStart| and |sliceEnd|, are the slice |
michael@0 | 45 | // boundaries. These numbers must each fit inside an uint16_t. |
michael@0 | 46 | // |
michael@0 | 47 | // The fourth argument, |mode|, is an internal mode integer giving finer |
michael@0 | 48 | // control over the behavior of ForkJoin. See the |ForkJoinMode| enum. |
michael@0 | 49 | // |
michael@0 | 50 | // func() should expect the following arguments: |
michael@0 | 51 | // |
michael@0 | 52 | // func(workerId, sliceStart, sliceEnd) |
michael@0 | 53 | // |
michael@0 | 54 | // The |workerId| parameter is the id of the worker executing the function. It |
michael@0 | 55 | // is 0 in sequential mode. |
michael@0 | 56 | // |
// The |sliceStart| and |sliceEnd| parameters are the current bounds that
michael@0 | 58 | // the worker is handling. In parallel execution, these parameters are not |
michael@0 | 59 | // used. In sequential execution, they tell the worker what slices should be |
michael@0 | 60 | // processed. During the warm up phase, sliceEnd == sliceStart + 1. |
michael@0 | 61 | // |
michael@0 | 62 | // |func| can keep asking for more work from the scheduler by calling the |
michael@0 | 63 | // intrinsic |GetForkJoinSlice(sliceStart, sliceEnd, id)|. When there are no |
michael@0 | 64 | // more slices to hand out, ThreadPool::MAX_SLICE_ID is returned as a sentinel |
michael@0 | 65 | // value. By exposing this function as an intrinsic, we reduce the number of |
michael@0 | 66 | // JS-C++ boundary crossings incurred by workstealing, which may have many |
michael@0 | 67 | // slices. |
michael@0 | 68 | // |
michael@0 | 69 | // In sequential execution, |func| should return the maximum computed slice id |
michael@0 | 70 | // S for which all slices with id < S have already been processed. This is so |
michael@0 | 71 | // ThreadPool can track the leftmost completed slice id to maintain |
michael@0 | 72 | // determinism. Slices which have been completed in sequential execution |
michael@0 | 73 | // cannot be re-run in parallel execution. |
michael@0 | 74 | // |
michael@0 | 75 | // In parallel execution, |func| MUST PROCESS ALL SLICES BEFORE RETURNING! |
michael@0 | 76 | // Not doing so is an error and is protected by debug asserts in ThreadPool. |
michael@0 | 77 | // |
michael@0 | 78 | // Warmups and Sequential Fallbacks |
michael@0 | 79 | // -------------------------------- |
michael@0 | 80 | // |
michael@0 | 81 | // ForkJoin can only execute code in parallel when it has been |
michael@0 | 82 | // ion-compiled in Parallel Execution Mode. ForkJoin handles this part |
michael@0 | 83 | // for you. However, because ion relies on having decent type |
michael@0 | 84 | // information available, it is necessary to run the code sequentially |
michael@0 | 85 | // for a few iterations first to get the various type sets "primed" |
michael@0 | 86 | // with reasonable information. We try to make do with just a few |
// runs, under the hypothesis that parallel execution code reaches
// type stability relatively quickly.
michael@0 | 89 | // |
michael@0 | 90 | // The general strategy of ForkJoin is as follows: |
michael@0 | 91 | // |
michael@0 | 92 | // - If the code has not yet been run, invoke `func` sequentially with |
michael@0 | 93 | // warmup set to true. When warmup is true, `func` should try and |
michael@0 | 94 | // do less work than normal---just enough to prime type sets. (See |
michael@0 | 95 | // ParallelArray.js for a discussion of specifically how we do this |
michael@0 | 96 | // in the case of ParallelArray). |
michael@0 | 97 | // |
michael@0 | 98 | // - Try to execute the code in parallel. Parallel execution mode has |
michael@0 | 99 | // three possible results: success, fatal error, or bailout. If a |
michael@0 | 100 | // bailout occurs, it means that the code attempted some action |
michael@0 | 101 | // which is not possible in parallel mode. This might be a |
michael@0 | 102 | // modification to shared state, but it might also be that it |
//   attempted to take some theoretically pure action that has not been
michael@0 | 104 | // made threadsafe (yet?). |
michael@0 | 105 | // |
michael@0 | 106 | // - If parallel execution is successful, ForkJoin returns true. |
michael@0 | 107 | // |
michael@0 | 108 | // - If parallel execution results in a fatal error, ForkJoin returns false. |
michael@0 | 109 | // |
michael@0 | 110 | // - If parallel execution results in a *bailout*, this is when things |
michael@0 | 111 | // get interesting. In that case, the semantics of parallel |
michael@0 | 112 | // execution guarantee us that no visible side effects have occurred |
michael@0 | 113 | // (unless they were performed with the intrinsic |
michael@0 | 114 | // |UnsafePutElements()|, which can only be used in self-hosted |
michael@0 | 115 | // code). We therefore reinvoke |func()| but with warmup set to |
michael@0 | 116 | // true. The idea here is that often parallel bailouts result from |
michael@0 | 117 | // a failed type guard or other similar assumption, so rerunning the |
michael@0 | 118 | // warmup sequentially gives us a chance to recompile with more |
michael@0 | 119 | // data. Because warmup is true, we do not expect this sequential |
michael@0 | 120 | // call to process all remaining data, just a chunk. After this |
michael@0 | 121 | // recovery execution is complete, we again attempt parallel |
michael@0 | 122 | // execution. |
michael@0 | 123 | // |
michael@0 | 124 | // - If more than a fixed number of bailouts occur, we give up on |
michael@0 | 125 | // parallelization and just invoke |func()| N times in a row (once |
michael@0 | 126 | // for each worker) but with |warmup| set to false. |
michael@0 | 127 | // |
michael@0 | 128 | // Interrupts: |
michael@0 | 129 | // |
michael@0 | 130 | // During parallel execution, |cx.check()| must be periodically invoked to |
michael@0 | 131 | // check for interrupts. This is automatically done by the Ion-generated |
michael@0 | 132 | // code. If an interrupt has been requested |cx.check()| aborts parallel |
michael@0 | 133 | // execution. |
michael@0 | 134 | // |
michael@0 | 135 | // Transitive compilation: |
michael@0 | 136 | // |
michael@0 | 137 | // One of the challenges for parallel compilation is that we |
michael@0 | 138 | // (currently) have to abort when we encounter an uncompiled script. |
michael@0 | 139 | // Therefore, we try to compile everything that might be needed |
michael@0 | 140 | // beforehand. The exact strategy is described in `ParallelDo::apply()` |
michael@0 | 141 | // in ForkJoin.cpp, but at the highest level the idea is: |
michael@0 | 142 | // |
michael@0 | 143 | // 1. We maintain a flag on every script telling us if that script and |
michael@0 | 144 | // its transitive callees are believed to be compiled. If that flag |
michael@0 | 145 | // is set, we can skip the initial compilation. |
michael@0 | 146 | // 2. Otherwise, we maintain a worklist that begins with the main |
michael@0 | 147 | // script. We compile it and then examine the generated parallel IonScript, |
michael@0 | 148 | // which will have a list of callees. We enqueue those. Some of these |
michael@0 | 149 | // compilations may take place off the main thread, in which case |
michael@0 | 150 | // we will run warmup iterations while we wait for them to complete. |
michael@0 | 151 | // 3. If the warmup iterations finish all the work, we're done. |
michael@0 | 152 | // 4. If compilations fail, we fallback to sequential. |
michael@0 | 153 | // 5. Otherwise, we will try running in parallel once we're all done. |
michael@0 | 154 | // |
michael@0 | 155 | // Bailout tracing and recording: |
michael@0 | 156 | // |
michael@0 | 157 | // When a bailout occurs, we record a bit of state so that we can |
michael@0 | 158 | // recover with grace. Each |ForkJoinContext| has a pointer to a |
michael@0 | 159 | // |ParallelBailoutRecord| pre-allocated for this purpose. This |
michael@0 | 160 | // structure is used to record the cause of the bailout, the JSScript |
michael@0 | 161 | // which was executing, as well as the location in the source where |
michael@0 | 162 | // the bailout occurred (in principle, we can record a full stack |
michael@0 | 163 | // trace, but right now we only record the top-most frame). Note that |
michael@0 | 164 | // the error location might not be in the same JSScript as the one |
michael@0 | 165 | // which was executing due to inlining. |
michael@0 | 166 | // |
michael@0 | 167 | // Garbage collection and allocation: |
michael@0 | 168 | // |
michael@0 | 169 | // Code which executes on these parallel threads must be very careful |
michael@0 | 170 | // with respect to garbage collection and allocation. The typical |
michael@0 | 171 | // allocation paths are UNSAFE in parallel code because they access |
michael@0 | 172 | // shared state (the compartment's arena lists and so forth) without |
michael@0 | 173 | // any synchronization. They can also trigger GC in an ad-hoc way. |
michael@0 | 174 | // |
michael@0 | 175 | // To deal with this, the forkjoin code creates a distinct |Allocator| |
michael@0 | 176 | // object for each slice. You can access the appropriate object via |
michael@0 | 177 | // the |ForkJoinContext| object that is provided to the callbacks. Once |
michael@0 | 178 | // the execution is complete, all the objects found in these distinct |
// |Allocator| objects are merged back into the main compartment lists and
michael@0 | 180 | // things proceed normally. |
michael@0 | 181 | // |
michael@0 | 182 | // In Ion-generated code, we will do allocation through the |
michael@0 | 183 | // |Allocator| found in |ForkJoinContext| (which is obtained via TLS). |
michael@0 | 184 | // Also, no write barriers are emitted. Conceptually, we should never |
michael@0 | 185 | // need a write barrier because we only permit writes to objects that |
michael@0 | 186 | // are newly allocated, and such objects are always black (to use |
michael@0 | 187 | // incremental GC terminology). However, to be safe, we also block |
michael@0 | 188 | // upon entering a parallel section to ensure that any concurrent |
michael@0 | 189 | // marking or incremental GC has completed. |
michael@0 | 190 | // |
michael@0 | 191 | // In the future, it should be possible to lift the restriction that |
michael@0 | 192 | // we must block until inc. GC has completed and also to permit GC |
// during parallel execution. But we're not there yet.
michael@0 | 194 | // |
michael@0 | 195 | // Load balancing (work stealing): |
michael@0 | 196 | // |
michael@0 | 197 | // The ForkJoin job is dynamically divided into a fixed number of slices, |
michael@0 | 198 | // and is submitted for parallel execution in the pool. When the number |
michael@0 | 199 | // of slices is big enough (typically greater than the number of workers |
michael@0 | 200 | // in the pool) -and the workload is unbalanced- each worker thread |
michael@0 | 201 | // will perform load balancing through work stealing. The number |
michael@0 | 202 | // of slices is computed by the self-hosted function |ComputeNumSlices| |
michael@0 | 203 | // and can be used to know how many slices will be executed by the |
michael@0 | 204 | // runtime for an array of the given size. |
michael@0 | 205 | // |
michael@0 | 206 | // Current Limitations: |
michael@0 | 207 | // |
michael@0 | 208 | // - The API does not support recursive or nested use. That is, the |
michael@0 | 209 | // JavaScript function given to |ForkJoin| should not itself invoke |
michael@0 | 210 | // |ForkJoin()|. Instead, use the intrinsic |InParallelSection()| to |
michael@0 | 211 | // check for recursive use and execute a sequential fallback. |
michael@0 | 212 | // |
michael@0 | 213 | /////////////////////////////////////////////////////////////////////////// |
michael@0 | 214 | |
michael@0 | 215 | namespace js { |
michael@0 | 216 | |
// RAII activation pushed while a fork-join section runs. The constructor
// and destructor are defined out of line (in ForkJoin.cpp); |prevIonTop_|
// presumably saves the Ion stack top for restoration on exit -- TODO
// confirm against ForkJoin.cpp.
class ForkJoinActivation : public Activation
{
    uint8_t *prevIonTop_;

    // We ensure that incremental GC be finished before we enter into a fork
    // join section, but the runtime/zone might still be marked as needing
    // barriers due to being in the middle of verifying barriers. Pause
    // verification during the fork join section.
    gc::AutoStopVerifyingBarriers av_;

  public:
    ForkJoinActivation(JSContext *cx);
    ~ForkJoinActivation();
};
michael@0 | 231 | |
class ForkJoinContext;

// Native implementation of the ForkJoin intrinsic described in the
// "Read Me First" comment above: ForkJoin(func, sliceStart, sliceEnd, mode).
// Returns false on fatal error (see ExecutionFatal below).
bool ForkJoin(JSContext *cx, CallArgs &args);
michael@0 | 235 | |
// Snapshot describing an Ion LIR instruction, used for debug tracing of
// bailouts (see |ForkJoinContext::traceData| and |SpewBailoutIR|).
struct IonLIRTraceData {
    uint32_t blockIndex;    // Index of the LIR block.
    uint32_t lirIndex;      // Index of the instruction within the block.
    uint32_t execModeInt;   // Execution mode, stored as an integer.
    const char *lirOpName;  // LIR opcode name.
    const char *mirOpName;  // Originating MIR opcode name.
    JSScript *script;       // Script containing the instruction.
    jsbytecode *pc;         // Bytecode position within |script|.
};
michael@0 | 245 | |
michael@0 | 246 | /////////////////////////////////////////////////////////////////////////// |
michael@0 | 247 | // Bailout tracking |
michael@0 | 248 | |
// Why a parallel execution bailed out; recorded in ParallelBailoutRecord
// (see "Bailout tracing and recording" in the comment above).
enum ParallelBailoutCause {
    // No bailout has occurred.
    ParallelBailoutNone,

    // Compiler returned Method_Skipped
    ParallelBailoutCompilationSkipped,

    // Compiler returned Method_CantCompile
    ParallelBailoutCompilationFailure,

    // The periodic interrupt failed, which can mean that either
    // another thread canceled, the user interrupted us, etc
    ParallelBailoutInterrupt,

    // An IC update failed
    ParallelBailoutFailedIC,

    // Heap busy flag was set during interrupt
    ParallelBailoutHeapBusy,

    // The remaining causes are named after the condition that triggered
    // them; see their use sites in ForkJoin.cpp and the parallel-safety
    // analysis for details.
    ParallelBailoutMainScriptNotPresent,
    ParallelBailoutCalledToUncompiledScript,
    ParallelBailoutIllegalWrite,
    ParallelBailoutAccessToIntrinsic,
    ParallelBailoutOverRecursed,
    ParallelBailoutOutOfMemory,
    ParallelBailoutUnsupported,
    ParallelBailoutUnsupportedVM,
    ParallelBailoutUnsupportedStringComparison,
    ParallelBailoutRequestedGC,
    ParallelBailoutRequestedZoneGC,
};
michael@0 | 280 | |
// One recorded stack frame of a bailout: the script that was executing
// and the bytecode at which the bailout occurred.
struct ParallelBailoutTrace {
    JSScript *script;
    jsbytecode *bytecode;
};
michael@0 | 285 | |
michael@0 | 286 | // See "Bailouts" section in comment above. |
// See "Bailouts" section in comment above.
//
// Pre-allocated, one per ForkJoinContext; records the cause of a parallel
// bailout and up to MaxDepth frames of where it happened. Method bodies
// live in ForkJoin.cpp.
struct ParallelBailoutRecord {
    JSScript *topScript;
    ParallelBailoutCause cause;

    // Eventually we will support deeper traces,
    // but for now we gather at most a single frame.
    static const uint32_t MaxDepth = 1;
    uint32_t depth;
    ParallelBailoutTrace trace[MaxDepth];

    // Prepare the record for use; reset() clears it for reuse.
    void init(JSContext *cx);
    void reset(JSContext *cx);

    // Record |cause|, optionally with the inliner/inlinee scripts and pc.
    void setCause(ParallelBailoutCause cause,
                  JSScript *outermostScript = nullptr,   // inliner (if applicable)
                  JSScript *currentScript = nullptr,     // inlinee (if applicable)
                  jsbytecode *currentPc = nullptr);

    // Like setCause but with all arguments required -- NOTE(review): exact
    // semantic difference is defined in ForkJoin.cpp; confirm there.
    void updateCause(ParallelBailoutCause cause,
                     JSScript *outermostScript,
                     JSScript *currentScript,
                     jsbytecode *currentPc);

    // Append a (script, pc) frame to |trace| (bounded by MaxDepth).
    void addTrace(JSScript *script,
                  jsbytecode *pc);
};
michael@0 | 310 | |
michael@0 | 311 | struct ForkJoinShared; |
michael@0 | 312 | |
// Per-thread context for code executing inside a parallel (fork-join)
// section. It is allocated on the stack (see |nogc_|), tied to a
// ThreadPoolWorker, and published to running code through TLS (see
// |current()| and |initialize()|).
class ForkJoinContext : public ThreadSafeContext
{
  public:
    // Bailout record used to record the reason this thread stopped executing
    ParallelBailoutRecord *const bailoutRecord;

#ifdef DEBUG
    // Records the last instr. to execute on this thread.
    IonLIRTraceData traceData;

    // The maximum worker id.
    uint32_t maxWorkerId;
#endif

    // When we run a par operation like mapPar, we create an out pointer
    // into a specific region of the destination buffer. Even though the
    // destination buffer is not thread-local, it is permissible to write into
    // it via the handles provided. These two fields identify the memory
    // region where writes are allowed so that the write guards can test for
    // it.
    //
    // Note: we only permit writes into the *specific region* that the user
    // is supposed to write. Normally, they only have access to this region
    // anyhow. But due to sequential fallback it is possible for handles into
    // other regions to escape into global variables in the sequential
    // execution and then get accessed by later parallel sections. Thus we
    // must be careful and ensure that the write is going through a handle
    // into the correct *region* of the buffer.
    uint8_t *targetRegionStart;
    uint8_t *targetRegionEnd;

    ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
                    Allocator *allocator, ForkJoinShared *shared,
                    ParallelBailoutRecord *bailoutRecord);

    // Get the worker id. The main thread by convention has the id of the max
    // worker thread id + 1.
    uint32_t workerId() const { return worker_->id(); }

    // Get a slice of work for the worker associated with the context.
    bool getSlice(uint16_t *sliceId) { return worker_->getSlice(this, sliceId); }

    // True if this is the main thread, false if it is one of the parallel workers.
    bool isMainThread() const;

    // When the code would normally trigger a GC, we don't trigger it
    // immediately but instead record that request here. This will
    // cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or
    // |TriggerCompartmentGC()| as appropriate once the parallel
    // section is complete. This is done because those routines do
    // various preparations that are not thread-safe, and because the
    // full set of arenas is not available until the end of the
    // parallel section.
    void requestGC(JS::gcreason::Reason reason);
    void requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason);

    // Set the fatal flag for the next abort. Used to distinguish retry or
    // fatal aborts from VM functions.
    bool setPendingAbortFatal(ParallelBailoutCause cause);

    // Reports an unsupported operation, returning false if we are reporting
    // an error. Otherwise drop the warning on the floor.
    bool reportError(ParallelBailoutCause cause, unsigned report) {
        if (report & JSREPORT_ERROR)
            return setPendingAbortFatal(cause);
        return true;
    }

    // During the parallel phase, this method should be invoked
    // periodically, for example on every backedge, similar to the
    // interrupt check. If it returns false, then the parallel phase
    // has been aborted and so you should bailout. The function may
    // also rendezvous to perform GC or do other similar things.
    //
    // This function is guaranteed to have no effect if
    // runtime()->interruptPar is zero. Ion-generated code takes
    // advantage of this by inlining the checks on that flag before
    // actually calling this function. If this function ends up
    // getting called a lot from outside ion code, we can refactor
    // it into an inlined version with this check that calls a slower
    // version.
    bool check();

    // Be wary, the runtime is shared between all threads!
    JSRuntime *runtime();

    // Acquire and release the JSContext from the runtime.
    JSContext *acquireJSContext();
    void releaseJSContext();
    bool hasAcquiredJSContext() const;

    // Check the current state of parallel execution.
    static inline ForkJoinContext *current();

    // Initializes the thread-local state.
    static bool initialize();

    // Used in inlining GetForkJoinSlice.
    static size_t offsetOfWorker() {
        return offsetof(ForkJoinContext, worker_);
    }

  private:
    friend class AutoSetForkJoinContext;

    // Initialized by initialize()
    static mozilla::ThreadLocal<ForkJoinContext*> tlsForkJoinContext;

    // Shared state for the whole fork-join operation, common to all workers.
    ForkJoinShared *const shared_;

    // The thread-pool worker this context belongs to; provides the worker
    // id and work-stealing slices (see workerId()/getSlice()).
    ThreadPoolWorker *worker_;

    // True while this context holds the JSContext obtained via
    // acquireJSContext() and not yet released.
    bool acquiredJSContext_;

    // ForkJoinContext is allocated on the stack. It would be dangerous to GC
    // with it live because of the GC pointer fields stored in the context.
    JS::AutoAssertNoGC nogc_;
};
michael@0 | 431 | |
michael@0 | 432 | // Locks a JSContext for its scope. Be very careful, because locking a |
michael@0 | 433 | // JSContext does *not* allow you to safely mutate the data in the |
michael@0 | 434 | // JSContext unless you can guarantee that any of the other threads |
michael@0 | 435 | // that want to access that data will also acquire the lock, which is |
michael@0 | 436 | // generally not the case. For example, the lock is used in the IC |
michael@0 | 437 | // code to allow us to atomically patch up the dispatch table, but we |
michael@0 | 438 | // must be aware that other threads may be reading from the table even |
michael@0 | 439 | // as we write to it (though they cannot be writing, since they must |
michael@0 | 440 | // hold the lock to write). |
michael@0 | 441 | class LockedJSContext |
michael@0 | 442 | { |
michael@0 | 443 | #if defined(JS_THREADSAFE) && defined(JS_ION) |
michael@0 | 444 | ForkJoinContext *cx_; |
michael@0 | 445 | #endif |
michael@0 | 446 | JSContext *jscx_; |
michael@0 | 447 | |
michael@0 | 448 | public: |
michael@0 | 449 | LockedJSContext(ForkJoinContext *cx) |
michael@0 | 450 | #if defined(JS_THREADSAFE) && defined(JS_ION) |
michael@0 | 451 | : cx_(cx), |
michael@0 | 452 | jscx_(cx->acquireJSContext()) |
michael@0 | 453 | #else |
michael@0 | 454 | : jscx_(nullptr) |
michael@0 | 455 | #endif |
michael@0 | 456 | { } |
michael@0 | 457 | |
michael@0 | 458 | ~LockedJSContext() { |
michael@0 | 459 | #if defined(JS_THREADSAFE) && defined(JS_ION) |
michael@0 | 460 | cx_->releaseJSContext(); |
michael@0 | 461 | #endif |
michael@0 | 462 | } |
michael@0 | 463 | |
michael@0 | 464 | operator JSContext *() { return jscx_; } |
michael@0 | 465 | JSContext *operator->() { return jscx_; } |
michael@0 | 466 | }; |
michael@0 | 467 | |
bool InExclusiveParallelSection();

bool ParallelTestsShouldPass(JSContext *cx);

void RequestInterruptForForkJoin(JSRuntime *rt, JSRuntime::InterruptMode mode);

// Self-hosting intrinsic (plus its JIT info) implementing
// |SetForkJoinTargetRegion|; see the targetRegionStart/targetRegionEnd
// discussion in ForkJoinContext above for the write-region mechanism.
bool intrinsic_SetForkJoinTargetRegion(JSContext *cx, unsigned argc, Value *vp);
extern const JSJitInfo intrinsic_SetForkJoinTargetRegionInfo;

// Self-hosting intrinsic (plus its JIT info) that clears thread-local
// arenas -- presumably the per-slice |Allocator| arenas described above;
// confirm against the implementation.
bool intrinsic_ClearThreadLocalArenas(JSContext *cx, unsigned argc, Value *vp);
extern const JSJitInfo intrinsic_ClearThreadLocalArenasInfo;
michael@0 | 479 | |
michael@0 | 480 | /////////////////////////////////////////////////////////////////////////// |
michael@0 | 481 | // Debug Spew |
michael@0 | 482 | |
michael@0 | 483 | namespace jit { |
michael@0 | 484 | class MDefinition; |
michael@0 | 485 | } |
michael@0 | 486 | |
michael@0 | 487 | namespace parallel { |
michael@0 | 488 | |
// Overall outcome of a ForkJoin operation (see SpewEndOp).
enum ExecutionStatus {
    // Parallel or seq execution terminated in a fatal way, operation failed
    ExecutionFatal,

    // Parallel exec failed and so we fell back to sequential
    ExecutionSequential,

    // We completed the work in seq mode before parallel compilation completed
    ExecutionWarmup,

    // Parallel exec was successful after some number of bailouts
    ExecutionParallel
};
michael@0 | 502 | |
// Channels for the parallel debug spew below; NumSpewChannels is the count.
enum SpewChannel {
    SpewOps,       // Operation-level events (begin/end op, bailouts counts).
    SpewCompile,   // Parallel compilation events.
    SpewBailouts,  // Bailout details.
    NumSpewChannels
};
michael@0 | 509 | |
// Debug spew API. Real implementations exist only in DEBUG builds with
// threads and Ion enabled; otherwise the inline no-op stubs below are
// compiled instead, so call sites need no #ifdefs.
#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)

bool SpewEnabled(SpewChannel channel);
void Spew(SpewChannel channel, const char *fmt, ...);
void SpewBeginOp(JSContext *cx, const char *name);
void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
                 ParallelBailoutCause cause);
ExecutionStatus SpewEndOp(ExecutionStatus status);
void SpewBeginCompile(HandleScript script);
jit::MethodStatus SpewEndCompile(jit::MethodStatus status);
void SpewMIR(jit::MDefinition *mir, const char *fmt, ...);
void SpewBailoutIR(IonLIRTraceData *data);

#else

// No-op stubs used when spew is compiled out. SpewEndOp/SpewEndCompile
// pass their argument through so they can wrap return statements.
static inline bool SpewEnabled(SpewChannel channel) { return false; }
static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
static inline void SpewBeginOp(JSContext *cx, const char *name) { }
static inline void SpewBailout(uint32_t count, HandleScript script,
                               jsbytecode *pc, ParallelBailoutCause cause) {}
static inline ExecutionStatus SpewEndOp(ExecutionStatus status) { return status; }
static inline void SpewBeginCompile(HandleScript script) { }
#ifdef JS_ION
static inline jit::MethodStatus SpewEndCompile(jit::MethodStatus status) { return status; }
static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
#endif
static inline void SpewBailoutIR(IonLIRTraceData *data) { }

#endif // DEBUG && JS_THREADSAFE && JS_ION
michael@0 | 539 | |
michael@0 | 540 | } // namespace parallel |
michael@0 | 541 | } // namespace js |
michael@0 | 542 | |
michael@0 | 543 | /* static */ inline js::ForkJoinContext * |
michael@0 | 544 | js::ForkJoinContext::current() |
michael@0 | 545 | { |
michael@0 | 546 | return tlsForkJoinContext.get(); |
michael@0 | 547 | } |
michael@0 | 548 | |
michael@0 | 549 | namespace js { |
michael@0 | 550 | |
michael@0 | 551 | static inline bool |
michael@0 | 552 | InParallelSection() |
michael@0 | 553 | { |
michael@0 | 554 | return ForkJoinContext::current() != nullptr; |
michael@0 | 555 | } |
michael@0 | 556 | |
michael@0 | 557 | } // namespace js |
michael@0 | 558 | |
michael@0 | 559 | #endif /* vm_ForkJoin_h */ |