Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts.
One remaining reservation: how to convince FindCookie callers to test the
condition themselves and pass a nullptr when double-key logic is disabled.
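A minimal sketch of the intended gating, assuming hypothetical names (CookieRequestInfo, FirstPartyKeyIfIsolated) rather than the real necko/cookie-service API: a helper decides whether double keying applies and yields the first-party key, so FindCookie-style callers can simply forward a nullptr when it is disabled.

// Hypothetical sketch only; names and types are illustrative, not the
// actual Gecko cookie-service signatures.
#include <string>

struct CookieRequestInfo {
    bool isPrivateBrowsing;            // request originates from a private window
    bool thirdPartyIsolatePref;        // cached privacy.thirdparty.isolate value
    std::string firstPartyBaseDomain;  // base domain of the top-level document
};

// Returns a pointer to the first-party key when double keying is enabled,
// or nullptr when it is disabled, so lookup code can pass the result
// straight through without re-checking the policy.
const std::string *
FirstPartyKeyIfIsolated(const CookieRequestInfo &info)
{
    if (info.isPrivateBrowsing || info.thirdPartyIsolatePref)
        return &info.firstPartyBaseDomain;  // enable double-key lookups
    return nullptr;                         // double-key logic disabled
}

Under this sketch, GetCookieStringCommon would compute the key once per request and forward it (or nullptr) into FindCookie, keeping FindCookie itself unaware of why double keying is off.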
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef jit_IonCode_h |
michael@0 | 8 | #define jit_IonCode_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/Atomics.h" |
michael@0 | 11 | #include "mozilla/MemoryReporting.h" |
michael@0 | 12 | #include "mozilla/PodOperations.h" |
michael@0 | 13 | |
michael@0 | 14 | #include "jsinfer.h" |
michael@0 | 15 | #include "jstypes.h" |
michael@0 | 16 | |
michael@0 | 17 | #include "assembler/jit/ExecutableAllocator.h" |
michael@0 | 18 | #include "gc/Heap.h" |
michael@0 | 19 | #include "jit/IonOptimizationLevels.h" |
michael@0 | 20 | #include "jit/IonTypes.h" |
michael@0 | 21 | |
michael@0 | 22 | namespace JSC { |
michael@0 | 23 | class ExecutablePool; |
michael@0 | 24 | } |
michael@0 | 25 | |
michael@0 | 26 | namespace js { |
michael@0 | 27 | |
michael@0 | 28 | class AsmJSModule; |
michael@0 | 29 | |
michael@0 | 30 | namespace jit { |
michael@0 | 31 | |
michael@0 | 32 | class MacroAssembler; |
michael@0 | 33 | class CodeOffsetLabel; |
michael@0 | 34 | class PatchableBackedge; |
michael@0 | 35 | |
michael@0 | 36 | class JitCode : public gc::BarrieredCell<JitCode> |
michael@0 | 37 | { |
michael@0 | 38 | protected: |
michael@0 | 39 | uint8_t *code_; |
michael@0 | 40 | JSC::ExecutablePool *pool_; |
michael@0 | 41 | uint32_t bufferSize_; // Total buffer size. Does not include headerSize_. |
michael@0 | 42 | uint32_t insnSize_; // Instruction stream size. |
michael@0 | 43 | uint32_t dataSize_; // Size of the read-only data area. |
michael@0 | 44 | uint32_t jumpRelocTableBytes_; // Size of the jump relocation table. |
michael@0 | 45 | uint32_t dataRelocTableBytes_; // Size of the data relocation table. |
michael@0 | 46 | uint32_t preBarrierTableBytes_; // Size of the prebarrier table. |
michael@0 | 47 | uint8_t headerSize_ : 5; // Number of bytes allocated before codeStart. |
michael@0 | 48 | uint8_t kind_ : 3; // JSC::CodeKind, for the memory reporters. |
michael@0 | 49 | bool invalidated_ : 1; // Whether the code object has been invalidated. |
michael@0 | 50 | // This is necessary to prevent GC tracing. |
michael@0 | 51 | |
michael@0 | 52 | #if JS_BITS_PER_WORD == 32 |
michael@0 | 53 | // Ensure JitCode is gc::Cell aligned. |
michael@0 | 54 | uint32_t padding_; |
michael@0 | 55 | #endif |
michael@0 | 56 | |
michael@0 | 57 | JitCode() |
michael@0 | 58 | : code_(nullptr), |
michael@0 | 59 | pool_(nullptr) |
michael@0 | 60 | { } |
michael@0 | 61 | JitCode(uint8_t *code, uint32_t bufferSize, uint32_t headerSize, JSC::ExecutablePool *pool, |
michael@0 | 62 | JSC::CodeKind kind) |
michael@0 | 63 | : code_(code), |
michael@0 | 64 | pool_(pool), |
michael@0 | 65 | bufferSize_(bufferSize), |
michael@0 | 66 | insnSize_(0), |
michael@0 | 67 | dataSize_(0), |
michael@0 | 68 | jumpRelocTableBytes_(0), |
michael@0 | 69 | dataRelocTableBytes_(0), |
michael@0 | 70 | preBarrierTableBytes_(0), |
michael@0 | 71 | headerSize_(headerSize), |
michael@0 | 72 | kind_(kind), |
michael@0 | 73 | invalidated_(false) |
michael@0 | 74 | { |
michael@0 | 75 | MOZ_ASSERT(JSC::CodeKind(kind_) == kind); |
michael@0 | 76 | MOZ_ASSERT(headerSize_ == headerSize); |
michael@0 | 77 | } |
michael@0 | 78 | |
michael@0 | 79 | uint32_t dataOffset() const { |
michael@0 | 80 | return insnSize_; |
michael@0 | 81 | } |
michael@0 | 82 | uint32_t jumpRelocTableOffset() const { |
michael@0 | 83 | return dataOffset() + dataSize_; |
michael@0 | 84 | } |
michael@0 | 85 | uint32_t dataRelocTableOffset() const { |
michael@0 | 86 | return jumpRelocTableOffset() + jumpRelocTableBytes_; |
michael@0 | 87 | } |
michael@0 | 88 | uint32_t preBarrierTableOffset() const { |
michael@0 | 89 | return dataRelocTableOffset() + dataRelocTableBytes_; |
michael@0 | 90 | } |
michael@0 | 91 | |
michael@0 | 92 | public: |
michael@0 | 93 | uint8_t *raw() const { |
michael@0 | 94 | return code_; |
michael@0 | 95 | } |
michael@0 | 96 | size_t instructionsSize() const { |
michael@0 | 97 | return insnSize_; |
michael@0 | 98 | } |
michael@0 | 99 | void trace(JSTracer *trc); |
michael@0 | 100 | void finalize(FreeOp *fop); |
michael@0 | 101 | void setInvalidated() { |
michael@0 | 102 | invalidated_ = true; |
michael@0 | 103 | } |
michael@0 | 104 | |
michael@0 | 105 | void togglePreBarriers(bool enabled); |
michael@0 | 106 | |
michael@0 | 107 | // If this JitCode object has been, effectively, corrupted due to |
michael@0 | 108 | // invalidation patching, then we have to remember this so we don't try and |
michael@0 | 109 | // trace relocation entries that may now be corrupt. |
michael@0 | 110 | bool invalidated() const { |
michael@0 | 111 | return !!invalidated_; |
michael@0 | 112 | } |
michael@0 | 113 | |
michael@0 | 114 | template <typename T> T as() const { |
michael@0 | 115 | return JS_DATA_TO_FUNC_PTR(T, raw()); |
michael@0 | 116 | } |
michael@0 | 117 | |
michael@0 | 118 | void copyFrom(MacroAssembler &masm); |
michael@0 | 119 | |
michael@0 | 120 | static JitCode *FromExecutable(uint8_t *buffer) { |
michael@0 | 121 | JitCode *code = *(JitCode **)(buffer - sizeof(JitCode *)); |
michael@0 | 122 | JS_ASSERT(code->raw() == buffer); |
michael@0 | 123 | return code; |
michael@0 | 124 | } |
michael@0 | 125 | |
michael@0 | 126 | static size_t offsetOfCode() { |
michael@0 | 127 | return offsetof(JitCode, code_); |
michael@0 | 128 | } |
michael@0 | 129 | |
michael@0 | 130 | uint8_t *jumpRelocTable() { |
michael@0 | 131 | return code_ + jumpRelocTableOffset(); |
michael@0 | 132 | } |
michael@0 | 133 | |
michael@0 | 134 | // Allocates a new JitCode object which will be managed by the GC. If no |
michael@0 | 135 | // object can be allocated, nullptr is returned. On failure, |pool| is |
michael@0 | 136 | // automatically released, so the code may be freed. |
michael@0 | 137 | template <AllowGC allowGC> |
michael@0 | 138 | static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize, |
michael@0 | 139 | JSC::ExecutablePool *pool, JSC::CodeKind kind); |
michael@0 | 140 | |
michael@0 | 141 | public: |
michael@0 | 142 | static inline ThingRootKind rootKind() { return THING_ROOT_JIT_CODE; } |
michael@0 | 143 | }; |
michael@0 | 144 | |
michael@0 | 145 | class SnapshotWriter; |
michael@0 | 146 | class RecoverWriter; |
michael@0 | 147 | class SafepointWriter; |
michael@0 | 148 | class SafepointIndex; |
michael@0 | 149 | class OsiIndex; |
michael@0 | 150 | class IonCache; |
michael@0 | 151 | struct PatchableBackedgeInfo; |
michael@0 | 152 | struct CacheLocation; |
michael@0 | 153 | |
michael@0 | 154 | // Describes a single AsmJSModule which jumps (via an FFI exit with the given |
michael@0 | 155 | // index) directly into an IonScript. |
michael@0 | 156 | struct DependentAsmJSModuleExit |
michael@0 | 157 | { |
michael@0 | 158 | const AsmJSModule *module; |
michael@0 | 159 | size_t exitIndex; |
michael@0 | 160 | |
michael@0 | 161 | DependentAsmJSModuleExit(const AsmJSModule *module, size_t exitIndex) |
michael@0 | 162 | : module(module), |
michael@0 | 163 | exitIndex(exitIndex) |
michael@0 | 164 | { } |
michael@0 | 165 | }; |
michael@0 | 166 | |
michael@0 | 167 | // An IonScript attaches Ion-generated information to a JSScript. |
michael@0 | 168 | struct IonScript |
michael@0 | 169 | { |
michael@0 | 170 | private: |
michael@0 | 171 | // Code pointer containing the actual method. |
michael@0 | 172 | EncapsulatedPtr<JitCode> method_; |
michael@0 | 173 | |
michael@0 | 174 | // Deoptimization table used by this method. |
michael@0 | 175 | EncapsulatedPtr<JitCode> deoptTable_; |
michael@0 | 176 | |
michael@0 | 177 | // Entrypoint for OSR, or nullptr. |
michael@0 | 178 | jsbytecode *osrPc_; |
michael@0 | 179 | |
michael@0 | 180 | // Offset to OSR entrypoint from method_->raw(), or 0. |
michael@0 | 181 | uint32_t osrEntryOffset_; |
michael@0 | 182 | |
michael@0 | 183 | // Offset to entrypoint skipping type arg check from method_->raw(). |
michael@0 | 184 | uint32_t skipArgCheckEntryOffset_; |
michael@0 | 185 | |
michael@0 | 186 | // Offset of the invalidation epilogue (which pushes this IonScript |
michael@0 | 187 | // and calls the invalidation thunk). |
michael@0 | 188 | uint32_t invalidateEpilogueOffset_; |
michael@0 | 189 | |
michael@0 | 190 | // The offset immediately after the IonScript immediate. |
michael@0 | 191 | // NOTE: technically a constant delta from |
michael@0 | 192 | // |invalidateEpilogueOffset_|, so we could hard-code this |
michael@0 | 193 | // per-platform if we want. |
michael@0 | 194 | uint32_t invalidateEpilogueDataOffset_; |
michael@0 | 195 | |
michael@0 | 196 | // Number of times this script bailed out without invalidation. |
michael@0 | 197 | uint32_t numBailouts_; |
michael@0 | 198 | |
michael@0 | 199 | // Flag set when it is likely that one of our (transitive) call |
michael@0 | 200 | // targets is not compiled. Used in ForkJoin.cpp to decide when |
michael@0 | 201 | // we should add call targets to the worklist. |
michael@0 | 202 | mozilla::Atomic<bool, mozilla::Relaxed> hasUncompiledCallTarget_; |
michael@0 | 203 | |
michael@0 | 204 | // Flag set when this script is used as an entry script to parallel |
michael@0 | 205 | // execution. If this is true, then the parent JSScript must be in its |
michael@0 | 206 | // JitCompartment's parallel entry script set. |
michael@0 | 207 | bool isParallelEntryScript_; |
michael@0 | 208 | |
michael@0 | 209 | // Flag set if IonScript was compiled with SPS profiling enabled. |
michael@0 | 210 | bool hasSPSInstrumentation_; |
michael@0 | 211 | |
michael@0 | 212 | // Flag for if this script is getting recompiled. |
michael@0 | 213 | uint32_t recompiling_; |
michael@0 | 214 | |
michael@0 | 215 | // Any kind of data needed by the runtime, these can be either cache |
michael@0 | 216 | // information or profiling info. |
michael@0 | 217 | uint32_t runtimeData_; |
michael@0 | 218 | uint32_t runtimeSize_; |
michael@0 | 219 | |
michael@0 | 220 | // State for polymorphic caches in the compiled code. All caches are stored |
michael@0 | 221 | // in the runtimeData buffer and indexed by the cacheIndex, which gives a |
michael@0 | 222 | // relative offset in the runtimeData array. |
michael@0 | 223 | uint32_t cacheIndex_; |
michael@0 | 224 | uint32_t cacheEntries_; |
michael@0 | 225 | |
michael@0 | 226 | // Map code displacement to safepoint / OSI-patch-delta. |
michael@0 | 227 | uint32_t safepointIndexOffset_; |
michael@0 | 228 | uint32_t safepointIndexEntries_; |
michael@0 | 229 | |
michael@0 | 230 | // Offset to and length of the safepoint table in bytes. |
michael@0 | 231 | uint32_t safepointsStart_; |
michael@0 | 232 | uint32_t safepointsSize_; |
michael@0 | 233 | |
michael@0 | 234 | // Number of bytes this function reserves on the stack. |
michael@0 | 235 | uint32_t frameSlots_; |
michael@0 | 236 | |
michael@0 | 237 | // Frame size is the value that can be added to the StackPointer along |
michael@0 | 238 | // with the frame prefix to get a valid IonJSFrameLayout. |
michael@0 | 239 | uint32_t frameSize_; |
michael@0 | 240 | |
michael@0 | 241 | // Table mapping bailout IDs to snapshot offsets. |
michael@0 | 242 | uint32_t bailoutTable_; |
michael@0 | 243 | uint32_t bailoutEntries_; |
michael@0 | 244 | |
michael@0 | 245 | // Map OSI-point displacement to snapshot. |
michael@0 | 246 | uint32_t osiIndexOffset_; |
michael@0 | 247 | uint32_t osiIndexEntries_; |
michael@0 | 248 | |
michael@0 | 249 | // Offset from the start of the code buffer to its snapshot buffer. |
michael@0 | 250 | uint32_t snapshots_; |
michael@0 | 251 | uint32_t snapshotsListSize_; |
michael@0 | 252 | uint32_t snapshotsRVATableSize_; |
michael@0 | 253 | |
michael@0 | 254 | // List of instructions needed to recover stack frames. |
michael@0 | 255 | uint32_t recovers_; |
michael@0 | 256 | uint32_t recoversSize_; |
michael@0 | 257 | |
michael@0 | 258 | // Constant table for constants stored in snapshots. |
michael@0 | 259 | uint32_t constantTable_; |
michael@0 | 260 | uint32_t constantEntries_; |
michael@0 | 261 | |
michael@0 | 262 | // List of scripts that we call. |
michael@0 | 263 | // |
michael@0 | 264 | // Currently this is only non-nullptr for parallel IonScripts. |
michael@0 | 265 | uint32_t callTargetList_; |
michael@0 | 266 | uint32_t callTargetEntries_; |
michael@0 | 267 | |
michael@0 | 268 | // List of patchable backedges which are threaded into the runtime's list. |
michael@0 | 269 | uint32_t backedgeList_; |
michael@0 | 270 | uint32_t backedgeEntries_; |
michael@0 | 271 | |
michael@0 | 272 | // Number of references from invalidation records. |
michael@0 | 273 | uint32_t refcount_; |
michael@0 | 274 | |
michael@0 | 275 | // If this is a parallel script, the number of major GC collections it has |
michael@0 | 276 | // been idle, otherwise 0. |
michael@0 | 277 | // |
michael@0 | 278 | // JSScripts with parallel IonScripts are preserved across GC if the |
michael@0 | 279 | // parallel age is < MAX_PARALLEL_AGE. |
michael@0 | 280 | uint32_t parallelAge_; |
michael@0 | 281 | |
michael@0 | 282 | // Identifier of the compilation which produced this code. |
michael@0 | 283 | types::RecompileInfo recompileInfo_; |
michael@0 | 284 | |
michael@0 | 285 | // The optimization level this script was compiled in. |
michael@0 | 286 | OptimizationLevel optimizationLevel_; |
michael@0 | 287 | |
michael@0 | 288 | // Number of times we tried to enter this script via OSR but failed due to |
michael@0 | 289 | // a LOOPENTRY pc other than osrPc_. |
michael@0 | 290 | uint32_t osrPcMismatchCounter_; |
michael@0 | 291 | |
michael@0 | 292 | // If non-null, the list of AsmJSModules |
michael@0 | 293 | // that contain an optimized call directly into this IonScript. |
michael@0 | 294 | Vector<DependentAsmJSModuleExit> *dependentAsmJSModules; |
michael@0 | 295 | |
michael@0 | 296 | private: |
michael@0 | 297 | inline uint8_t *bottomBuffer() { |
michael@0 | 298 | return reinterpret_cast<uint8_t *>(this); |
michael@0 | 299 | } |
michael@0 | 300 | inline const uint8_t *bottomBuffer() const { |
michael@0 | 301 | return reinterpret_cast<const uint8_t *>(this); |
michael@0 | 302 | } |
michael@0 | 303 | |
michael@0 | 304 | public: |
michael@0 | 305 | SnapshotOffset *bailoutTable() { |
michael@0 | 306 | return (SnapshotOffset *) &bottomBuffer()[bailoutTable_]; |
michael@0 | 307 | } |
michael@0 | 308 | EncapsulatedValue *constants() { |
michael@0 | 309 | return (EncapsulatedValue *) &bottomBuffer()[constantTable_]; |
michael@0 | 310 | } |
michael@0 | 311 | const SafepointIndex *safepointIndices() const { |
michael@0 | 312 | return const_cast<IonScript *>(this)->safepointIndices(); |
michael@0 | 313 | } |
michael@0 | 314 | SafepointIndex *safepointIndices() { |
michael@0 | 315 | return (SafepointIndex *) &bottomBuffer()[safepointIndexOffset_]; |
michael@0 | 316 | } |
michael@0 | 317 | const OsiIndex *osiIndices() const { |
michael@0 | 318 | return const_cast<IonScript *>(this)->osiIndices(); |
michael@0 | 319 | } |
michael@0 | 320 | OsiIndex *osiIndices() { |
michael@0 | 321 | return (OsiIndex *) &bottomBuffer()[osiIndexOffset_]; |
michael@0 | 322 | } |
michael@0 | 323 | uint32_t *cacheIndex() { |
michael@0 | 324 | return (uint32_t *) &bottomBuffer()[cacheIndex_]; |
michael@0 | 325 | } |
michael@0 | 326 | uint8_t *runtimeData() { |
michael@0 | 327 | return &bottomBuffer()[runtimeData_]; |
michael@0 | 328 | } |
michael@0 | 329 | JSScript **callTargetList() { |
michael@0 | 330 | return (JSScript **) &bottomBuffer()[callTargetList_]; |
michael@0 | 331 | } |
michael@0 | 332 | PatchableBackedge *backedgeList() { |
michael@0 | 333 | return (PatchableBackedge *) &bottomBuffer()[backedgeList_]; |
michael@0 | 334 | } |
michael@0 | 335 | bool addDependentAsmJSModule(JSContext *cx, DependentAsmJSModuleExit exit); |
michael@0 | 336 | void removeDependentAsmJSModule(DependentAsmJSModuleExit exit) { |
michael@0 | 337 | if (!dependentAsmJSModules) |
michael@0 | 338 | return; |
michael@0 | 339 | for (size_t i = 0; i < dependentAsmJSModules->length(); i++) { |
michael@0 | 340 | if (dependentAsmJSModules->begin()[i].module == exit.module && |
michael@0 | 341 | dependentAsmJSModules->begin()[i].exitIndex == exit.exitIndex) |
michael@0 | 342 | { |
michael@0 | 343 | dependentAsmJSModules->erase(dependentAsmJSModules->begin() + i); |
michael@0 | 344 | break; |
michael@0 | 345 | } |
michael@0 | 346 | } |
michael@0 | 347 | } |
michael@0 | 348 | |
michael@0 | 349 | private: |
michael@0 | 350 | void trace(JSTracer *trc); |
michael@0 | 351 | |
michael@0 | 352 | public: |
michael@0 | 353 | // Do not call directly, use IonScript::New. This is public for cx->new_. |
michael@0 | 354 | IonScript(); |
michael@0 | 355 | |
michael@0 | 356 | static IonScript *New(JSContext *cx, types::RecompileInfo recompileInfo, |
michael@0 | 357 | uint32_t frameLocals, uint32_t frameSize, |
michael@0 | 358 | size_t snapshotsListSize, size_t snapshotsRVATableSize, |
michael@0 | 359 | size_t recoversSize, size_t bailoutEntries, |
michael@0 | 360 | size_t constants, size_t safepointIndexEntries, |
michael@0 | 361 | size_t osiIndexEntries, size_t cacheEntries, |
michael@0 | 362 | size_t runtimeSize, size_t safepointsSize, |
michael@0 | 363 | size_t callTargetEntries, size_t backedgeEntries, |
michael@0 | 364 | OptimizationLevel optimizationLevel); |
michael@0 | 365 | static void Trace(JSTracer *trc, IonScript *script); |
michael@0 | 366 | static void Destroy(FreeOp *fop, IonScript *script); |
michael@0 | 367 | |
michael@0 | 368 | static inline size_t offsetOfMethod() { |
michael@0 | 369 | return offsetof(IonScript, method_); |
michael@0 | 370 | } |
michael@0 | 371 | static inline size_t offsetOfOsrEntryOffset() { |
michael@0 | 372 | return offsetof(IonScript, osrEntryOffset_); |
michael@0 | 373 | } |
michael@0 | 374 | static inline size_t offsetOfSkipArgCheckEntryOffset() { |
michael@0 | 375 | return offsetof(IonScript, skipArgCheckEntryOffset_); |
michael@0 | 376 | } |
michael@0 | 377 | static inline size_t offsetOfRefcount() { |
michael@0 | 378 | return offsetof(IonScript, refcount_); |
michael@0 | 379 | } |
michael@0 | 380 | static inline size_t offsetOfRecompiling() { |
michael@0 | 381 | return offsetof(IonScript, recompiling_); |
michael@0 | 382 | } |
michael@0 | 383 | |
michael@0 | 384 | public: |
michael@0 | 385 | JitCode *method() const { |
michael@0 | 386 | return method_; |
michael@0 | 387 | } |
michael@0 | 388 | void setMethod(JitCode *code) { |
michael@0 | 389 | JS_ASSERT(!invalidated()); |
michael@0 | 390 | method_ = code; |
michael@0 | 391 | } |
michael@0 | 392 | void setDeoptTable(JitCode *code) { |
michael@0 | 393 | deoptTable_ = code; |
michael@0 | 394 | } |
michael@0 | 395 | void setOsrPc(jsbytecode *osrPc) { |
michael@0 | 396 | osrPc_ = osrPc; |
michael@0 | 397 | } |
michael@0 | 398 | jsbytecode *osrPc() const { |
michael@0 | 399 | return osrPc_; |
michael@0 | 400 | } |
michael@0 | 401 | void setOsrEntryOffset(uint32_t offset) { |
michael@0 | 402 | JS_ASSERT(!osrEntryOffset_); |
michael@0 | 403 | osrEntryOffset_ = offset; |
michael@0 | 404 | } |
michael@0 | 405 | uint32_t osrEntryOffset() const { |
michael@0 | 406 | return osrEntryOffset_; |
michael@0 | 407 | } |
michael@0 | 408 | void setSkipArgCheckEntryOffset(uint32_t offset) { |
michael@0 | 409 | JS_ASSERT(!skipArgCheckEntryOffset_); |
michael@0 | 410 | skipArgCheckEntryOffset_ = offset; |
michael@0 | 411 | } |
michael@0 | 412 | uint32_t getSkipArgCheckEntryOffset() const { |
michael@0 | 413 | return skipArgCheckEntryOffset_; |
michael@0 | 414 | } |
michael@0 | 415 | bool containsCodeAddress(uint8_t *addr) const { |
michael@0 | 416 | return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); |
michael@0 | 417 | } |
michael@0 | 418 | bool containsReturnAddress(uint8_t *addr) const { |
michael@0 | 419 | // This accounts for an off-by-one error caused by the return address of a |
michael@0 | 420 | // bailout sitting outside the range of the containing function. |
michael@0 | 421 | return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); |
michael@0 | 422 | } |
michael@0 | 423 | void setInvalidationEpilogueOffset(uint32_t offset) { |
michael@0 | 424 | JS_ASSERT(!invalidateEpilogueOffset_); |
michael@0 | 425 | invalidateEpilogueOffset_ = offset; |
michael@0 | 426 | } |
michael@0 | 427 | uint32_t invalidateEpilogueOffset() const { |
michael@0 | 428 | JS_ASSERT(invalidateEpilogueOffset_); |
michael@0 | 429 | return invalidateEpilogueOffset_; |
michael@0 | 430 | } |
michael@0 | 431 | void setInvalidationEpilogueDataOffset(uint32_t offset) { |
michael@0 | 432 | JS_ASSERT(!invalidateEpilogueDataOffset_); |
michael@0 | 433 | invalidateEpilogueDataOffset_ = offset; |
michael@0 | 434 | } |
michael@0 | 435 | uint32_t invalidateEpilogueDataOffset() const { |
michael@0 | 436 | JS_ASSERT(invalidateEpilogueDataOffset_); |
michael@0 | 437 | return invalidateEpilogueDataOffset_; |
michael@0 | 438 | } |
michael@0 | 439 | void incNumBailouts() { |
michael@0 | 440 | numBailouts_++; |
michael@0 | 441 | } |
michael@0 | 442 | uint32_t numBailouts() const { |
michael@0 | 443 | return numBailouts_; |
michael@0 | 444 | } |
michael@0 | 445 | bool bailoutExpected() const { |
michael@0 | 446 | return numBailouts_ > 0; |
michael@0 | 447 | } |
michael@0 | 448 | void setHasUncompiledCallTarget() { |
michael@0 | 449 | hasUncompiledCallTarget_ = true; |
michael@0 | 450 | } |
michael@0 | 451 | void clearHasUncompiledCallTarget() { |
michael@0 | 452 | hasUncompiledCallTarget_ = false; |
michael@0 | 453 | } |
michael@0 | 454 | bool hasUncompiledCallTarget() const { |
michael@0 | 455 | return hasUncompiledCallTarget_; |
michael@0 | 456 | } |
michael@0 | 457 | void setIsParallelEntryScript() { |
michael@0 | 458 | isParallelEntryScript_ = true; |
michael@0 | 459 | } |
michael@0 | 460 | bool isParallelEntryScript() const { |
michael@0 | 461 | return isParallelEntryScript_; |
michael@0 | 462 | } |
michael@0 | 463 | void setHasSPSInstrumentation() { |
michael@0 | 464 | hasSPSInstrumentation_ = true; |
michael@0 | 465 | } |
michael@0 | 466 | void clearHasSPSInstrumentation() { |
michael@0 | 467 | hasSPSInstrumentation_ = false; |
michael@0 | 468 | } |
michael@0 | 469 | bool hasSPSInstrumentation() const { |
michael@0 | 470 | return hasSPSInstrumentation_; |
michael@0 | 471 | } |
michael@0 | 472 | const uint8_t *snapshots() const { |
michael@0 | 473 | return reinterpret_cast<const uint8_t *>(this) + snapshots_; |
michael@0 | 474 | } |
michael@0 | 475 | size_t snapshotsListSize() const { |
michael@0 | 476 | return snapshotsListSize_; |
michael@0 | 477 | } |
michael@0 | 478 | size_t snapshotsRVATableSize() const { |
michael@0 | 479 | return snapshotsRVATableSize_; |
michael@0 | 480 | } |
michael@0 | 481 | const uint8_t *recovers() const { |
michael@0 | 482 | return reinterpret_cast<const uint8_t *>(this) + recovers_; |
michael@0 | 483 | } |
michael@0 | 484 | size_t recoversSize() const { |
michael@0 | 485 | return recoversSize_; |
michael@0 | 486 | } |
michael@0 | 487 | const uint8_t *safepoints() const { |
michael@0 | 488 | return reinterpret_cast<const uint8_t *>(this) + safepointsStart_; |
michael@0 | 489 | } |
michael@0 | 490 | size_t safepointsSize() const { |
michael@0 | 491 | return safepointsSize_; |
michael@0 | 492 | } |
michael@0 | 493 | size_t callTargetEntries() const { |
michael@0 | 494 | return callTargetEntries_; |
michael@0 | 495 | } |
michael@0 | 496 | size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { |
michael@0 | 497 | return mallocSizeOf(this); |
michael@0 | 498 | } |
michael@0 | 499 | EncapsulatedValue &getConstant(size_t index) { |
michael@0 | 500 | JS_ASSERT(index < numConstants()); |
michael@0 | 501 | return constants()[index]; |
michael@0 | 502 | } |
michael@0 | 503 | size_t numConstants() const { |
michael@0 | 504 | return constantEntries_; |
michael@0 | 505 | } |
michael@0 | 506 | uint32_t frameSlots() const { |
michael@0 | 507 | return frameSlots_; |
michael@0 | 508 | } |
michael@0 | 509 | uint32_t frameSize() const { |
michael@0 | 510 | return frameSize_; |
michael@0 | 511 | } |
michael@0 | 512 | SnapshotOffset bailoutToSnapshot(uint32_t bailoutId) { |
michael@0 | 513 | JS_ASSERT(bailoutId < bailoutEntries_); |
michael@0 | 514 | return bailoutTable()[bailoutId]; |
michael@0 | 515 | } |
michael@0 | 516 | const SafepointIndex *getSafepointIndex(uint32_t disp) const; |
michael@0 | 517 | const SafepointIndex *getSafepointIndex(uint8_t *retAddr) const { |
michael@0 | 518 | JS_ASSERT(containsCodeAddress(retAddr)); |
michael@0 | 519 | return getSafepointIndex(retAddr - method()->raw()); |
michael@0 | 520 | } |
michael@0 | 521 | const OsiIndex *getOsiIndex(uint32_t disp) const; |
michael@0 | 522 | const OsiIndex *getOsiIndex(uint8_t *retAddr) const; |
michael@0 | 523 | inline IonCache &getCacheFromIndex(uint32_t index) { |
michael@0 | 524 | JS_ASSERT(index < cacheEntries_); |
michael@0 | 525 | uint32_t offset = cacheIndex()[index]; |
michael@0 | 526 | return getCache(offset); |
michael@0 | 527 | } |
michael@0 | 528 | inline IonCache &getCache(uint32_t offset) { |
michael@0 | 529 | JS_ASSERT(offset < runtimeSize_); |
michael@0 | 530 | return *(IonCache *) &runtimeData()[offset]; |
michael@0 | 531 | } |
michael@0 | 532 | size_t numCaches() const { |
michael@0 | 533 | return cacheEntries_; |
michael@0 | 534 | } |
michael@0 | 535 | size_t runtimeSize() const { |
michael@0 | 536 | return runtimeSize_; |
michael@0 | 537 | } |
michael@0 | 538 | CacheLocation *getCacheLocs(uint32_t locIndex) { |
michael@0 | 539 | JS_ASSERT(locIndex < runtimeSize_); |
michael@0 | 540 | return (CacheLocation *) &runtimeData()[locIndex]; |
michael@0 | 541 | } |
michael@0 | 542 | void toggleBarriers(bool enabled); |
michael@0 | 543 | void purgeCaches(); |
michael@0 | 544 | void destroyCaches(); |
michael@0 | 545 | void unlinkFromRuntime(FreeOp *fop); |
michael@0 | 546 | void copySnapshots(const SnapshotWriter *writer); |
michael@0 | 547 | void copyRecovers(const RecoverWriter *writer); |
michael@0 | 548 | void copyBailoutTable(const SnapshotOffset *table); |
michael@0 | 549 | void copyConstants(const Value *vp); |
michael@0 | 550 | void copySafepointIndices(const SafepointIndex *firstSafepointIndex, MacroAssembler &masm); |
michael@0 | 551 | void copyOsiIndices(const OsiIndex *firstOsiIndex, MacroAssembler &masm); |
michael@0 | 552 | void copyRuntimeData(const uint8_t *data); |
michael@0 | 553 | void copyCacheEntries(const uint32_t *caches, MacroAssembler &masm); |
michael@0 | 554 | void copySafepoints(const SafepointWriter *writer); |
michael@0 | 555 | void copyCallTargetEntries(JSScript **callTargets); |
michael@0 | 556 | void copyPatchableBackedges(JSContext *cx, JitCode *code, |
michael@0 | 557 | PatchableBackedgeInfo *backedges); |
michael@0 | 558 | |
michael@0 | 559 | bool invalidated() const { |
michael@0 | 560 | return refcount_ != 0; |
michael@0 | 561 | } |
michael@0 | 562 | size_t refcount() const { |
michael@0 | 563 | return refcount_; |
michael@0 | 564 | } |
michael@0 | 565 | void incref() { |
michael@0 | 566 | refcount_++; |
michael@0 | 567 | } |
michael@0 | 568 | void decref(FreeOp *fop) { |
michael@0 | 569 | JS_ASSERT(refcount_); |
michael@0 | 570 | refcount_--; |
michael@0 | 571 | if (!refcount_) |
michael@0 | 572 | Destroy(fop, this); |
michael@0 | 573 | } |
michael@0 | 574 | const types::RecompileInfo& recompileInfo() const { |
michael@0 | 575 | return recompileInfo_; |
michael@0 | 576 | } |
michael@0 | 577 | types::RecompileInfo& recompileInfoRef() { |
michael@0 | 578 | return recompileInfo_; |
michael@0 | 579 | } |
michael@0 | 580 | OptimizationLevel optimizationLevel() const { |
michael@0 | 581 | return optimizationLevel_; |
michael@0 | 582 | } |
michael@0 | 583 | uint32_t incrOsrPcMismatchCounter() { |
michael@0 | 584 | return ++osrPcMismatchCounter_; |
michael@0 | 585 | } |
michael@0 | 586 | void resetOsrPcMismatchCounter() { |
michael@0 | 587 | osrPcMismatchCounter_ = 0; |
michael@0 | 588 | } |
michael@0 | 589 | |
michael@0 | 590 | void setRecompiling() { |
michael@0 | 591 | recompiling_ = true; |
michael@0 | 592 | } |
michael@0 | 593 | |
michael@0 | 594 | bool isRecompiling() const { |
michael@0 | 595 | return recompiling_; |
michael@0 | 596 | } |
michael@0 | 597 | |
michael@0 | 598 | void clearRecompiling() { |
michael@0 | 599 | recompiling_ = false; |
michael@0 | 600 | } |
michael@0 | 601 | |
michael@0 | 602 | static const uint32_t MAX_PARALLEL_AGE = 5; |
michael@0 | 603 | |
michael@0 | 604 | void resetParallelAge() { |
michael@0 | 605 | MOZ_ASSERT(isParallelEntryScript()); |
michael@0 | 606 | parallelAge_ = 0; |
michael@0 | 607 | } |
michael@0 | 608 | uint32_t parallelAge() const { |
michael@0 | 609 | return parallelAge_; |
michael@0 | 610 | } |
michael@0 | 611 | uint32_t increaseParallelAge() { |
michael@0 | 612 | MOZ_ASSERT(isParallelEntryScript()); |
michael@0 | 613 | return ++parallelAge_; |
michael@0 | 614 | } |
michael@0 | 615 | |
michael@0 | 616 | static void writeBarrierPre(Zone *zone, IonScript *ionScript); |
michael@0 | 617 | }; |
michael@0 | 618 | |
michael@0 | 619 | // Execution information for a basic block which may persist after the |
michael@0 | 620 | // accompanying IonScript is destroyed, for use during profiling. |
michael@0 | 621 | struct IonBlockCounts |
michael@0 | 622 | { |
michael@0 | 623 | private: |
michael@0 | 624 | uint32_t id_; |
michael@0 | 625 | |
michael@0 | 626 | // Approximate bytecode in the outer (not inlined) script this block |
michael@0 | 627 | // was generated from. |
michael@0 | 628 | uint32_t offset_; |
michael@0 | 629 | |
michael@0 | 630 | // ids for successors of this block. |
michael@0 | 631 | uint32_t numSuccessors_; |
michael@0 | 632 | uint32_t *successors_; |
michael@0 | 633 | |
michael@0 | 634 | // Hit count for this block. |
michael@0 | 635 | uint64_t hitCount_; |
michael@0 | 636 | |
michael@0 | 637 | // Text information about the code generated for this block. |
michael@0 | 638 | char *code_; |
michael@0 | 639 | |
michael@0 | 640 | public: |
michael@0 | 641 | |
michael@0 | 642 | bool init(uint32_t id, uint32_t offset, uint32_t numSuccessors) { |
michael@0 | 643 | id_ = id; |
michael@0 | 644 | offset_ = offset; |
michael@0 | 645 | numSuccessors_ = numSuccessors; |
michael@0 | 646 | if (numSuccessors) { |
michael@0 | 647 | successors_ = js_pod_calloc<uint32_t>(numSuccessors); |
michael@0 | 648 | if (!successors_) |
michael@0 | 649 | return false; |
michael@0 | 650 | } |
michael@0 | 651 | return true; |
michael@0 | 652 | } |
michael@0 | 653 | |
michael@0 | 654 | void destroy() { |
michael@0 | 655 | js_free(successors_); |
michael@0 | 656 | js_free(code_); |
michael@0 | 657 | } |
michael@0 | 658 | |
michael@0 | 659 | uint32_t id() const { |
michael@0 | 660 | return id_; |
michael@0 | 661 | } |
michael@0 | 662 | |
michael@0 | 663 | uint32_t offset() const { |
michael@0 | 664 | return offset_; |
michael@0 | 665 | } |
michael@0 | 666 | |
michael@0 | 667 | size_t numSuccessors() const { |
michael@0 | 668 | return numSuccessors_; |
michael@0 | 669 | } |
michael@0 | 670 | |
michael@0 | 671 | void setSuccessor(size_t i, uint32_t id) { |
michael@0 | 672 | JS_ASSERT(i < numSuccessors_); |
michael@0 | 673 | successors_[i] = id; |
michael@0 | 674 | } |
michael@0 | 675 | |
michael@0 | 676 | uint32_t successor(size_t i) const { |
michael@0 | 677 | JS_ASSERT(i < numSuccessors_); |
michael@0 | 678 | return successors_[i]; |
michael@0 | 679 | } |
michael@0 | 680 | |
michael@0 | 681 | uint64_t *addressOfHitCount() { |
michael@0 | 682 | return &hitCount_; |
michael@0 | 683 | } |
michael@0 | 684 | |
michael@0 | 685 | uint64_t hitCount() const { |
michael@0 | 686 | return hitCount_; |
michael@0 | 687 | } |
michael@0 | 688 | |
michael@0 | 689 | void setCode(const char *code) { |
michael@0 | 690 | char *ncode = (char *) js_malloc(strlen(code) + 1); |
michael@0 | 691 | if (ncode) { |
michael@0 | 692 | strcpy(ncode, code); |
michael@0 | 693 | code_ = ncode; |
michael@0 | 694 | } |
michael@0 | 695 | } |
michael@0 | 696 | |
michael@0 | 697 | const char *code() const { |
michael@0 | 698 | return code_; |
michael@0 | 699 | } |
michael@0 | 700 | }; |
michael@0 | 701 | |
michael@0 | 702 | // Execution information for a compiled script which may persist after the |
michael@0 | 703 | // IonScript is destroyed, for use during profiling. |
michael@0 | 704 | struct IonScriptCounts |
michael@0 | 705 | { |
michael@0 | 706 | private: |
michael@0 | 707 | // Any previous invalidated compilation(s) for the script. |
michael@0 | 708 | IonScriptCounts *previous_; |
michael@0 | 709 | |
michael@0 | 710 | // Information about basic blocks in this script. |
michael@0 | 711 | size_t numBlocks_; |
michael@0 | 712 | IonBlockCounts *blocks_; |
michael@0 | 713 | |
michael@0 | 714 | public: |
michael@0 | 715 | |
michael@0 | 716 | IonScriptCounts() { |
michael@0 | 717 | mozilla::PodZero(this); |
michael@0 | 718 | } |
michael@0 | 719 | |
michael@0 | 720 | ~IonScriptCounts() { |
michael@0 | 721 | for (size_t i = 0; i < numBlocks_; i++) |
michael@0 | 722 | blocks_[i].destroy(); |
michael@0 | 723 | js_free(blocks_); |
michael@0 | 724 | js_delete(previous_); |
michael@0 | 725 | } |
michael@0 | 726 | |
michael@0 | 727 | bool init(size_t numBlocks) { |
michael@0 | 728 | numBlocks_ = numBlocks; |
michael@0 | 729 | blocks_ = js_pod_calloc<IonBlockCounts>(numBlocks); |
michael@0 | 730 | return blocks_ != nullptr; |
michael@0 | 731 | } |
michael@0 | 732 | |
michael@0 | 733 | size_t numBlocks() const { |
michael@0 | 734 | return numBlocks_; |
michael@0 | 735 | } |
michael@0 | 736 | |
michael@0 | 737 | IonBlockCounts &block(size_t i) { |
michael@0 | 738 | JS_ASSERT(i < numBlocks_); |
michael@0 | 739 | return blocks_[i]; |
michael@0 | 740 | } |
michael@0 | 741 | |
michael@0 | 742 | void setPrevious(IonScriptCounts *previous) { |
michael@0 | 743 | previous_ = previous; |
michael@0 | 744 | } |
michael@0 | 745 | |
michael@0 | 746 | IonScriptCounts *previous() const { |
michael@0 | 747 | return previous_; |
michael@0 | 748 | } |
michael@0 | 749 | }; |
michael@0 | 750 | |
michael@0 | 751 | struct VMFunction; |
michael@0 | 752 | |
michael@0 | 753 | class JitCompartment; |
michael@0 | 754 | class JitRuntime; |
michael@0 | 755 | |
michael@0 | 756 | struct AutoFlushICache |
michael@0 | 757 | { |
michael@0 | 758 | private: |
michael@0 | 759 | uintptr_t start_; |
michael@0 | 760 | uintptr_t stop_; |
michael@0 | 761 | const char *name_; |
michael@0 | 762 | bool inhibit_; |
michael@0 | 763 | AutoFlushICache *prev_; |
michael@0 | 764 | |
michael@0 | 765 | public: |
michael@0 | 766 | static void setRange(uintptr_t p, size_t len); |
michael@0 | 767 | static void flush(uintptr_t p, size_t len); |
michael@0 | 768 | static void setInhibit(); |
michael@0 | 769 | ~AutoFlushICache(); |
michael@0 | 770 | AutoFlushICache(const char *nonce, bool inhibit=false); |
michael@0 | 771 | }; |
michael@0 | 772 | |
michael@0 | 773 | } // namespace jit |
michael@0 | 774 | |
michael@0 | 775 | namespace gc { |
michael@0 | 776 | |
michael@0 | 777 | inline bool |
michael@0 | 778 | IsMarked(const jit::VMFunction *) |
michael@0 | 779 | { |
michael@0 | 780 | // VMFunctions are only static objects, which are used by WeakMaps as keys. |
michael@0 | 781 | // They are considered root objects and are therefore always marked. |
michael@0 | 782 | return true; |
michael@0 | 783 | } |
michael@0 | 784 | |
michael@0 | 785 | } // namespace gc |
michael@0 | 786 | |
michael@0 | 787 | } // namespace js |
michael@0 | 788 | |
michael@0 | 789 | #endif /* jit_IonCode_h */ |