michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #ifndef jit_IonCode_h michael@0: #define jit_IonCode_h michael@0: michael@0: #include "mozilla/Atomics.h" michael@0: #include "mozilla/MemoryReporting.h" michael@0: #include "mozilla/PodOperations.h" michael@0: michael@0: #include "jsinfer.h" michael@0: #include "jstypes.h" michael@0: michael@0: #include "assembler/jit/ExecutableAllocator.h" michael@0: #include "gc/Heap.h" michael@0: #include "jit/IonOptimizationLevels.h" michael@0: #include "jit/IonTypes.h" michael@0: michael@0: namespace JSC { michael@0: class ExecutablePool; michael@0: } michael@0: michael@0: namespace js { michael@0: michael@0: class AsmJSModule; michael@0: michael@0: namespace jit { michael@0: michael@0: class MacroAssembler; michael@0: class CodeOffsetLabel; michael@0: class PatchableBackedge; michael@0: michael@0: class JitCode : public gc::BarrieredCell michael@0: { michael@0: protected: michael@0: uint8_t *code_; michael@0: JSC::ExecutablePool *pool_; michael@0: uint32_t bufferSize_; // Total buffer size. Does not include headerSize_. michael@0: uint32_t insnSize_; // Instruction stream size. michael@0: uint32_t dataSize_; // Size of the read-only data area. michael@0: uint32_t jumpRelocTableBytes_; // Size of the jump relocation table. michael@0: uint32_t dataRelocTableBytes_; // Size of the data relocation table. michael@0: uint32_t preBarrierTableBytes_; // Size of the prebarrier table. michael@0: uint8_t headerSize_ : 5; // Number of bytes allocated before codeStart. michael@0: uint8_t kind_ : 3; // JSC::CodeKind, for the memory reporters. 
michael@0: bool invalidated_ : 1; // Whether the code object has been invalidated. michael@0: // This is necessary to prevent GC tracing. michael@0: michael@0: #if JS_BITS_PER_WORD == 32 michael@0: // Ensure JitCode is gc::Cell aligned. michael@0: uint32_t padding_; michael@0: #endif michael@0: michael@0: JitCode() michael@0: : code_(nullptr), michael@0: pool_(nullptr) michael@0: { } michael@0: JitCode(uint8_t *code, uint32_t bufferSize, uint32_t headerSize, JSC::ExecutablePool *pool, michael@0: JSC::CodeKind kind) michael@0: : code_(code), michael@0: pool_(pool), michael@0: bufferSize_(bufferSize), michael@0: insnSize_(0), michael@0: dataSize_(0), michael@0: jumpRelocTableBytes_(0), michael@0: dataRelocTableBytes_(0), michael@0: preBarrierTableBytes_(0), michael@0: headerSize_(headerSize), michael@0: kind_(kind), michael@0: invalidated_(false) michael@0: { michael@0: MOZ_ASSERT(JSC::CodeKind(kind_) == kind); michael@0: MOZ_ASSERT(headerSize_ == headerSize); michael@0: } michael@0: michael@0: uint32_t dataOffset() const { michael@0: return insnSize_; michael@0: } michael@0: uint32_t jumpRelocTableOffset() const { michael@0: return dataOffset() + dataSize_; michael@0: } michael@0: uint32_t dataRelocTableOffset() const { michael@0: return jumpRelocTableOffset() + jumpRelocTableBytes_; michael@0: } michael@0: uint32_t preBarrierTableOffset() const { michael@0: return dataRelocTableOffset() + dataRelocTableBytes_; michael@0: } michael@0: michael@0: public: michael@0: uint8_t *raw() const { michael@0: return code_; michael@0: } michael@0: size_t instructionsSize() const { michael@0: return insnSize_; michael@0: } michael@0: void trace(JSTracer *trc); michael@0: void finalize(FreeOp *fop); michael@0: void setInvalidated() { michael@0: invalidated_ = true; michael@0: } michael@0: michael@0: void togglePreBarriers(bool enabled); michael@0: michael@0: // If this JitCode object has been, effectively, corrupted due to michael@0: // invalidation patching, then we have to 
remember this so we don't try and michael@0: // trace relocation entries that may now be corrupt. michael@0: bool invalidated() const { michael@0: return !!invalidated_; michael@0: } michael@0: michael@0: template T as() const { michael@0: return JS_DATA_TO_FUNC_PTR(T, raw()); michael@0: } michael@0: michael@0: void copyFrom(MacroAssembler &masm); michael@0: michael@0: static JitCode *FromExecutable(uint8_t *buffer) { michael@0: JitCode *code = *(JitCode **)(buffer - sizeof(JitCode *)); michael@0: JS_ASSERT(code->raw() == buffer); michael@0: return code; michael@0: } michael@0: michael@0: static size_t offsetOfCode() { michael@0: return offsetof(JitCode, code_); michael@0: } michael@0: michael@0: uint8_t *jumpRelocTable() { michael@0: return code_ + jumpRelocTableOffset(); michael@0: } michael@0: michael@0: // Allocates a new JitCode object which will be managed by the GC. If no michael@0: // object can be allocated, nullptr is returned. On failure, |pool| is michael@0: // automatically released, so the code may be freed. michael@0: template michael@0: static JitCode *New(JSContext *cx, uint8_t *code, uint32_t bufferSize, uint32_t headerSize, michael@0: JSC::ExecutablePool *pool, JSC::CodeKind kind); michael@0: michael@0: public: michael@0: static inline ThingRootKind rootKind() { return THING_ROOT_JIT_CODE; } michael@0: }; michael@0: michael@0: class SnapshotWriter; michael@0: class RecoverWriter; michael@0: class SafepointWriter; michael@0: class SafepointIndex; michael@0: class OsiIndex; michael@0: class IonCache; michael@0: struct PatchableBackedgeInfo; michael@0: struct CacheLocation; michael@0: michael@0: // Describes a single AsmJSModule which jumps (via an FFI exit with the given michael@0: // index) directly into an IonScript. 
michael@0: struct DependentAsmJSModuleExit michael@0: { michael@0: const AsmJSModule *module; michael@0: size_t exitIndex; michael@0: michael@0: DependentAsmJSModuleExit(const AsmJSModule *module, size_t exitIndex) michael@0: : module(module), michael@0: exitIndex(exitIndex) michael@0: { } michael@0: }; michael@0: michael@0: // An IonScript attaches Ion-generated information to a JSScript. michael@0: struct IonScript michael@0: { michael@0: private: michael@0: // Code pointer containing the actual method. michael@0: EncapsulatedPtr method_; michael@0: michael@0: // Deoptimization table used by this method. michael@0: EncapsulatedPtr deoptTable_; michael@0: michael@0: // Entrypoint for OSR, or nullptr. michael@0: jsbytecode *osrPc_; michael@0: michael@0: // Offset to OSR entrypoint from method_->raw(), or 0. michael@0: uint32_t osrEntryOffset_; michael@0: michael@0: // Offset to entrypoint skipping type arg check from method_->raw(). michael@0: uint32_t skipArgCheckEntryOffset_; michael@0: michael@0: // Offset of the invalidation epilogue (which pushes this IonScript michael@0: // and calls the invalidation thunk). michael@0: uint32_t invalidateEpilogueOffset_; michael@0: michael@0: // The offset immediately after the IonScript immediate. michael@0: // NOTE: technically a constant delta from michael@0: // |invalidateEpilogueOffset_|, so we could hard-code this michael@0: // per-platform if we want. michael@0: uint32_t invalidateEpilogueDataOffset_; michael@0: michael@0: // Number of times this script bailed out without invalidation. michael@0: uint32_t numBailouts_; michael@0: michael@0: // Flag set when it is likely that one of our (transitive) call michael@0: // targets is not compiled. Used in ForkJoin.cpp to decide when michael@0: // we should add call targets to the worklist. michael@0: mozilla::Atomic hasUncompiledCallTarget_; michael@0: michael@0: // Flag set when this script is used as an entry script to parallel michael@0: // execution. 
If this is true, then the parent JSScript must be in its michael@0: // JitCompartment's parallel entry script set. michael@0: bool isParallelEntryScript_; michael@0: michael@0: // Flag set if IonScript was compiled with SPS profiling enabled. michael@0: bool hasSPSInstrumentation_; michael@0: michael@0: // Flag for if this script is getting recompiled. michael@0: uint32_t recompiling_; michael@0: michael@0: // Any kind of data needed by the runtime, these can be either cache michael@0: // information or profiling info. michael@0: uint32_t runtimeData_; michael@0: uint32_t runtimeSize_; michael@0: michael@0: // State for polymorphic caches in the compiled code. All caches are stored michael@0: // in the runtimeData buffer and indexed by the cacheIndex which give a michael@0: // relative offset in the runtimeData array. michael@0: uint32_t cacheIndex_; michael@0: uint32_t cacheEntries_; michael@0: michael@0: // Map code displacement to safepoint / OSI-patch-delta. michael@0: uint32_t safepointIndexOffset_; michael@0: uint32_t safepointIndexEntries_; michael@0: michael@0: // Offset to and length of the safepoint table in bytes. michael@0: uint32_t safepointsStart_; michael@0: uint32_t safepointsSize_; michael@0: michael@0: // Number of bytes this function reserves on the stack. michael@0: uint32_t frameSlots_; michael@0: michael@0: // Frame size is the value that can be added to the StackPointer along michael@0: // with the frame prefix to get a valid IonJSFrameLayout. michael@0: uint32_t frameSize_; michael@0: michael@0: // Table mapping bailout IDs to snapshot offsets. michael@0: uint32_t bailoutTable_; michael@0: uint32_t bailoutEntries_; michael@0: michael@0: // Map OSI-point displacement to snapshot. michael@0: uint32_t osiIndexOffset_; michael@0: uint32_t osiIndexEntries_; michael@0: michael@0: // Offset from the start of the code buffer to its snapshot buffer. 
michael@0: uint32_t snapshots_; michael@0: uint32_t snapshotsListSize_; michael@0: uint32_t snapshotsRVATableSize_; michael@0: michael@0: // List of instructions needed to recover stack frames. michael@0: uint32_t recovers_; michael@0: uint32_t recoversSize_; michael@0: michael@0: // Constant table for constants stored in snapshots. michael@0: uint32_t constantTable_; michael@0: uint32_t constantEntries_; michael@0: michael@0: // List of scripts that we call. michael@0: // michael@0: // Currently this is only non-nullptr for parallel IonScripts. michael@0: uint32_t callTargetList_; michael@0: uint32_t callTargetEntries_; michael@0: michael@0: // List of patchable backedges which are threaded into the runtime's list. michael@0: uint32_t backedgeList_; michael@0: uint32_t backedgeEntries_; michael@0: michael@0: // Number of references from invalidation records. michael@0: uint32_t refcount_; michael@0: michael@0: // If this is a parallel script, the number of major GC collections it has michael@0: // been idle, otherwise 0. michael@0: // michael@0: // JSScripts with parallel IonScripts are preserved across GC if the michael@0: // parallel age is < MAX_PARALLEL_AGE. michael@0: uint32_t parallelAge_; michael@0: michael@0: // Identifier of the compilation which produced this code. michael@0: types::RecompileInfo recompileInfo_; michael@0: michael@0: // The optimization level this script was compiled in. michael@0: OptimizationLevel optimizationLevel_; michael@0: michael@0: // Number of times we tried to enter this script via OSR but failed due to michael@0: // a LOOPENTRY pc other than osrPc_. michael@0: uint32_t osrPcMismatchCounter_; michael@0: michael@0: // If non-null, the list of AsmJSModules michael@0: // that contain an optimized call directly into this IonScript. 
michael@0: Vector *dependentAsmJSModules; michael@0: michael@0: private: michael@0: inline uint8_t *bottomBuffer() { michael@0: return reinterpret_cast(this); michael@0: } michael@0: inline const uint8_t *bottomBuffer() const { michael@0: return reinterpret_cast(this); michael@0: } michael@0: michael@0: public: michael@0: SnapshotOffset *bailoutTable() { michael@0: return (SnapshotOffset *) &bottomBuffer()[bailoutTable_]; michael@0: } michael@0: EncapsulatedValue *constants() { michael@0: return (EncapsulatedValue *) &bottomBuffer()[constantTable_]; michael@0: } michael@0: const SafepointIndex *safepointIndices() const { michael@0: return const_cast(this)->safepointIndices(); michael@0: } michael@0: SafepointIndex *safepointIndices() { michael@0: return (SafepointIndex *) &bottomBuffer()[safepointIndexOffset_]; michael@0: } michael@0: const OsiIndex *osiIndices() const { michael@0: return const_cast(this)->osiIndices(); michael@0: } michael@0: OsiIndex *osiIndices() { michael@0: return (OsiIndex *) &bottomBuffer()[osiIndexOffset_]; michael@0: } michael@0: uint32_t *cacheIndex() { michael@0: return (uint32_t *) &bottomBuffer()[cacheIndex_]; michael@0: } michael@0: uint8_t *runtimeData() { michael@0: return &bottomBuffer()[runtimeData_]; michael@0: } michael@0: JSScript **callTargetList() { michael@0: return (JSScript **) &bottomBuffer()[callTargetList_]; michael@0: } michael@0: PatchableBackedge *backedgeList() { michael@0: return (PatchableBackedge *) &bottomBuffer()[backedgeList_]; michael@0: } michael@0: bool addDependentAsmJSModule(JSContext *cx, DependentAsmJSModuleExit exit); michael@0: void removeDependentAsmJSModule(DependentAsmJSModuleExit exit) { michael@0: if (!dependentAsmJSModules) michael@0: return; michael@0: for (size_t i = 0; i < dependentAsmJSModules->length(); i++) { michael@0: if (dependentAsmJSModules->begin()[i].module == exit.module && michael@0: dependentAsmJSModules->begin()[i].exitIndex == exit.exitIndex) michael@0: { michael@0: 
dependentAsmJSModules->erase(dependentAsmJSModules->begin() + i); michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: michael@0: private: michael@0: void trace(JSTracer *trc); michael@0: michael@0: public: michael@0: // Do not call directly, use IonScript::New. This is public for cx->new_. michael@0: IonScript(); michael@0: michael@0: static IonScript *New(JSContext *cx, types::RecompileInfo recompileInfo, michael@0: uint32_t frameLocals, uint32_t frameSize, michael@0: size_t snapshotsListSize, size_t snapshotsRVATableSize, michael@0: size_t recoversSize, size_t bailoutEntries, michael@0: size_t constants, size_t safepointIndexEntries, michael@0: size_t osiIndexEntries, size_t cacheEntries, michael@0: size_t runtimeSize, size_t safepointsSize, michael@0: size_t callTargetEntries, size_t backedgeEntries, michael@0: OptimizationLevel optimizationLevel); michael@0: static void Trace(JSTracer *trc, IonScript *script); michael@0: static void Destroy(FreeOp *fop, IonScript *script); michael@0: michael@0: static inline size_t offsetOfMethod() { michael@0: return offsetof(IonScript, method_); michael@0: } michael@0: static inline size_t offsetOfOsrEntryOffset() { michael@0: return offsetof(IonScript, osrEntryOffset_); michael@0: } michael@0: static inline size_t offsetOfSkipArgCheckEntryOffset() { michael@0: return offsetof(IonScript, skipArgCheckEntryOffset_); michael@0: } michael@0: static inline size_t offsetOfRefcount() { michael@0: return offsetof(IonScript, refcount_); michael@0: } michael@0: static inline size_t offsetOfRecompiling() { michael@0: return offsetof(IonScript, recompiling_); michael@0: } michael@0: michael@0: public: michael@0: JitCode *method() const { michael@0: return method_; michael@0: } michael@0: void setMethod(JitCode *code) { michael@0: JS_ASSERT(!invalidated()); michael@0: method_ = code; michael@0: } michael@0: void setDeoptTable(JitCode *code) { michael@0: deoptTable_ = code; michael@0: } michael@0: void setOsrPc(jsbytecode 
*osrPc) { michael@0: osrPc_ = osrPc; michael@0: } michael@0: jsbytecode *osrPc() const { michael@0: return osrPc_; michael@0: } michael@0: void setOsrEntryOffset(uint32_t offset) { michael@0: JS_ASSERT(!osrEntryOffset_); michael@0: osrEntryOffset_ = offset; michael@0: } michael@0: uint32_t osrEntryOffset() const { michael@0: return osrEntryOffset_; michael@0: } michael@0: void setSkipArgCheckEntryOffset(uint32_t offset) { michael@0: JS_ASSERT(!skipArgCheckEntryOffset_); michael@0: skipArgCheckEntryOffset_ = offset; michael@0: } michael@0: uint32_t getSkipArgCheckEntryOffset() const { michael@0: return skipArgCheckEntryOffset_; michael@0: } michael@0: bool containsCodeAddress(uint8_t *addr) const { michael@0: return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); michael@0: } michael@0: bool containsReturnAddress(uint8_t *addr) const { michael@0: // This accounts for an off by one error caused by the return address of a michael@0: // bailout sitting outside the range of the containing function. 
michael@0: return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize(); michael@0: } michael@0: void setInvalidationEpilogueOffset(uint32_t offset) { michael@0: JS_ASSERT(!invalidateEpilogueOffset_); michael@0: invalidateEpilogueOffset_ = offset; michael@0: } michael@0: uint32_t invalidateEpilogueOffset() const { michael@0: JS_ASSERT(invalidateEpilogueOffset_); michael@0: return invalidateEpilogueOffset_; michael@0: } michael@0: void setInvalidationEpilogueDataOffset(uint32_t offset) { michael@0: JS_ASSERT(!invalidateEpilogueDataOffset_); michael@0: invalidateEpilogueDataOffset_ = offset; michael@0: } michael@0: uint32_t invalidateEpilogueDataOffset() const { michael@0: JS_ASSERT(invalidateEpilogueDataOffset_); michael@0: return invalidateEpilogueDataOffset_; michael@0: } michael@0: void incNumBailouts() { michael@0: numBailouts_++; michael@0: } michael@0: uint32_t numBailouts() const { michael@0: return numBailouts_; michael@0: } michael@0: bool bailoutExpected() const { michael@0: return numBailouts_ > 0; michael@0: } michael@0: void setHasUncompiledCallTarget() { michael@0: hasUncompiledCallTarget_ = true; michael@0: } michael@0: void clearHasUncompiledCallTarget() { michael@0: hasUncompiledCallTarget_ = false; michael@0: } michael@0: bool hasUncompiledCallTarget() const { michael@0: return hasUncompiledCallTarget_; michael@0: } michael@0: void setIsParallelEntryScript() { michael@0: isParallelEntryScript_ = true; michael@0: } michael@0: bool isParallelEntryScript() const { michael@0: return isParallelEntryScript_; michael@0: } michael@0: void setHasSPSInstrumentation() { michael@0: hasSPSInstrumentation_ = true; michael@0: } michael@0: void clearHasSPSInstrumentation() { michael@0: hasSPSInstrumentation_ = false; michael@0: } michael@0: bool hasSPSInstrumentation() const { michael@0: return hasSPSInstrumentation_; michael@0: } michael@0: const uint8_t *snapshots() const { michael@0: return reinterpret_cast(this) + snapshots_; 
michael@0: } michael@0: size_t snapshotsListSize() const { michael@0: return snapshotsListSize_; michael@0: } michael@0: size_t snapshotsRVATableSize() const { michael@0: return snapshotsRVATableSize_; michael@0: } michael@0: const uint8_t *recovers() const { michael@0: return reinterpret_cast(this) + recovers_; michael@0: } michael@0: size_t recoversSize() const { michael@0: return recoversSize_; michael@0: } michael@0: const uint8_t *safepoints() const { michael@0: return reinterpret_cast(this) + safepointsStart_; michael@0: } michael@0: size_t safepointsSize() const { michael@0: return safepointsSize_; michael@0: } michael@0: size_t callTargetEntries() const { michael@0: return callTargetEntries_; michael@0: } michael@0: size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { michael@0: return mallocSizeOf(this); michael@0: } michael@0: EncapsulatedValue &getConstant(size_t index) { michael@0: JS_ASSERT(index < numConstants()); michael@0: return constants()[index]; michael@0: } michael@0: size_t numConstants() const { michael@0: return constantEntries_; michael@0: } michael@0: uint32_t frameSlots() const { michael@0: return frameSlots_; michael@0: } michael@0: uint32_t frameSize() const { michael@0: return frameSize_; michael@0: } michael@0: SnapshotOffset bailoutToSnapshot(uint32_t bailoutId) { michael@0: JS_ASSERT(bailoutId < bailoutEntries_); michael@0: return bailoutTable()[bailoutId]; michael@0: } michael@0: const SafepointIndex *getSafepointIndex(uint32_t disp) const; michael@0: const SafepointIndex *getSafepointIndex(uint8_t *retAddr) const { michael@0: JS_ASSERT(containsCodeAddress(retAddr)); michael@0: return getSafepointIndex(retAddr - method()->raw()); michael@0: } michael@0: const OsiIndex *getOsiIndex(uint32_t disp) const; michael@0: const OsiIndex *getOsiIndex(uint8_t *retAddr) const; michael@0: inline IonCache &getCacheFromIndex(uint32_t index) { michael@0: JS_ASSERT(index < cacheEntries_); michael@0: uint32_t offset = 
cacheIndex()[index]; michael@0: return getCache(offset); michael@0: } michael@0: inline IonCache &getCache(uint32_t offset) { michael@0: JS_ASSERT(offset < runtimeSize_); michael@0: return *(IonCache *) &runtimeData()[offset]; michael@0: } michael@0: size_t numCaches() const { michael@0: return cacheEntries_; michael@0: } michael@0: size_t runtimeSize() const { michael@0: return runtimeSize_; michael@0: } michael@0: CacheLocation *getCacheLocs(uint32_t locIndex) { michael@0: JS_ASSERT(locIndex < runtimeSize_); michael@0: return (CacheLocation *) &runtimeData()[locIndex]; michael@0: } michael@0: void toggleBarriers(bool enabled); michael@0: void purgeCaches(); michael@0: void destroyCaches(); michael@0: void unlinkFromRuntime(FreeOp *fop); michael@0: void copySnapshots(const SnapshotWriter *writer); michael@0: void copyRecovers(const RecoverWriter *writer); michael@0: void copyBailoutTable(const SnapshotOffset *table); michael@0: void copyConstants(const Value *vp); michael@0: void copySafepointIndices(const SafepointIndex *firstSafepointIndex, MacroAssembler &masm); michael@0: void copyOsiIndices(const OsiIndex *firstOsiIndex, MacroAssembler &masm); michael@0: void copyRuntimeData(const uint8_t *data); michael@0: void copyCacheEntries(const uint32_t *caches, MacroAssembler &masm); michael@0: void copySafepoints(const SafepointWriter *writer); michael@0: void copyCallTargetEntries(JSScript **callTargets); michael@0: void copyPatchableBackedges(JSContext *cx, JitCode *code, michael@0: PatchableBackedgeInfo *backedges); michael@0: michael@0: bool invalidated() const { michael@0: return refcount_ != 0; michael@0: } michael@0: size_t refcount() const { michael@0: return refcount_; michael@0: } michael@0: void incref() { michael@0: refcount_++; michael@0: } michael@0: void decref(FreeOp *fop) { michael@0: JS_ASSERT(refcount_); michael@0: refcount_--; michael@0: if (!refcount_) michael@0: Destroy(fop, this); michael@0: } michael@0: const types::RecompileInfo& 
recompileInfo() const { michael@0: return recompileInfo_; michael@0: } michael@0: types::RecompileInfo& recompileInfoRef() { michael@0: return recompileInfo_; michael@0: } michael@0: OptimizationLevel optimizationLevel() const { michael@0: return optimizationLevel_; michael@0: } michael@0: uint32_t incrOsrPcMismatchCounter() { michael@0: return ++osrPcMismatchCounter_; michael@0: } michael@0: void resetOsrPcMismatchCounter() { michael@0: osrPcMismatchCounter_ = 0; michael@0: } michael@0: michael@0: void setRecompiling() { michael@0: recompiling_ = true; michael@0: } michael@0: michael@0: bool isRecompiling() const { michael@0: return recompiling_; michael@0: } michael@0: michael@0: void clearRecompiling() { michael@0: recompiling_ = false; michael@0: } michael@0: michael@0: static const uint32_t MAX_PARALLEL_AGE = 5; michael@0: michael@0: void resetParallelAge() { michael@0: MOZ_ASSERT(isParallelEntryScript()); michael@0: parallelAge_ = 0; michael@0: } michael@0: uint32_t parallelAge() const { michael@0: return parallelAge_; michael@0: } michael@0: uint32_t increaseParallelAge() { michael@0: MOZ_ASSERT(isParallelEntryScript()); michael@0: return ++parallelAge_; michael@0: } michael@0: michael@0: static void writeBarrierPre(Zone *zone, IonScript *ionScript); michael@0: }; michael@0: michael@0: // Execution information for a basic block which may persist after the michael@0: // accompanying IonScript is destroyed, for use during profiling. michael@0: struct IonBlockCounts michael@0: { michael@0: private: michael@0: uint32_t id_; michael@0: michael@0: // Approximate bytecode in the outer (not inlined) script this block michael@0: // was generated from. michael@0: uint32_t offset_; michael@0: michael@0: // ids for successors of this block. michael@0: uint32_t numSuccessors_; michael@0: uint32_t *successors_; michael@0: michael@0: // Hit count for this block. 
michael@0: uint64_t hitCount_; michael@0: michael@0: // Text information about the code generated for this block. michael@0: char *code_; michael@0: michael@0: public: michael@0: michael@0: bool init(uint32_t id, uint32_t offset, uint32_t numSuccessors) { michael@0: id_ = id; michael@0: offset_ = offset; michael@0: numSuccessors_ = numSuccessors; michael@0: if (numSuccessors) { michael@0: successors_ = js_pod_calloc(numSuccessors); michael@0: if (!successors_) michael@0: return false; michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: void destroy() { michael@0: js_free(successors_); michael@0: js_free(code_); michael@0: } michael@0: michael@0: uint32_t id() const { michael@0: return id_; michael@0: } michael@0: michael@0: uint32_t offset() const { michael@0: return offset_; michael@0: } michael@0: michael@0: size_t numSuccessors() const { michael@0: return numSuccessors_; michael@0: } michael@0: michael@0: void setSuccessor(size_t i, uint32_t id) { michael@0: JS_ASSERT(i < numSuccessors_); michael@0: successors_[i] = id; michael@0: } michael@0: michael@0: uint32_t successor(size_t i) const { michael@0: JS_ASSERT(i < numSuccessors_); michael@0: return successors_[i]; michael@0: } michael@0: michael@0: uint64_t *addressOfHitCount() { michael@0: return &hitCount_; michael@0: } michael@0: michael@0: uint64_t hitCount() const { michael@0: return hitCount_; michael@0: } michael@0: michael@0: void setCode(const char *code) { michael@0: char *ncode = (char *) js_malloc(strlen(code) + 1); michael@0: if (ncode) { michael@0: strcpy(ncode, code); michael@0: code_ = ncode; michael@0: } michael@0: } michael@0: michael@0: const char *code() const { michael@0: return code_; michael@0: } michael@0: }; michael@0: michael@0: // Execution information for a compiled script which may persist after the michael@0: // IonScript is destroyed, for use during profiling. 
michael@0: struct IonScriptCounts michael@0: { michael@0: private: michael@0: // Any previous invalidated compilation(s) for the script. michael@0: IonScriptCounts *previous_; michael@0: michael@0: // Information about basic blocks in this script. michael@0: size_t numBlocks_; michael@0: IonBlockCounts *blocks_; michael@0: michael@0: public: michael@0: michael@0: IonScriptCounts() { michael@0: mozilla::PodZero(this); michael@0: } michael@0: michael@0: ~IonScriptCounts() { michael@0: for (size_t i = 0; i < numBlocks_; i++) michael@0: blocks_[i].destroy(); michael@0: js_free(blocks_); michael@0: js_delete(previous_); michael@0: } michael@0: michael@0: bool init(size_t numBlocks) { michael@0: numBlocks_ = numBlocks; michael@0: blocks_ = js_pod_calloc(numBlocks); michael@0: return blocks_ != nullptr; michael@0: } michael@0: michael@0: size_t numBlocks() const { michael@0: return numBlocks_; michael@0: } michael@0: michael@0: IonBlockCounts &block(size_t i) { michael@0: JS_ASSERT(i < numBlocks_); michael@0: return blocks_[i]; michael@0: } michael@0: michael@0: void setPrevious(IonScriptCounts *previous) { michael@0: previous_ = previous; michael@0: } michael@0: michael@0: IonScriptCounts *previous() const { michael@0: return previous_; michael@0: } michael@0: }; michael@0: michael@0: struct VMFunction; michael@0: michael@0: class JitCompartment; michael@0: class JitRuntime; michael@0: michael@0: struct AutoFlushICache michael@0: { michael@0: private: michael@0: uintptr_t start_; michael@0: uintptr_t stop_; michael@0: const char *name_; michael@0: bool inhibit_; michael@0: AutoFlushICache *prev_; michael@0: michael@0: public: michael@0: static void setRange(uintptr_t p, size_t len); michael@0: static void flush(uintptr_t p, size_t len); michael@0: static void setInhibit(); michael@0: ~AutoFlushICache(); michael@0: AutoFlushICache(const char *nonce, bool inhibit=false); michael@0: }; michael@0: michael@0: } // namespace jit michael@0: michael@0: namespace gc { michael@0: 
// Marking predicate for VMFunction keys held by WeakMaps.
inline bool
IsMarked(const jit::VMFunction *)
{
    // VMFunction are only static objects which are used by WeakMaps as keys.
    // It is considered as a root object which is always marked.
    return true;
}

} // namespace gc

} // namespace js

#endif /* jit_IonCode_h */