/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_CodeGenerator_shared_h
#define jit_shared_CodeGenerator_shared_h

#include "mozilla/Alignment.h"

#include "jit/IonFrames.h"
#include "jit/IonMacroAssembler.h"
#include "jit/LIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/Safepoints.h"
#include "jit/Snapshots.h"
#include "jit/VMFunctions.h"
#include "vm/ForkJoin.h"

namespace js {
namespace jit {

class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class IonCache;
class OutOfLineAbortPar;
class OutOfLinePropagateAbortPar;

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM;

class OutOfLineTruncateSlow;

struct PatchableBackedgeInfo
{
    CodeOffsetJump backedge;
    Label *loopHeader;
    Label *interruptCheck;

    PatchableBackedgeInfo(CodeOffsetJump backedge, Label *loopHeader, Label *interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};

struct ReciprocalMulConstants {
    int32_t multiplier;
    int32_t shiftAmount;
};

class CodeGeneratorShared : public LInstructionVisitor
{
    js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
    OutOfLineCode *oolIns;

    MacroAssembler &ensureMasm(MacroAssembler *masm);
    mozilla::Maybe<MacroAssembler> maybeMasm_;

  public:
    MacroAssembler &masm;

  protected:
    MIRGenerator *gen;
    LIRGraph &graph;
    LBlock *current;
    SnapshotWriter snapshots_;
    RecoverWriter recovers_;
    JitCode *deoptTable_;
#ifdef DEBUG
    uint32_t pushedArgs_;
#endif
    uint32_t lastOsiPointOffset_;
    SafepointWriter safepoints_;
    Label invalidate_;
    CodeOffsetLabel invalidateEpilogueData_;

    js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
    js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;

    // Mapping from bailout table ID to an offset in the snapshot buffer.
    js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;

    // Allocated data space needed at runtime.
    js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;

    // Vector of information about generated polymorphic inline caches.
    js::Vector<size_t, 0, SystemAllocPolicy> cacheList_;

    // List of stack slots that have been pushed as arguments to an MCall.
    js::Vector<uint32_t, 0, SystemAllocPolicy> pushedArgumentSlots_;

    // Patchable backedges generated for loops.
    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;

#ifdef JS_TRACE_LOGGING
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTraceLoggers_;
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
#endif

    // When profiling is enabled, this is the instrumentation manager which
    // maintains state of what script is currently being generated (for inline
    // scripts) and when instrumentation needs to be emitted or skipped.
    IonInstrumentation sps_;

  protected:
    // The offset of the first instruction of the OSR entry block from the
    // beginning of the code buffer.
    size_t osrEntryOffset_;

    TempAllocator &alloc() const {
        return graph.mir().alloc();
    }

    inline void setOsrEntryOffset(size_t offset) {
        JS_ASSERT(osrEntryOffset_ == 0);
        osrEntryOffset_ = offset;
    }
    inline size_t getOsrEntryOffset() const {
        return osrEntryOffset_;
    }

    // The offset of the first instruction of the body.
    // This skips the arguments type checks.
    size_t skipArgCheckEntryOffset_;

    inline void setSkipArgCheckEntryOffset(size_t offset) {
        JS_ASSERT(skipArgCheckEntryOffset_ == 0);
        skipArgCheckEntryOffset_ = offset;
    }
    inline size_t getSkipArgCheckEntryOffset() const {
        return skipArgCheckEntryOffset_;
    }

    typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;

    bool markArgumentSlots(LSafepoint *safepoint);
    void dropArguments(unsigned argc);

  protected:
    // The initial size of the frame in bytes. These are bytes beyond the
    // constant header present for every Ion frame, used for pre-determined
    // spills.
    int32_t frameDepth_;

    // Frame class this frame's size falls into (see IonFrame.h).
    FrameSizeClass frameClass_;

    // For arguments to the current function.
    inline int32_t ArgToStackOffset(int32_t slot) const {
        return masm.framePushed() +
               (gen->compilingAsmJS() ? NativeFrameSize : sizeof(IonJSFrameLayout)) +
               slot;
    }

    // For the callee of the current function.
    inline int32_t CalleeStackOffset() const {
        return masm.framePushed() + IonJSFrameLayout::offsetOfCalleeToken();
    }

    inline int32_t SlotToStackOffset(int32_t slot) const {
        JS_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
        int32_t offset = masm.framePushed() - slot;
        JS_ASSERT(offset >= 0);
        return offset;
    }
    inline int32_t StackOffsetToSlot(int32_t offset) const {
        // See: SlotToStackOffset. This is used to convert pushed arguments
        // to a slot index that safepoints can use.
        //
        //   offset = framePushed - slot
        //   offset + slot = framePushed
        //   slot = framePushed - offset
        return masm.framePushed() - offset;
    }
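    // For example, with framePushed() == 32 and slot == 8,
    // SlotToStackOffset(8) == 24 and StackOffsetToSlot(24) == 8: the two
    // conversions are inverses of each other.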
    // For argument construction for calls. Argslots are Value-sized.
    inline int32_t StackOffsetOfPassedArg(int32_t slot) const {
        // A slot of 0 is permitted only to calculate %esp offset for calls.
        JS_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
        int32_t offset = masm.framePushed() -
                         graph.paddedLocalSlotsSize() -
                         (slot * sizeof(Value));

        // Passed arguments go below a function's local stack storage.
        // When arguments are being pushed, there is nothing important on the
        // stack. Therefore, it is safe to push the arguments down arbitrarily.
        // Pushing by sizeof(Value) is desirable since everything on the stack
        // is a Value. Note that paddedLocalSlotsSize() aligns to at least a
        // Value boundary specifically to support this.
        JS_ASSERT(offset >= 0);
        JS_ASSERT(offset % sizeof(Value) == 0);
        return offset;
    }

    inline int32_t ToStackOffset(const LAllocation *a) const {
        if (a->isArgument())
            return ArgToStackOffset(a->toArgument()->index());
        return SlotToStackOffset(a->toStackSlot()->slot());
    }

    uint32_t frameSize() const {
        return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
    }

  protected:
    // Ensure the cache is an IonCache while expecting the size of the derived
    // class. We only need the cache list at GC time. Everyone else can just take
    // runtimeData offsets.
    size_t allocateCache(const IonCache &, size_t size) {
        size_t dataOffset = allocateData(size);
        masm.propagateOOM(cacheList_.append(dataOffset));
        return dataOffset;
    }

#ifdef CHECK_OSIPOINT_REGISTERS
    void resetOsiPointRegs(LSafepoint *safepoint);
    bool shouldVerifyOsiPointRegs(LSafepoint *safepoint);
    void verifyOsiPointRegs(LSafepoint *safepoint);
#endif

  public:
    // When appending to runtimeData_, the vector might realloc, leaving pointers
    // into the original vector stale and unusable. DataPtr acts like a pointer,
    // but allows safety in the face of potentially realloc'ing vector appends.
    friend class DataPtr;
    template <typename T>
    class DataPtr
    {
        CodeGeneratorShared *cg_;
        size_t index_;

        T *lookup() {
            return reinterpret_cast<T *>(&cg_->runtimeData_[index_]);
        }
      public:
        DataPtr(CodeGeneratorShared *cg, size_t index)
          : cg_(cg), index_(index) { }

        T * operator ->() {
            return lookup();
        }
        T * operator *() {
            return lookup();
        }
    };
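    // A minimal sketch of the intended DataPtr usage; |MyCache| and
    // |update()| are hypothetical names, not definitions from this file:
    //
    //   size_t index = allocateCache(MyCache(...));
    //   // ... later appends may realloc runtimeData_ ...
    //   DataPtr<MyCache> cache(this, index);
    //   cache->update();  // operator-> re-derives the pointer from the index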
  protected:
    size_t allocateData(size_t size) {
        JS_ASSERT(size % sizeof(void *) == 0);
        size_t dataOffset = runtimeData_.length();
        masm.propagateOOM(runtimeData_.appendN(0, size));
        return dataOffset;
    }

    template <typename T>
    inline size_t allocateCache(const T &cache) {
        size_t index = allocateCache(cache, sizeof(mozilla::AlignedStorage2<T>));
        if (masm.oom())
            return SIZE_MAX;
        // Use the copy constructor on the allocated space.
        JS_ASSERT(index == cacheList_.back());
        new (&runtimeData_[index]) T(cache);
        return index;
    }

  protected:
    // Encodes an LSnapshot into the compressed snapshot buffer, returning
    // false on failure.
    bool encode(LRecoverInfo *recover);
    bool encode(LSnapshot *snapshot);
    bool encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint, uint32_t *startIndex);

    // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
    // If the bailout table is full, this returns false, which is not a fatal
    // error (the code generator may use a slower bailout mechanism).
    bool assignBailoutId(LSnapshot *snapshot);

    // Encode all encountered safepoints in CG-order, and resolve |indices| for
    // safepoint offsets.
    void encodeSafepoints();

    // Mark the safepoint on |ins| as corresponding to the current assembler location.
    // The location should be just after a call.
    bool markSafepoint(LInstruction *ins);
    bool markSafepointAt(uint32_t offset, LInstruction *ins);

    // Mark the OSI point |ins| as corresponding to the current
    // assembler location inside the |osiIndices_|. Return the assembler
    // location for the OSI point return location within
    // |returnPointOffset|.
    bool markOsiPoint(LOsiPoint *ins, uint32_t *returnPointOffset);

    // Ensure that there is enough room between the last OSI point and the
    // current instruction, such that:
    //  (1) Invalidation will not overwrite the current instruction, and
    //  (2) Overwriting the current instruction will not overwrite
    //      an invalidation marker.
    void ensureOsiSpace();

    OutOfLineCode *oolTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateFloat32(const FloatRegister &src, const Register &dest);

    void emitPreBarrier(Register base, const LAllocation *index, MIRType type);
    void emitPreBarrier(Address address, MIRType type);

    inline bool isNextBlock(LBlock *block) {
        return current->mir()->id() + 1 == block->mir()->id();
    }
  public:
    // Save and restore all volatile registers to/from the stack, excluding the
    // specified register(s), before a function call made using callWithABI and
    // after storing the function call's return value to an output register.
    // (The only registers that don't need to be saved/restored are 1) the
    // temporary register used to store the return value of the function call,
    // if there is one [otherwise that stored value would be overwritten]; and
    // 2) temporary registers whose values aren't needed in the rest of the LIR
    // instruction [this is purely an optimization]. All other volatiles must
    // be saved and restored in case future LIR instructions need those values.)
    void saveVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(RegisterSet temps) {
        masm.PushRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void restoreVolatile(RegisterSet temps) {
        masm.PopRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void saveVolatile() {
        masm.PushRegsInMask(RegisterSet::Volatile());
    }
    void restoreVolatile() {
        masm.PopRegsInMask(RegisterSet::Volatile());
    }

    // These functions have to be called before and after any callVM and before
    // any modifications of the stack. Modification of the stack made after
    // these calls should update the framePushed variable, needed by the exit
    // frame produced by callVM.
    inline void saveLive(LInstruction *ins);
    inline void restoreLive(LInstruction *ins);
    inline void restoreLiveIgnore(LInstruction *ins, RegisterSet reg);
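    // A minimal sketch of how the saveVolatile()/restoreVolatile() helpers
    // above are meant to bracket an ABI call; |SomeABIFunction|, |input|,
    // |output|, and |temp| are hypothetical names, not definitions from
    // this file:
    //
    //   saveVolatile(output);
    //   masm.setupUnalignedABICall(1, temp);
    //   masm.passABIArg(input);
    //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, SomeABIFunction));
    //   masm.storeCallResult(output);
    //   restoreVolatile(output);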
    // Save/restore all registers that are both live and volatile.
    inline void saveLiveVolatile(LInstruction *ins);
    inline void restoreLiveVolatile(LInstruction *ins);

    template <typename T>
    void pushArg(const T &t) {
        masm.Push(t);
#ifdef DEBUG
        pushedArgs_++;
#endif
    }

    void storeResultTo(const Register &reg) {
        masm.storeCallResult(reg);
    }

    void storeFloatResultTo(const FloatRegister &reg) {
        masm.storeCallFloatResult(reg);
    }

    template <typename T>
    void storeResultValueTo(const T &t) {
        masm.storeCallResultValue(t);
    }

    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
                                    const StoreOutputTo &out);

    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
        return callVM(f[gen->info().executionMode()], ins, dynStack);
    }

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunctionsModal &f, LInstruction *ins,
                                    const ArgSeq &args, const StoreOutputTo &out)
    {
        return oolCallVM(f[gen->info().executionMode()], ins, args, out);
    }

    bool addCache(LInstruction *lir, size_t cacheIndex);
    size_t addCacheLocations(const CacheLocationList &locs, size_t *numLocs);
    ReciprocalMulConstants computeDivisionConstants(int d);

  protected:
    bool addOutOfLineCode(OutOfLineCode *code);
    bool hasOutOfLineCode() { return !outOfLineCode_.empty(); }
    bool generateOutOfLineCode();

    Label *labelForBackedgeWithImplicitCheck(MBasicBlock *mir);

    // Generate a jump to the start of the specified block, adding information
    // if this is a loop backedge. Use this in place of jumping directly to
    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
    // directly is needed.
    void jumpToBlock(MBasicBlock *mir);
    void jumpToBlock(MBasicBlock *mir, Assembler::Condition cond);

  private:
    void generateInvalidateEpilogue();

  public:
    CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

  public:
    template <class ArgSeq, class StoreOutputTo>
    bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);

    bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);

    bool omitOverRecursedCheck() const;

  public:
    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);

    // Parallel aborts:
    //
    //   Parallel aborts work somewhat differently from sequential
    //   bailouts. When an abort occurs, we first invoke
    //   ReportAbortPar() and then we return JS_ION_ERROR. Each
    //   call on the stack will check for this error return and
    //   propagate it upwards until the C++ code that invoked the ion
    //   code is reached.
    //
    //   The snapshot that is provided to `oolAbortPar` is currently
    //   only used for error reporting, so that we can provide feedback
    //   to the user about which instruction aborted and (perhaps) why.
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                   jsbytecode *bytecode);
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, LInstruction *lir);
    OutOfLinePropagateAbortPar *oolPropagateAbortPar(LInstruction *lir);
    virtual bool visitOutOfLineAbortPar(OutOfLineAbortPar *ool) = 0;
    virtual bool visitOutOfLinePropagateAbortPar(OutOfLinePropagateAbortPar *ool) = 0;

#ifdef JS_TRACE_LOGGING
  protected:
    bool emitTracelogScript(bool isStart);
    bool emitTracelogTree(bool isStart, uint32_t textId);

  public:
    bool emitTracelogScriptStart() {
        return emitTracelogScript(/* isStart = */ true);
    }
    bool emitTracelogScriptStop() {
        return emitTracelogScript(/* isStart = */ false);
    }
    bool emitTracelogStartEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart = */ true, textId);
    }
    bool emitTracelogStopEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart = */ false, textId);
    }
#endif
};

// An out-of-line path is generated at the end of the function.
class OutOfLineCode : public TempObject
{
    Label entry_;
    Label rejoin_;
    uint32_t framePushed_;
    jsbytecode *pc_;
    JSScript *script_;

  public:
    OutOfLineCode()
      : framePushed_(0),
        pc_(nullptr),
        script_(nullptr)
    { }

    virtual bool generate(CodeGeneratorShared *codegen) = 0;

    Label *entry() {
        return &entry_;
    }
    virtual void bind(MacroAssembler *masm) {
        masm->bind(entry());
    }
    Label *rejoin() {
        return &rejoin_;
    }
    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }
    uint32_t framePushed() const {
        return framePushed_;
    }
    void setSource(JSScript *script, jsbytecode *pc) {
        script_ = script;
        pc_ = pc;
    }
    jsbytecode *pc() {
        return pc_;
    }
    JSScript *script() {
        return script_;
    }
};

// For OOL paths that want a specific-typed code generator.
template <typename T>
class OutOfLineCodeBase : public OutOfLineCode
{
  public:
    virtual bool generate(CodeGeneratorShared *codegen) {
        return accept(static_cast<T *>(codegen));
    }

  public:
    virtual bool accept(T *codegen) = 0;
};
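// A minimal sketch of a typed out-of-line path built on OutOfLineCodeBase;
// |OutOfLineFoo| and |visitOutOfLineFoo| are hypothetical names:
//
//   class OutOfLineFoo : public OutOfLineCodeBase<CodeGenerator>
//   {
//     public:
//       bool accept(CodeGenerator *codegen) {
//           return codegen->visitOutOfLineFoo(this);
//       }
//   };
//
// The emitter jumps to ool->entry() on the slow path; generate() emits the
// slow-path code and ends by jumping back to ool->rejoin().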
// ArgSeq stores arguments for OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of this function is an instance of a class which provides a
// "generate" function to call the "pushArg" needed by the VMFunction call.
// The list of arguments can be created by using the ArgList function, which
// creates an empty list of arguments. Arguments are added to this list by
// using the comma operator. The type of the argument list is returned by the
// comma operator, and due to template arguments, it is quite painful to write
// by hand. It is recommended to use it directly as the argument of a template
// function, which lets the compiler infer the type (as oolCallVM does). The
// list of arguments must be written in the same order as if you were calling
// the function in C++.
//
// Example:
//   (ArgList(), ToRegister(lir->lhs()), ToRegister(lir->rhs()))

template <typename SeqType, typename LastType>
class ArgSeq : public SeqType
{
  private:
    typedef ArgSeq<SeqType, LastType> ThisType;
    LastType last_;

  public:
    ArgSeq(const SeqType &seq, const LastType &last)
      : SeqType(seq),
        last_(last)
    { }

    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->pushArg(last_);
        this->SeqType::generate(codegen);
    }
};

// Mark the end of an argument list.
template <>
class ArgSeq<void, void>
{
  private:
    typedef ArgSeq<void, void> ThisType;

  public:
    ArgSeq() { }
    ArgSeq(const ThisType &) { }

    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    inline void generate(CodeGeneratorShared *codegen) const {
    }
};

inline ArgSeq<void, void>
ArgList()
{
    return ArgSeq<void, void>();
}
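// For instance, the example above expands as follows: the first comma
// operator yields ArgSeq<ArgSeq<void, void>, Register>, and the second
// yields ArgSeq<ArgSeq<ArgSeq<void, void>, Register>, Register>. Calling
// generate() on the result pushes last_ before recursing into the base
// class, so arguments are pushed right-to-left.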
// Store wrappers, to generate the right move of data after the VM call.

struct StoreNothing
{
    inline void generate(CodeGeneratorShared *codegen) const {
    }
    inline RegisterSet clobbered() const {
        return RegisterSet(); // No register gets clobbered
    }
};

class StoreRegisterTo
{
  private:
    Register out_;

  public:
    StoreRegisterTo(const Register &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeResultTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

class StoreFloatRegisterTo
{
  private:
    FloatRegister out_;

  public:
    StoreFloatRegisterTo(const FloatRegister &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeFloatResultTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

template <typename Output>
class StoreValueTo_
{
  private:
    Output out_;

  public:
    StoreValueTo_(const Output &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeResultValueTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output &out)
{
    return StoreValueTo_<Output>(out);
}

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
{
  private:
    LInstruction *lir_;
    const VMFunction &fun_;
    ArgSeq args_;
    StoreOutputTo out_;

  public:
    OutOfLineCallVM(LInstruction *lir, const VMFunction &fun, const ArgSeq &args,
                    const StoreOutputTo &out)
      : lir_(lir),
        fun_(fun),
        args_(args),
        out_(out)
    { }

    bool accept(CodeGeneratorShared *codegen) {
        return codegen->visitOutOfLineCallVM(this);
    }

    LInstruction *lir() const { return lir_; }
    const VMFunction &function() const { return fun_; }
    const ArgSeq &args() const { return args_; }
    const StoreOutputTo &out() const { return out_; }
};

template <class ArgSeq, class StoreOutputTo>
inline OutOfLineCode *
CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args,
                               const StoreOutputTo &out)
{
    OutOfLineCode *ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
    if (!addOutOfLineCode(ool))
        return nullptr;
    return ool;
}
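// A minimal sketch of how oolCallVM is typically used from a visit function;
// |SomeFunctionInfo| and the lir accessors are hypothetical names:
//
//   OutOfLineCode *ool = oolCallVM(SomeFunctionInfo, lir,
//                                  (ArgList(), ToRegister(lir->input())),
//                                  StoreRegisterTo(ToRegister(lir->output())));
//   if (!ool)
//       return false;
//   // ... fast path; branch to ool->entry() when the VM call is needed ...
//   masm.bind(ool->rejoin());
//   return true;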
template <class ArgSeq, class StoreOutputTo>
bool
CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool)
{
    LInstruction *lir = ool->lir();

    saveLive(lir);
    ool->args().generate(this);
    if (!callVM(ool->function(), lir))
        return false;
    ool->out().generate(this);
    restoreLiveIgnore(lir, ool->out().clobbered());
    masm.jump(ool->rejoin());
    return true;
}

// Initiate a parallel abort. The snapshot is used to record the
// cause.
class OutOfLineAbortPar : public OutOfLineCode
{
  private:
    ParallelBailoutCause cause_;
    MBasicBlock *basicBlock_;
    jsbytecode *bytecode_;

  public:
    OutOfLineAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock, jsbytecode *bytecode)
      : cause_(cause),
        basicBlock_(basicBlock),
        bytecode_(bytecode)
    { }

    ParallelBailoutCause cause() {
        return cause_;
    }

    MBasicBlock *basicBlock() {
        return basicBlock_;
    }

    jsbytecode *bytecode() {
        return bytecode_;
    }

    bool generate(CodeGeneratorShared *codegen);
};

// Used when some callee has aborted.
class OutOfLinePropagateAbortPar : public OutOfLineCode
{
  private:
    LInstruction *lir_;

  public:
    OutOfLinePropagateAbortPar(LInstruction *lir)
      : lir_(lir)
    { }

    LInstruction *lir() { return lir_; }

    bool generate(CodeGeneratorShared *codegen);
};

extern const VMFunction InterruptCheckInfo;

} // namespace jit
} // namespace js

#endif /* jit_shared_CodeGenerator_shared_h */