js/src/jit/shared/CodeGenerator-shared.h

author: Michael Schloh von Bennewitz <michael@schloh.com>
date: Wed, 31 Dec 2014 06:09:35 +0100
changeset: 0:6474c204b198
permissions: -rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_CodeGenerator_shared_h
#define jit_shared_CodeGenerator_shared_h

#include "mozilla/Alignment.h"

#include "jit/IonFrames.h"
#include "jit/IonMacroAssembler.h"
#include "jit/LIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/Safepoints.h"
#include "jit/Snapshots.h"
#include "jit/VMFunctions.h"
#include "vm/ForkJoin.h"

namespace js {
namespace jit {

class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class IonCache;
class OutOfLineAbortPar;
class OutOfLinePropagateAbortPar;

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM;

class OutOfLineTruncateSlow;

struct PatchableBackedgeInfo
{
    CodeOffsetJump backedge;
    Label *loopHeader;
    Label *interruptCheck;

    PatchableBackedgeInfo(CodeOffsetJump backedge, Label *loopHeader, Label *interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};

struct ReciprocalMulConstants {
    int32_t multiplier;
    int32_t shiftAmount;
};
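
// A minimal sketch of the idea behind these constants (an assumption drawn
// from the general reciprocal-multiplication technique, not upstream
// documentation; the actual code generated around computeDivisionConstants()
// also handles multipliers that wrap a signed 32-bit value):
//
//   // Hypothetical helper: divide x by the fixed divisor that 'rmc' was
//   // computed for, assuming rmc.multiplier fits in a signed 32-bit value.
//   static int32_t divideByConstant(int32_t x, ReciprocalMulConstants rmc) {
//       int64_t product = int64_t(x) * int64_t(rmc.multiplier);
//       int32_t quotient = int32_t(product >> (32 + rmc.shiftAmount));
//       return quotient - (x >> 31); // round toward zero for negative x
//   }
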
class CodeGeneratorShared : public LInstructionVisitor
{
    js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
    OutOfLineCode *oolIns;

    MacroAssembler &ensureMasm(MacroAssembler *masm);
    mozilla::Maybe<MacroAssembler> maybeMasm_;

  public:
    MacroAssembler &masm;

  protected:
    MIRGenerator *gen;
    LIRGraph &graph;
    LBlock *current;
    SnapshotWriter snapshots_;
    RecoverWriter recovers_;
    JitCode *deoptTable_;
#ifdef DEBUG
    uint32_t pushedArgs_;
#endif
    uint32_t lastOsiPointOffset_;
    SafepointWriter safepoints_;
    Label invalidate_;
    CodeOffsetLabel invalidateEpilogueData_;

    js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
    js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;

    // Mapping from bailout table ID to an offset in the snapshot buffer.
    js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;

    // Allocated data space needed at runtime.
    js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;

    // Vector of information about generated polymorphic inline caches.
    js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;

    // List of stack slots that have been pushed as arguments to an MCall.
    js::Vector<uint32_t, 0, SystemAllocPolicy> pushedArgumentSlots_;

    // Patchable backedges generated for loops.
    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;

#ifdef JS_TRACE_LOGGING
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTraceLoggers_;
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
#endif

    // When profiling is enabled, this is the instrumentation manager which
    // maintains state of what script is currently being generated (for inline
    // scripts) and when instrumentation needs to be emitted or skipped.
    IonInstrumentation sps_;

  protected:
    // The offset of the first instruction of the OSR entry block from the
    // beginning of the code buffer.
    size_t osrEntryOffset_;

    TempAllocator &alloc() const {
        return graph.mir().alloc();
    }

    inline void setOsrEntryOffset(size_t offset) {
        JS_ASSERT(osrEntryOffset_ == 0);
        osrEntryOffset_ = offset;
    }
    inline size_t getOsrEntryOffset() const {
        return osrEntryOffset_;
    }

    // The offset of the first instruction of the body.
    // This skips the argument type checks.
    size_t skipArgCheckEntryOffset_;

    inline void setSkipArgCheckEntryOffset(size_t offset) {
        JS_ASSERT(skipArgCheckEntryOffset_ == 0);
        skipArgCheckEntryOffset_ = offset;
    }
    inline size_t getSkipArgCheckEntryOffset() const {
        return skipArgCheckEntryOffset_;
    }

    typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;

    bool markArgumentSlots(LSafepoint *safepoint);
    void dropArguments(unsigned argc);

  protected:
    // The initial size of the frame in bytes. These are bytes beyond the
    // constant header present for every Ion frame, used for pre-determined
    // spills.
    int32_t frameDepth_;

    // Frame class this frame's size falls into (see IonFrame.h).
    FrameSizeClass frameClass_;

    // For arguments to the current function.
    inline int32_t ArgToStackOffset(int32_t slot) const {
        return masm.framePushed() +
               (gen->compilingAsmJS() ? NativeFrameSize : sizeof(IonJSFrameLayout)) +
               slot;
    }

    // For the callee of the current function.
    inline int32_t CalleeStackOffset() const {
        return masm.framePushed() + IonJSFrameLayout::offsetOfCalleeToken();
    }

    inline int32_t SlotToStackOffset(int32_t slot) const {
        JS_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
        int32_t offset = masm.framePushed() - slot;
        JS_ASSERT(offset >= 0);
        return offset;
    }
    inline int32_t StackOffsetToSlot(int32_t offset) const {
        // See: SlotToStackOffset. This is used to convert pushed arguments
        // to a slot index that safepoints can use.
        //
        // offset = framePushed - slot
        // offset + slot = framePushed
        // slot = framePushed - offset
        return masm.framePushed() - offset;
    }

    // For argument construction for calls. Argslots are Value-sized.
    inline int32_t StackOffsetOfPassedArg(int32_t slot) const {
        // A slot of 0 is permitted only to calculate %esp offset for calls.
        JS_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
        int32_t offset = masm.framePushed() -
                       graph.paddedLocalSlotsSize() -
                       (slot * sizeof(Value));

        // Passed arguments go below a function's local stack storage.
        // When arguments are being pushed, there is nothing important on the stack.
        // Therefore, it is safe to push the arguments down arbitrarily.  Pushing
        // by sizeof(Value) is desirable since everything on the stack is a Value.
        // Note that paddedLocalSlotCount() aligns to at least a Value boundary
        // specifically to support this.
        JS_ASSERT(offset >= 0);
        JS_ASSERT(offset % sizeof(Value) == 0);
        return offset;
    }

    inline int32_t ToStackOffset(const LAllocation *a) const {
        if (a->isArgument())
            return ArgToStackOffset(a->toArgument()->index());
        return SlotToStackOffset(a->toStackSlot()->slot());
    }

    uint32_t frameSize() const {
        return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
    }

  protected:
    // Ensure the cache is an IonCache while expecting the size of the derived
    // class. We only need the cache list at GC time. Everyone else can just take
    // runtimeData offsets.
    size_t allocateCache(const IonCache &, size_t size) {
        size_t dataOffset = allocateData(size);
        masm.propagateOOM(cacheList_.append(dataOffset));
        return dataOffset;
    }

#ifdef CHECK_OSIPOINT_REGISTERS
    void resetOsiPointRegs(LSafepoint *safepoint);
    bool shouldVerifyOsiPointRegs(LSafepoint *safepoint);
    void verifyOsiPointRegs(LSafepoint *safepoint);
#endif

  public:

    // When appending to runtimeData_, the vector might realloc, leaving pointers
    // into the original vector stale and unusable. DataPtr acts like a pointer,
    // but allows safety in the face of potentially realloc'ing vector appends.
    friend class DataPtr;
    template <typename T>
    class DataPtr
    {
        CodeGeneratorShared *cg_;
        size_t index_;

        T *lookup() {
            return reinterpret_cast<T *>(&cg_->runtimeData_[index_]);
        }
      public:
        DataPtr(CodeGeneratorShared *cg, size_t index)
          : cg_(cg), index_(index) { }

        T * operator ->() {
            return lookup();
        }
        T * operator *() {
            return lookup();
        }
    };
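
    // Usage sketch (illustrative only; 'ic' and 'someField' are hypothetical):
    // hold the runtimeData_ offset rather than a raw pointer, and re-resolve
    // through DataPtr on every access, so that an intervening append (and
    // possible realloc) of runtimeData_ cannot leave a stale pointer behind.
    //
    //   size_t index = allocateCache(ic);
    //   DataPtr<IonCache> cachePtr(this, index);
    //   ... // further appends to runtimeData_ may move the buffer
    //   cachePtr->someField;  // lookup() resolves the address here, not earlier
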
  protected:

    size_t allocateData(size_t size) {
        JS_ASSERT(size % sizeof(void *) == 0);
        size_t dataOffset = runtimeData_.length();
        masm.propagateOOM(runtimeData_.appendN(0, size));
        return dataOffset;
    }

    template <typename T>
    inline size_t allocateCache(const T &cache) {
        size_t index = allocateCache(cache, sizeof(mozilla::AlignedStorage2<T>));
        if (masm.oom())
            return SIZE_MAX;
        // Use the copy constructor on the allocated space.
        JS_ASSERT(index == cacheList_.back());
        new (&runtimeData_[index]) T(cache);
        return index;
    }

  protected:
    // Encodes an LSnapshot into the compressed snapshot buffer, returning
    // false on failure.
    bool encode(LRecoverInfo *recover);
    bool encode(LSnapshot *snapshot);
    bool encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint, uint32_t *startIndex);

    // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
    // If the bailout table is full, this returns false, which is not a fatal
    // error (the code generator may use a slower bailout mechanism).
    bool assignBailoutId(LSnapshot *snapshot);

    // Encode all encountered safepoints in CG-order, and resolve |indices| for
    // safepoint offsets.
    void encodeSafepoints();

    // Mark the safepoint on |ins| as corresponding to the current assembler location.
    // The location should be just after a call.
    bool markSafepoint(LInstruction *ins);
    bool markSafepointAt(uint32_t offset, LInstruction *ins);

    // Mark the OSI point |ins| as corresponding to the current
    // assembler location inside the |osiIndices_|. Return the assembler
    // location for the OSI point return location within
    // |returnPointOffset|.
    bool markOsiPoint(LOsiPoint *ins, uint32_t *returnPointOffset);

    // Ensure that there is enough room between the last OSI point and the
    // current instruction, such that:
    //  (1) Invalidation will not overwrite the current instruction, and
    //  (2) Overwriting the current instruction will not overwrite
    //      an invalidation marker.
    void ensureOsiSpace();

    OutOfLineCode *oolTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateFloat32(const FloatRegister &src, const Register &dest);

    void emitPreBarrier(Register base, const LAllocation *index, MIRType type);
    void emitPreBarrier(Address address, MIRType type);

    inline bool isNextBlock(LBlock *block) {
        return current->mir()->id() + 1 == block->mir()->id();
    }

  public:
    // Save and restore all volatile registers to/from the stack, excluding the
    // specified register(s), before a function call made using callWithABI and
    // after storing the function call's return value to an output register.
    // (The only registers that don't need to be saved/restored are 1) the
    // temporary register used to store the return value of the function call,
    // if there is one [otherwise that stored value would be overwritten]; and
    // 2) temporary registers whose values aren't needed in the rest of the LIR
    // instruction [this is purely an optimization].  All other volatiles must
    // be saved and restored in case future LIR instructions need those values.)
    void saveVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(RegisterSet temps) {
        masm.PushRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void restoreVolatile(RegisterSet temps) {
        masm.PopRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void saveVolatile() {
        masm.PushRegsInMask(RegisterSet::Volatile());
    }
    void restoreVolatile() {
        masm.PopRegsInMask(RegisterSet::Volatile());
    }
    // These functions have to be called before and after any callVM and before
    // any modifications of the stack.  Modification of the stack made after
    // these calls should update the framePushed variable, needed by the exit
    // frame produced by callVM.
    inline void saveLive(LInstruction *ins);
    inline void restoreLive(LInstruction *ins);
    inline void restoreLiveIgnore(LInstruction *ins, RegisterSet reg);

    // Save/restore all registers that are both live and volatile.
    inline void saveLiveVolatile(LInstruction *ins);
    inline void restoreLiveVolatile(LInstruction *ins);

    template <typename T>
    void pushArg(const T &t) {
        masm.Push(t);
#ifdef DEBUG
        pushedArgs_++;
#endif
    }

    void storeResultTo(const Register &reg) {
        masm.storeCallResult(reg);
    }

    void storeFloatResultTo(const FloatRegister &reg) {
        masm.storeCallFloatResult(reg);
    }

    template <typename T>
    void storeResultValueTo(const T &t) {
        masm.storeCallResultValue(t);
    }

    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
                                    const StoreOutputTo &out);

    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
        return callVM(f[gen->info().executionMode()], ins, dynStack);
    }

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunctionsModal &f, LInstruction *ins,
                                    const ArgSeq &args, const StoreOutputTo &out)
    {
        return oolCallVM(f[gen->info().executionMode()], ins, args, out);
    }

    bool addCache(LInstruction *lir, size_t cacheIndex);
    size_t addCacheLocations(const CacheLocationList &locs, size_t *numLocs);
    ReciprocalMulConstants computeDivisionConstants(int d);

  protected:
    bool addOutOfLineCode(OutOfLineCode *code);
    bool hasOutOfLineCode() { return !outOfLineCode_.empty(); }
    bool generateOutOfLineCode();

    Label *labelForBackedgeWithImplicitCheck(MBasicBlock *mir);

    // Generate a jump to the start of the specified block, adding information
    // if this is a loop backedge. Use this in place of jumping directly to
    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
    // directly is needed.
    void jumpToBlock(MBasicBlock *mir);
    void jumpToBlock(MBasicBlock *mir, Assembler::Condition cond);

  private:
    void generateInvalidateEpilogue();

  public:
    CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

  public:
    template <class ArgSeq, class StoreOutputTo>
    bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);

    bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);

    bool omitOverRecursedCheck() const;

  public:
    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);

    // Parallel aborts:
    //
    //    Parallel aborts work somewhat differently from sequential
    //    bailouts.  When an abort occurs, we first invoke
    //    ReportAbortPar() and then we return JS_ION_ERROR.  Each
    //    call on the stack will check for this error return and
    //    propagate it upwards until the C++ code that invoked the ion
    //    code is reached.
    //
    //    The snapshot that is provided to `oolAbortPar` is currently
    //    only used for error reporting, so that we can provide feedback
    //    to the user about which instruction aborted and (perhaps) why.
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                   jsbytecode *bytecode);
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, LInstruction *lir);
    OutOfLinePropagateAbortPar *oolPropagateAbortPar(LInstruction *lir);
    virtual bool visitOutOfLineAbortPar(OutOfLineAbortPar *ool) = 0;
    virtual bool visitOutOfLinePropagateAbortPar(OutOfLinePropagateAbortPar *ool) = 0;

#ifdef JS_TRACE_LOGGING
  protected:
    bool emitTracelogScript(bool isStart);
    bool emitTracelogTree(bool isStart, uint32_t textId);

  public:
    bool emitTracelogScriptStart() {
        return emitTracelogScript(/* isStart = */ true);
    }
    bool emitTracelogScriptStop() {
        return emitTracelogScript(/* isStart = */ false);
    }
    bool emitTracelogStartEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart = */ true, textId);
    }
    bool emitTracelogStopEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart = */ false, textId);
    }
#endif
};

// An out-of-line path is generated at the end of the function.
class OutOfLineCode : public TempObject
{
    Label entry_;
    Label rejoin_;
    uint32_t framePushed_;
    jsbytecode *pc_;
    JSScript *script_;

  public:
    OutOfLineCode()
      : framePushed_(0),
        pc_(nullptr),
        script_(nullptr)
    { }

    virtual bool generate(CodeGeneratorShared *codegen) = 0;

    Label *entry() {
        return &entry_;
    }
    virtual void bind(MacroAssembler *masm) {
        masm->bind(entry());
    }
    Label *rejoin() {
        return &rejoin_;
    }
    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }
    uint32_t framePushed() const {
        return framePushed_;
    }
    void setSource(JSScript *script, jsbytecode *pc) {
        script_ = script;
        pc_ = pc;
    }
    jsbytecode *pc() {
        return pc_;
    }
    JSScript *script() {
        return script_;
    }
};

// For OOL paths that want a specific-typed code generator.
template <typename T>
class OutOfLineCodeBase : public OutOfLineCode
{
  public:
    virtual bool generate(CodeGeneratorShared *codegen) {
        return accept(static_cast<T *>(codegen));
    }

  public:
    virtual bool accept(T *codegen) = 0;
};

// ArgSeq stores arguments for OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of this function is an instance of a class which provides a
// "generate" function to perform the "pushArg" calls needed by the VMFunction
// call.  The list of arguments can be created by using the ArgList function,
// which creates an empty list of arguments.  Arguments are added to this list
// by using the comma operator.  The type of the argument list is returned by
// the comma operator, and due to template arguments it is quite painful to
// write by hand.  It is recommended to use it directly as an argument of a
// template function which would get its arguments inferred by the compiler
// (such as oolCallVM).  The list of arguments must be written in the same
// order as if you were calling the function in C++.
//
// Example:
//   (ArgList(), ToRegister(lir->lhs()), ToRegister(lir->rhs()))

template <class SeqType, typename LastType>
class ArgSeq : public SeqType
{
  private:
    typedef ArgSeq<SeqType, LastType> ThisType;
    LastType last_;

  public:
    ArgSeq(const SeqType &seq, const LastType &last)
      : SeqType(seq),
        last_(last)
    { }

    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->pushArg(last_);
        this->SeqType::generate(codegen);
    }
};

// Mark the end of an argument list.
template <>
class ArgSeq<void, void>
{
  private:
    typedef ArgSeq<void, void> ThisType;

  public:
    ArgSeq() { }
    ArgSeq(const ThisType &) { }

    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    inline void generate(CodeGeneratorShared *codegen) const {
    }
};

inline ArgSeq<void, void>
ArgList()
{
    return ArgSeq<void, void>();
}
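
// Combining ArgList with the Store* wrappers defined below, a call site looks
// roughly like this (a sketch: 'SomeVMFunctionInfo' and the lir->input() and
// lir->output() accessors are hypothetical placeholders, not names from this
// file):
//
//   OutOfLineCode *ool = oolCallVM(SomeVMFunctionInfo, lir,
//                                  (ArgList(), ToRegister(lir->input())),
//                                  StoreRegisterTo(ToRegister(lir->output())));
//   if (!ool)
//       return false;
//   // ... fast path; branch to ool->entry() when the VM call is needed ...
//   masm.bind(ool->rejoin());
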
// Store wrappers, to generate the right move of data after the VM call.

struct StoreNothing
{
    inline void generate(CodeGeneratorShared *codegen) const {
    }
    inline RegisterSet clobbered() const {
        return RegisterSet(); // No register gets clobbered
    }
};

class StoreRegisterTo
{
  private:
    Register out_;

  public:
    StoreRegisterTo(const Register &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeResultTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

class StoreFloatRegisterTo
{
  private:
    FloatRegister out_;

  public:
    StoreFloatRegisterTo(const FloatRegister &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeFloatResultTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

template <typename Output>
class StoreValueTo_
{
  private:
    Output out_;

  public:
    StoreValueTo_(const Output &out)
      : out_(out)
    { }

    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->storeResultValueTo(out_);
    }
    inline RegisterSet clobbered() const {
        RegisterSet set = RegisterSet();
        set.add(out_);
        return set;
    }
};

template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output &out)
{
    return StoreValueTo_<Output>(out);
}

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
{
  private:
    LInstruction *lir_;
    const VMFunction &fun_;
    ArgSeq args_;
    StoreOutputTo out_;

  public:
    OutOfLineCallVM(LInstruction *lir, const VMFunction &fun, const ArgSeq &args,
                    const StoreOutputTo &out)
      : lir_(lir),
        fun_(fun),
        args_(args),
        out_(out)
    { }

    bool accept(CodeGeneratorShared *codegen) {
        return codegen->visitOutOfLineCallVM(this);
    }

    LInstruction *lir() const { return lir_; }
    const VMFunction &function() const { return fun_; }
    const ArgSeq &args() const { return args_; }
    const StoreOutputTo &out() const { return out_; }
};

template <class ArgSeq, class StoreOutputTo>
inline OutOfLineCode *
CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args,
                               const StoreOutputTo &out)
{
    OutOfLineCode *ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
    if (!addOutOfLineCode(ool))
        return nullptr;
    return ool;
}

template <class ArgSeq, class StoreOutputTo>
bool
CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool)
{
    LInstruction *lir = ool->lir();

    saveLive(lir);
    ool->args().generate(this);
    if (!callVM(ool->function(), lir))
        return false;
    ool->out().generate(this);
    restoreLiveIgnore(lir, ool->out().clobbered());
    masm.jump(ool->rejoin());
    return true;
}

// Initiate a parallel abort.  The snapshot is used to record the
// cause.
class OutOfLineAbortPar : public OutOfLineCode
{
  private:
    ParallelBailoutCause cause_;
    MBasicBlock *basicBlock_;
    jsbytecode *bytecode_;

  public:
    OutOfLineAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock, jsbytecode *bytecode)
      : cause_(cause),
        basicBlock_(basicBlock),
        bytecode_(bytecode)
    { }

    ParallelBailoutCause cause() {
        return cause_;
    }

    MBasicBlock *basicBlock() {
        return basicBlock_;
    }

    jsbytecode *bytecode() {
        return bytecode_;
    }

    bool generate(CodeGeneratorShared *codegen);
};

// Used when some callee has aborted.
class OutOfLinePropagateAbortPar : public OutOfLineCode
{
  private:
    LInstruction *lir_;

  public:
    OutOfLinePropagateAbortPar(LInstruction *lir)
      : lir_(lir)
    { }

    LInstruction *lir() { return lir_; }

    bool generate(CodeGeneratorShared *codegen);
};

extern const VMFunction InterruptCheckInfo;

} // namespace jit
} // namespace js

#endif /* jit_shared_CodeGenerator_shared_h */
