--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit/shared/CodeGenerator-shared.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,794 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_h
+#define jit_shared_CodeGenerator_shared_h
+
+#include "mozilla/Alignment.h"
+
+#include "jit/IonFrames.h"
+#include "jit/IonMacroAssembler.h"
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+#include "jit/VMFunctions.h"
+#include "vm/ForkJoin.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineCode;
+class CodeGenerator;
+class MacroAssembler;
+class IonCache;
+class OutOfLineAbortPar;
+class OutOfLinePropagateAbortPar;
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM;
+
+class OutOfLineTruncateSlow;
+
+struct PatchableBackedgeInfo
+{
+    CodeOffsetJump backedge;
+    Label *loopHeader;
+    Label *interruptCheck;
+
+    PatchableBackedgeInfo(CodeOffsetJump backedge, Label *loopHeader, Label *interruptCheck)
+      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
+    {}
+};
+
+struct ReciprocalMulConstants {
+    int32_t multiplier;
+    int32_t shiftAmount;
+};
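+// These constants let the code generator replace an integer division by a
+// compile-time constant d with a multiply-high and a shift (see Hacker's
+// Delight, section 10-4); computeDivisionConstants() below produces such a
+// pair. Illustrative sketch only -- the exact correction steps emitted depend
+// on the signs involved. One classical variant, for x / 3 with
+// multiplier M = 0x55555556 and shiftAmount = 0:
+//
+//   int32_t q = int32_t((int64_t(M) * x) >> 32); // high word of M*x
+//   q >>= shiftAmount;                           // here shiftAmount == 0
+//   q += uint32_t(x) >> 31;                      // round toward zero when x < 0
+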
+class CodeGeneratorShared : public LInstructionVisitor
+{
+    js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
+    OutOfLineCode *oolIns;
+
+    MacroAssembler &ensureMasm(MacroAssembler *masm);
+    mozilla::Maybe<MacroAssembler> maybeMasm_;
+
+  public:
+    MacroAssembler &masm;
+
+  protected:
+    MIRGenerator *gen;
+    LIRGraph &graph;
+    LBlock *current;
+    SnapshotWriter snapshots_;
+    RecoverWriter recovers_;
+    JitCode *deoptTable_;
+#ifdef DEBUG
+    uint32_t pushedArgs_;
+#endif
+    uint32_t lastOsiPointOffset_;
+    SafepointWriter safepoints_;
+    Label invalidate_;
+    CodeOffsetLabel invalidateEpilogueData_;
+
+    js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
+    js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
+
+    // Mapping from bailout table ID to an offset in the snapshot buffer.
+    js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;
+
+    // Allocated data space needed at runtime.
+    js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
+
+    // Vector of information about generated polymorphic inline caches.
+    js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;
+
+    // List of stack slots that have been pushed as arguments to an MCall.
+    js::Vector<uint32_t, 0, SystemAllocPolicy> pushedArgumentSlots_;
+
+    // Patchable backedges generated for loops.
+    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;
+
+#ifdef JS_TRACE_LOGGING
+    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTraceLoggers_;
+    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
+#endif
+
+    // When profiling is enabled, this is the instrumentation manager which
+    // maintains state about which script is currently being generated (for
+    // inline scripts) and when instrumentation needs to be emitted or skipped.
+    IonInstrumentation sps_;
+
+  protected:
+    // The offset of the first instruction of the OSR entry block from the
+    // beginning of the code buffer.
+    size_t osrEntryOffset_;
+
+    TempAllocator &alloc() const {
+        return graph.mir().alloc();
+    }
+
+    inline void setOsrEntryOffset(size_t offset) {
+        JS_ASSERT(osrEntryOffset_ == 0);
+        osrEntryOffset_ = offset;
+    }
+    inline size_t getOsrEntryOffset() const {
+        return osrEntryOffset_;
+    }
+
+    // The offset of the first instruction of the body.
+    // This skips the argument type checks.
+    size_t skipArgCheckEntryOffset_;
+
+    inline void setSkipArgCheckEntryOffset(size_t offset) {
+        JS_ASSERT(skipArgCheckEntryOffset_ == 0);
+        skipArgCheckEntryOffset_ = offset;
+    }
+    inline size_t getSkipArgCheckEntryOffset() const {
+        return skipArgCheckEntryOffset_;
+    }
+
+    typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;
+
+    bool markArgumentSlots(LSafepoint *safepoint);
+    void dropArguments(unsigned argc);
+
+  protected:
+    // The initial size of the frame in bytes. These are bytes beyond the
+    // constant header present for every Ion frame, used for pre-determined
+    // spills.
+    int32_t frameDepth_;
+
+    // Frame class this frame's size falls into (see IonFrame.h).
+    FrameSizeClass frameClass_;
+
+    // For arguments to the current function.
+    inline int32_t ArgToStackOffset(int32_t slot) const {
+        return masm.framePushed() +
+               (gen->compilingAsmJS() ? NativeFrameSize : sizeof(IonJSFrameLayout)) +
+               slot;
+    }
+
+    // For the callee of the current function.
+    inline int32_t CalleeStackOffset() const {
+        return masm.framePushed() + IonJSFrameLayout::offsetOfCalleeToken();
+    }
+
+    inline int32_t SlotToStackOffset(int32_t slot) const {
+        JS_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
+        int32_t offset = masm.framePushed() - slot;
+        JS_ASSERT(offset >= 0);
+        return offset;
+    }
+    inline int32_t StackOffsetToSlot(int32_t offset) const {
+        // See: SlotToStackOffset. This is used to convert pushed arguments
+        // to a slot index that safepoints can use.
+        //
+        //   offset = framePushed - slot
+        //   offset + slot = framePushed
+        //   slot = framePushed - offset
+        return masm.framePushed() - offset;
+    }
+
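+    // Worked example of the mappings above (the numbers are illustrative
+    // only): with framePushed() == 64, local slot 8 lives at stack offset
+    // 64 - 8 == 56, and StackOffsetToSlot(56) recovers 64 - 56 == 8.
+    // Argument slots instead sit above the frame header, so with a header of
+    // H bytes (H depends on the platform frame layout), argument slot 4 maps
+    // to offset 64 + H + 4.
+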
+    // For argument construction for calls. Argslots are Value-sized.
+    inline int32_t StackOffsetOfPassedArg(int32_t slot) const {
+        // A slot of 0 is permitted only to calculate %esp offset for calls.
+        JS_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
+        int32_t offset = masm.framePushed() -
+                         graph.paddedLocalSlotsSize() -
+                         (slot * sizeof(Value));
+
+        // Passed arguments go below a function's local stack storage.
+        // When arguments are being pushed, there is nothing important on the
+        // stack, so it is safe to push the arguments down arbitrarily. Pushing
+        // by sizeof(Value) is desirable since everything on the stack is a
+        // Value. Note that paddedLocalSlotsSize() aligns to at least a Value
+        // boundary specifically to support this.
+        JS_ASSERT(offset >= 0);
+        JS_ASSERT(offset % sizeof(Value) == 0);
+        return offset;
+    }
+
+    inline int32_t ToStackOffset(const LAllocation *a) const {
+        if (a->isArgument())
+            return ArgToStackOffset(a->toArgument()->index());
+        return SlotToStackOffset(a->toStackSlot()->slot());
+    }
+
+    uint32_t frameSize() const {
+        return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
+    }
+
+  protected:
+    // Ensure the cache is an IonCache while expecting the size of the derived
+    // class. We only need the cache list at GC time. Everyone else can just
+    // take runtimeData offsets.
+    size_t allocateCache(const IonCache &, size_t size) {
+        size_t dataOffset = allocateData(size);
+        masm.propagateOOM(cacheList_.append(dataOffset));
+        return dataOffset;
+    }
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+    void resetOsiPointRegs(LSafepoint *safepoint);
+    bool shouldVerifyOsiPointRegs(LSafepoint *safepoint);
+    void verifyOsiPointRegs(LSafepoint *safepoint);
+#endif
+
+  public:
+
+    // When appending to runtimeData_, the vector might realloc, leaving
+    // pointers into the original vector stale and unusable. DataPtr acts like
+    // a pointer, but allows safety in the face of potentially realloc'ing
+    // vector appends.
+    friend class DataPtr;
+    template <typename T>
+    class DataPtr
+    {
+        CodeGeneratorShared *cg_;
+        size_t index_;
+
+        T *lookup() {
+            return reinterpret_cast<T *>(&cg_->runtimeData_[index_]);
+        }
+      public:
+        DataPtr(CodeGeneratorShared *cg, size_t index)
+          : cg_(cg), index_(index) { }
+
+        T * operator ->() {
+            return lookup();
+        }
+        T * operator *() {
+            return lookup();
+        }
+    };
+
+  protected:
+
+    size_t allocateData(size_t size) {
+        JS_ASSERT(size % sizeof(void *) == 0);
+        size_t dataOffset = runtimeData_.length();
+        masm.propagateOOM(runtimeData_.appendN(0, size));
+        return dataOffset;
+    }
+
+    template <typename T>
+    inline size_t allocateCache(const T &cache) {
+        size_t index = allocateCache(cache, sizeof(mozilla::AlignedStorage2<T>));
+        if (masm.oom())
+            return SIZE_MAX;
+        // Use the copy constructor on the allocated space.
+        JS_ASSERT(index == cacheList_.back());
+        new (&runtimeData_[index]) T(cache);
+        return index;
+    }
+
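+    // Illustrative use of DataPtr (MyCache and setLabel are hypothetical):
+    // instead of keeping a raw T* into runtimeData_, which a later
+    // allocateData() call could invalidate, keep the offset and re-resolve it
+    // on every access:
+    //
+    //   size_t index = allocateCache(MyCache(...));
+    //   ...                                   // further appends may realloc
+    //   DataPtr<MyCache> cache(this, index);  // lookup happens per access
+    //   cache->setLabel(...);
+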
+  protected:
+    // Encodes an LSnapshot into the compressed snapshot buffer, returning
+    // false on failure.
+    bool encode(LRecoverInfo *recover);
+    bool encode(LSnapshot *snapshot);
+    bool encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint, uint32_t *startIndex);
+
+    // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
+    // If the bailout table is full, this returns false, which is not a fatal
+    // error (the code generator may use a slower bailout mechanism).
+    bool assignBailoutId(LSnapshot *snapshot);
+
+    // Encode all encountered safepoints in CG-order, and resolve |indices| for
+    // safepoint offsets.
+    void encodeSafepoints();
+
+    // Mark the safepoint on |ins| as corresponding to the current assembler
+    // location. The location should be just after a call.
+    bool markSafepoint(LInstruction *ins);
+    bool markSafepointAt(uint32_t offset, LInstruction *ins);
+
+    // Mark the OSI point |ins| as corresponding to the current
+    // assembler location inside the |osiIndices_|. Return the assembler
+    // location for the OSI point return location within
+    // |returnPointOffset|.
+    bool markOsiPoint(LOsiPoint *ins, uint32_t *returnPointOffset);
+
+    // Ensure that there is enough room between the last OSI point and the
+    // current instruction, such that:
+    //  (1) Invalidation will not overwrite the current instruction, and
+    //  (2) Overwriting the current instruction will not overwrite
+    //      an invalidation marker.
+    void ensureOsiSpace();
+
+    OutOfLineCode *oolTruncateDouble(const FloatRegister &src, const Register &dest);
+    bool emitTruncateDouble(const FloatRegister &src, const Register &dest);
+    bool emitTruncateFloat32(const FloatRegister &src, const Register &dest);
+
+    void emitPreBarrier(Register base, const LAllocation *index, MIRType type);
+    void emitPreBarrier(Address address, MIRType type);
+
+    inline bool isNextBlock(LBlock *block) {
+        return current->mir()->id() + 1 == block->mir()->id();
+    }
+
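+    // Illustrative only: branch emitters can use isNextBlock() to elide jumps
+    // to the fall-through block, e.g. (hypothetical call site):
+    //
+    //   if (!isNextBlock(target->lir()))
+    //       masm.jump(target->lir()->label());
+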
+  public:
+    // Save and restore all volatile registers to/from the stack, excluding the
+    // specified register(s), before a function call made using callWithABI and
+    // after storing the function call's return value to an output register.
+    // (The only registers that don't need to be saved/restored are 1) the
+    // temporary register used to store the return value of the function call,
+    // if there is one [otherwise that stored value would be overwritten]; and
+    // 2) temporary registers whose values aren't needed in the rest of the LIR
+    // instruction [this is purely an optimization]. All other volatiles must
+    // be saved and restored in case future LIR instructions need those values.)
+    void saveVolatile(Register output) {
+        RegisterSet regs = RegisterSet::Volatile();
+        regs.takeUnchecked(output);
+        masm.PushRegsInMask(regs);
+    }
+    void restoreVolatile(Register output) {
+        RegisterSet regs = RegisterSet::Volatile();
+        regs.takeUnchecked(output);
+        masm.PopRegsInMask(regs);
+    }
+    void saveVolatile(FloatRegister output) {
+        RegisterSet regs = RegisterSet::Volatile();
+        regs.takeUnchecked(output);
+        masm.PushRegsInMask(regs);
+    }
+    void restoreVolatile(FloatRegister output) {
+        RegisterSet regs = RegisterSet::Volatile();
+        regs.takeUnchecked(output);
+        masm.PopRegsInMask(regs);
+    }
+    void saveVolatile(RegisterSet temps) {
+        masm.PushRegsInMask(RegisterSet::VolatileNot(temps));
+    }
+    void restoreVolatile(RegisterSet temps) {
+        masm.PopRegsInMask(RegisterSet::VolatileNot(temps));
+    }
+    void saveVolatile() {
+        masm.PushRegsInMask(RegisterSet::Volatile());
+    }
+    void restoreVolatile() {
+        masm.PopRegsInMask(RegisterSet::Volatile());
+    }
+
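+    // Sketch of the intended pairing around an ABI call (illustrative only;
+    // SomeRuntimeFunction, input, temp and the exact MacroAssembler ABI-call
+    // sequence are assumptions and vary by platform):
+    //
+    //   saveVolatile(output);
+    //   masm.setupUnalignedABICall(1, temp);
+    //   masm.passABIArg(input);
+    //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, SomeRuntimeFunction));
+    //   masm.storeCallResult(output);
+    //   restoreVolatile(output);
+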
+    // These functions have to be called before and after any callVM and before
+    // any modifications of the stack. Modifications of the stack made after
+    // these calls should update the framePushed variable, needed by the exit
+    // frame produced by callVM.
+    inline void saveLive(LInstruction *ins);
+    inline void restoreLive(LInstruction *ins);
+    inline void restoreLiveIgnore(LInstruction *ins, RegisterSet reg);
+
+    // Save/restore all registers that are both live and volatile.
+    inline void saveLiveVolatile(LInstruction *ins);
+    inline void restoreLiveVolatile(LInstruction *ins);
+
+    template <typename T>
+    void pushArg(const T &t) {
+        masm.Push(t);
+#ifdef DEBUG
+        pushedArgs_++;
+#endif
+    }
+
+    void storeResultTo(const Register &reg) {
+        masm.storeCallResult(reg);
+    }
+
+    void storeFloatResultTo(const FloatRegister &reg) {
+        masm.storeCallFloatResult(reg);
+    }
+
+    template <typename T>
+    void storeResultValueTo(const T &t) {
+        masm.storeCallResultValue(t);
+    }
+
+    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);
+
+    template <class ArgSeq, class StoreOutputTo>
+    inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
+                                    const StoreOutputTo &out);
+
+    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
+        return callVM(f[gen->info().executionMode()], ins, dynStack);
+    }
+
+    template <class ArgSeq, class StoreOutputTo>
+    inline OutOfLineCode *oolCallVM(const VMFunctionsModal &f, LInstruction *ins,
+                                    const ArgSeq &args, const StoreOutputTo &out)
+    {
+        return oolCallVM(f[gen->info().executionMode()], ins, args, out);
+    }
+
+    bool addCache(LInstruction *lir, size_t cacheIndex);
+    size_t addCacheLocations(const CacheLocationList &locs, size_t *numLocs);
+    ReciprocalMulConstants computeDivisionConstants(int d);
+
+  protected:
+    bool addOutOfLineCode(OutOfLineCode *code);
+    bool hasOutOfLineCode() { return !outOfLineCode_.empty(); }
+    bool generateOutOfLineCode();
+
+    Label *labelForBackedgeWithImplicitCheck(MBasicBlock *mir);
+
+    // Generate a jump to the start of the specified block, adding information
+    // if this is a loop backedge. Use this in place of jumping directly to
+    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
+    // directly is needed.
+    void jumpToBlock(MBasicBlock *mir);
+    void jumpToBlock(MBasicBlock *mir, Assembler::Condition cond);
+
+  private:
+    void generateInvalidateEpilogue();
+
+  public:
+    CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
+
+  public:
+    template <class ArgSeq, class StoreOutputTo>
+    bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);
+
+    bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);
+
+    bool omitOverRecursedCheck() const;
+
+  public:
+    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);
+
+    // Parallel aborts:
+    //
+    // Parallel aborts work somewhat differently from sequential
+    // bailouts. When an abort occurs, we first invoke
+    // ReportAbortPar() and then we return JS_ION_ERROR. Each
+    // call on the stack will check for this error return and
+    // propagate it upwards until the C++ code that invoked the Ion
+    // code is reached.
+    //
+    // The snapshot that is provided to `oolAbortPar` is currently
+    // only used for error reporting, so that we can provide feedback
+    // to the user about which instruction aborted and (perhaps) why.
+    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
+                                   jsbytecode *bytecode);
+    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, LInstruction *lir);
+    OutOfLinePropagateAbortPar *oolPropagateAbortPar(LInstruction *lir);
+    virtual bool visitOutOfLineAbortPar(OutOfLineAbortPar *ool) = 0;
+    virtual bool visitOutOfLinePropagateAbortPar(OutOfLinePropagateAbortPar *ool) = 0;
+
+#ifdef JS_TRACE_LOGGING
+  protected:
+    bool emitTracelogScript(bool isStart);
+    bool emitTracelogTree(bool isStart, uint32_t textId);
+
+  public:
+    bool emitTracelogScriptStart() {
+        return emitTracelogScript(/* isStart = */ true);
+    }
+    bool emitTracelogScriptStop() {
+        return emitTracelogScript(/* isStart = */ false);
+    }
+    bool emitTracelogStartEvent(uint32_t textId) {
+        return emitTracelogTree(/* isStart = */ true, textId);
+    }
+    bool emitTracelogStopEvent(uint32_t textId) {
+        return emitTracelogTree(/* isStart = */ false, textId);
+    }
+#endif
+};
+
+// An out-of-line path is generated at the end of the function.
+class OutOfLineCode : public TempObject
+{
+    Label entry_;
+    Label rejoin_;
+    uint32_t framePushed_;
+    jsbytecode *pc_;
+    JSScript *script_;
+
+  public:
+    OutOfLineCode()
+      : framePushed_(0),
+        pc_(nullptr),
+        script_(nullptr)
+    { }
+
+    virtual bool generate(CodeGeneratorShared *codegen) = 0;
+
+    Label *entry() {
+        return &entry_;
+    }
+    virtual void bind(MacroAssembler *masm) {
+        masm->bind(entry());
+    }
+    Label *rejoin() {
+        return &rejoin_;
+    }
+    void setFramePushed(uint32_t framePushed) {
+        framePushed_ = framePushed;
+    }
+    uint32_t framePushed() const {
+        return framePushed_;
+    }
+    void setSource(JSScript *script, jsbytecode *pc) {
+        script_ = script;
+        pc_ = pc;
+    }
+    jsbytecode *pc() {
+        return pc_;
+    }
+    JSScript *script() {
+        return script_;
+    }
+};
+
+// For OOL paths that want a specifically-typed code generator.
+template <typename T>
+class OutOfLineCodeBase : public OutOfLineCode
+{
+  public:
+    virtual bool generate(CodeGeneratorShared *codegen) {
+        return accept(static_cast<T *>(codegen));
+    }
+
+  public:
+    virtual bool accept(T *codegen) = 0;
+};
+
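+// Illustrative subclass sketch (OutOfLineFoo and visitOutOfLineFoo are
+// hypothetical names): an OOL path stores whatever state its generator needs
+// and dispatches back to the concrete code generator through accept(). The
+// OutOfLineCallVM class below follows exactly this shape.
+//
+//   class OutOfLineFoo : public OutOfLineCodeBase<CodeGenerator>
+//   {
+//       LInstruction *lir_;
+//     public:
+//       explicit OutOfLineFoo(LInstruction *lir) : lir_(lir) { }
+//       bool accept(CodeGenerator *codegen) {
+//           return codegen->visitOutOfLineFoo(this);
+//       }
+//       LInstruction *lir() const { return lir_; }
+//   };
+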
+// ArgSeq stores arguments for OutOfLineCallVM.
+//
+// OutOfLineCallVM instances are created with the "oolCallVM" function. The
+// third argument of this function is an instance of a class which provides a
+// "generate" function to call the "pushArg" needed by the VMFunction call.
+// The list of arguments can be created by using the ArgList function, which
+// creates an empty list of arguments. Arguments are added to this list by
+// using the comma operator. The type of the argument list is returned by the
+// comma operator, and due to template arguments it is quite painful to write
+// by hand. It is recommended to use it directly as an argument to a template
+// function (such as oolCallVM), which lets the compiler infer the argument
+// types. The list of arguments must be written in the same order as if you
+// were calling the function in C++.
+//
+// Example:
+//   (ArgList(), ToRegister(lir->lhs()), ToRegister(lir->rhs()))
+
+template <class SeqType, typename LastType>
+class ArgSeq : public SeqType
+{
+  private:
+    typedef ArgSeq<SeqType, LastType> ThisType;
+    LastType last_;
+
+  public:
+    ArgSeq(const SeqType &seq, const LastType &last)
+      : SeqType(seq),
+        last_(last)
+    { }
+
+    template <typename NextType>
+    inline ArgSeq<ThisType, NextType>
+    operator, (const NextType &last) const {
+        return ArgSeq<ThisType, NextType>(*this, last);
+    }
+
+    inline void generate(CodeGeneratorShared *codegen) const {
+        // Push the last argument first, then recurse into the rest of the
+        // sequence, so arguments end up pushed in reverse C++ call order.
+        codegen->pushArg(last_);
+        this->SeqType::generate(codegen);
+    }
+};
+
+// Mark the end of an argument list.
+template <>
+class ArgSeq<void, void>
+{
+  private:
+    typedef ArgSeq<void, void> ThisType;
+
+  public:
+    ArgSeq() { }
+    ArgSeq(const ThisType &) { }
+
+    template <typename NextType>
+    inline ArgSeq<ThisType, NextType>
+    operator, (const NextType &last) const {
+        return ArgSeq<ThisType, NextType>(*this, last);
+    }
+
+    inline void generate(CodeGeneratorShared *codegen) const {
+    }
+};
+
+inline ArgSeq<void, void>
+ArgList()
+{
+    return ArgSeq<void, void>();
+}
+
+// Store wrappers, to generate the right move of data after the VM call.
+
+struct StoreNothing
+{
+    inline void generate(CodeGeneratorShared *codegen) const {
+    }
+    inline RegisterSet clobbered() const {
+        return RegisterSet(); // No register gets clobbered.
+    }
+};
+
+class StoreRegisterTo
+{
+  private:
+    Register out_;
+
+  public:
+    StoreRegisterTo(const Register &out)
+      : out_(out)
+    { }
+
+    inline void generate(CodeGeneratorShared *codegen) const {
+        codegen->storeResultTo(out_);
+    }
+    inline RegisterSet clobbered() const {
+        RegisterSet set = RegisterSet();
+        set.add(out_);
+        return set;
+    }
+};
+
+class StoreFloatRegisterTo
+{
+  private:
+    FloatRegister out_;
+
+  public:
+    StoreFloatRegisterTo(const FloatRegister &out)
+      : out_(out)
+    { }
+
+    inline void generate(CodeGeneratorShared *codegen) const {
+        codegen->storeFloatResultTo(out_);
+    }
+    inline RegisterSet clobbered() const {
+        RegisterSet set = RegisterSet();
+        set.add(out_);
+        return set;
+    }
+};
+
+template <typename Output>
+class StoreValueTo_
+{
+  private:
+    Output out_;
+
+  public:
+    StoreValueTo_(const Output &out)
+      : out_(out)
+    { }
+
+    inline void generate(CodeGeneratorShared *codegen) const {
+        codegen->storeResultValueTo(out_);
+    }
+    inline RegisterSet clobbered() const {
+        RegisterSet set = RegisterSet();
+        set.add(out_);
+        return set;
+    }
+};
+
+template <typename Output>
+StoreValueTo_<Output> StoreValueTo(const Output &out)
+{
+    return StoreValueTo_<Output>(out);
+}
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+  private:
+    LInstruction *lir_;
+    const VMFunction &fun_;
+    ArgSeq args_;
+    StoreOutputTo out_;
+
+  public:
+    OutOfLineCallVM(LInstruction *lir, const VMFunction &fun, const ArgSeq &args,
+                    const StoreOutputTo &out)
+      : lir_(lir),
+        fun_(fun),
+        args_(args),
+        out_(out)
+    { }
+
+    bool accept(CodeGeneratorShared *codegen) {
+        return codegen->visitOutOfLineCallVM(this);
+    }
+
+    LInstruction *lir() const { return lir_; }
+    const VMFunction &function() const { return fun_; }
+    const ArgSeq &args() const { return args_; }
+    const StoreOutputTo &out() const { return out_; }
+};
+
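+// Illustrative call site for oolCallVM (SomeVMFunctionInfo, lir and output
+// are hypothetical): build the argument list in C++ call order, pick a store
+// wrapper for the result, then guard into the OOL path and bind its rejoin
+// label on the fast path:
+//
+//   OutOfLineCode *ool = oolCallVM(SomeVMFunctionInfo, lir,
+//                                  (ArgList(), ToRegister(lir->lhs()),
+//                                              ToRegister(lir->rhs())),
+//                                  StoreRegisterTo(output));
+//   if (!ool)
+//       return false;
+//   // ... emit a guard that jumps to ool->entry() on the slow case ...
+//   masm.bind(ool->rejoin());
+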
+template <class ArgSeq, class StoreOutputTo>
+inline OutOfLineCode *
+CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args,
+                               const StoreOutputTo &out)
+{
+    OutOfLineCode *ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
+    if (!addOutOfLineCode(ool))
+        return nullptr;
+    return ool;
+}
+
+template <class ArgSeq, class StoreOutputTo>
+bool
+CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool)
+{
+    LInstruction *lir = ool->lir();
+
+    saveLive(lir);
+    ool->args().generate(this);
+    if (!callVM(ool->function(), lir))
+        return false;
+    ool->out().generate(this);
+    restoreLiveIgnore(lir, ool->out().clobbered());
+    masm.jump(ool->rejoin());
+    return true;
+}
+
+// Initiate a parallel abort. The snapshot is used to record the cause.
+class OutOfLineAbortPar : public OutOfLineCode
+{
+  private:
+    ParallelBailoutCause cause_;
+    MBasicBlock *basicBlock_;
+    jsbytecode *bytecode_;
+
+  public:
+    OutOfLineAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock, jsbytecode *bytecode)
+      : cause_(cause),
+        basicBlock_(basicBlock),
+        bytecode_(bytecode)
+    { }
+
+    ParallelBailoutCause cause() {
+        return cause_;
+    }
+
+    MBasicBlock *basicBlock() {
+        return basicBlock_;
+    }
+
+    jsbytecode *bytecode() {
+        return bytecode_;
+    }
+
+    bool generate(CodeGeneratorShared *codegen);
+};
+
+// Used when some callee has aborted.
+class OutOfLinePropagateAbortPar : public OutOfLineCode
+{
+  private:
+    LInstruction *lir_;
+
+  public:
+    OutOfLinePropagateAbortPar(LInstruction *lir)
+      : lir_(lir)
+    { }
+
+    LInstruction *lir() { return lir_; }
+
+    bool generate(CodeGeneratorShared *codegen);
+};
+
+extern const VMFunction InterruptCheckInfo;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_h */