Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef jit_shared_CodeGenerator_shared_h |
michael@0 | 8 | #define jit_shared_CodeGenerator_shared_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/Alignment.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "jit/IonFrames.h" |
michael@0 | 13 | #include "jit/IonMacroAssembler.h" |
michael@0 | 14 | #include "jit/LIR.h" |
michael@0 | 15 | #include "jit/MIRGenerator.h" |
michael@0 | 16 | #include "jit/MIRGraph.h" |
michael@0 | 17 | #include "jit/Safepoints.h" |
michael@0 | 18 | #include "jit/Snapshots.h" |
michael@0 | 19 | #include "jit/VMFunctions.h" |
michael@0 | 20 | #include "vm/ForkJoin.h" |
michael@0 | 21 | |
namespace js {
namespace jit {

// Forward declarations for types defined later in this header or in the
// architecture-specific code generators.
class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class IonCache;
class OutOfLineAbortPar;
class OutOfLinePropagateAbortPar;

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM;

class OutOfLineTruncateSlow;
michael@0 | 36 | |
michael@0 | 37 | struct PatchableBackedgeInfo |
michael@0 | 38 | { |
michael@0 | 39 | CodeOffsetJump backedge; |
michael@0 | 40 | Label *loopHeader; |
michael@0 | 41 | Label *interruptCheck; |
michael@0 | 42 | |
michael@0 | 43 | PatchableBackedgeInfo(CodeOffsetJump backedge, Label *loopHeader, Label *interruptCheck) |
michael@0 | 44 | : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck) |
michael@0 | 45 | {} |
michael@0 | 46 | }; |
michael@0 | 47 | |
// Constants for strength-reducing an integer division into a multiply by a
// reciprocal followed by a shift; produced by computeDivisionConstants().
struct ReciprocalMulConstants {
    int32_t multiplier;
    int32_t shiftAmount;
};
michael@0 | 52 | |
// Architecture-independent base class for Ion code generators. It owns the
// macro assembler used for emission, tracks the frame layout of the function
// being compiled, and records the side tables (snapshots, safepoints,
// bailouts, inline caches, patchable backedges) emitted alongside the code.
class CodeGeneratorShared : public LInstructionVisitor
{
    js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
    OutOfLineCode *oolIns;

    // Supplies a MacroAssembler when the caller did not provide one
    // (presumably constructing maybeMasm_ on demand — see the .cpp).
    MacroAssembler &ensureMasm(MacroAssembler *masm);
    mozilla::Maybe<MacroAssembler> maybeMasm_;

  public:
    MacroAssembler &masm;

  protected:
    MIRGenerator *gen;
    LIRGraph &graph;
    // The LIR block currently being generated.
    LBlock *current;
    SnapshotWriter snapshots_;
    RecoverWriter recovers_;
    JitCode *deoptTable_;
#ifdef DEBUG
    // Counts masm.Push() calls made via pushArg(); checked against the
    // callee's expected argument count (debug builds only).
    uint32_t pushedArgs_;
#endif
    uint32_t lastOsiPointOffset_;
    SafepointWriter safepoints_;
    Label invalidate_;
    CodeOffsetLabel invalidateEpilogueData_;

    js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
    js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;

    // Mapping from bailout table ID to an offset in the snapshot buffer.
    js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;

    // Allocated data space needed at runtime.
    js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;

    // Vector of information about generated polymorphic inline caches.
    js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;

    // List of stack slots that have been pushed as arguments to an MCall.
    js::Vector<uint32_t, 0, SystemAllocPolicy> pushedArgumentSlots_;

    // Patchable backedges generated for loops.
    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;

#ifdef JS_TRACE_LOGGING
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTraceLoggers_;
    js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
#endif

    // When profiling is enabled, this is the instrumentation manager which
    // maintains state of what script is currently being generated (for inline
    // scripts) and when instrumentation needs to be emitted or skipped.
    IonInstrumentation sps_;

  protected:
    // The offset of the first instruction of the OSR entry block from the
    // beginning of the code buffer.
    size_t osrEntryOffset_;

    TempAllocator &alloc() const {
        return graph.mir().alloc();
    }

    // May only be set once (asserted); 0 means "not yet set".
    inline void setOsrEntryOffset(size_t offset) {
        JS_ASSERT(osrEntryOffset_ == 0);
        osrEntryOffset_ = offset;
    }
    inline size_t getOsrEntryOffset() const {
        return osrEntryOffset_;
    }

    // The offset of the first instruction of the body.
    // This skips the arguments type checks.
    size_t skipArgCheckEntryOffset_;

    // May only be set once (asserted); 0 means "not yet set".
    inline void setSkipArgCheckEntryOffset(size_t offset) {
        JS_ASSERT(skipArgCheckEntryOffset_ == 0);
        skipArgCheckEntryOffset_ = offset;
    }
    inline size_t getSkipArgCheckEntryOffset() const {
        return skipArgCheckEntryOffset_;
    }

    typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;

    bool markArgumentSlots(LSafepoint *safepoint);
    void dropArguments(unsigned argc);

  protected:
    // The initial size of the frame in bytes. These are bytes beyond the
    // constant header present for every Ion frame, used for pre-determined
    // spills.
    int32_t frameDepth_;

    // Frame class this frame's size falls into (see IonFrame.h).
    FrameSizeClass frameClass_;

    // For arguments to the current function.
    inline int32_t ArgToStackOffset(int32_t slot) const {
        return masm.framePushed() +
               (gen->compilingAsmJS() ? NativeFrameSize : sizeof(IonJSFrameLayout)) +
               slot;
    }

    // For the callee of the current function.
    inline int32_t CalleeStackOffset() const {
        return masm.framePushed() + IonJSFrameLayout::offsetOfCalleeToken();
    }

    // Converts a local stack slot (1-based, asserted in range) to an offset
    // from the stack pointer.
    inline int32_t SlotToStackOffset(int32_t slot) const {
        JS_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
        int32_t offset = masm.framePushed() - slot;
        JS_ASSERT(offset >= 0);
        return offset;
    }
    inline int32_t StackOffsetToSlot(int32_t offset) const {
        // See: SlotToStackOffset. This is used to convert pushed arguments
        // to a slot index that safepoints can use.
        //
        // offset = framePushed - slot
        // offset + slot = framePushed
        // slot = framePushed - offset
        return masm.framePushed() - offset;
    }

    // For argument construction for calls. Argslots are Value-sized.
    inline int32_t StackOffsetOfPassedArg(int32_t slot) const {
        // A slot of 0 is permitted only to calculate %esp offset for calls.
        JS_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
        int32_t offset = masm.framePushed() -
                         graph.paddedLocalSlotsSize() -
                         (slot * sizeof(Value));

        // Passed arguments go below a function's local stack storage.
        // When arguments are being pushed, there is nothing important on the stack.
        // Therefore, it is safe to push the arguments down arbitrarily. Pushing
        // by sizeof(Value) is desirable since everything on the stack is a Value.
        // Note that paddedLocalSlotCount() aligns to at least a Value boundary
        // specifically to support this.
        JS_ASSERT(offset >= 0);
        JS_ASSERT(offset % sizeof(Value) == 0);
        return offset;
    }

    // Dispatches on whether the allocation is an argument slot or a stack slot.
    inline int32_t ToStackOffset(const LAllocation *a) const {
        if (a->isArgument())
            return ArgToStackOffset(a->toArgument()->index());
        return SlotToStackOffset(a->toStackSlot()->slot());
    }

    // Frames in a known size class use the class's canonical size; otherwise
    // fall back to the computed frameDepth_.
    uint32_t frameSize() const {
        return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
    }

  protected:
    // Ensure the cache is an IonCache while expecting the size of the derived
    // class. We only need the cache list at GC time. Everyone else can just take
    // runtimeData offsets.
    size_t allocateCache(const IonCache &, size_t size) {
        size_t dataOffset = allocateData(size);
        masm.propagateOOM(cacheList_.append(dataOffset));
        return dataOffset;
    }

#ifdef CHECK_OSIPOINT_REGISTERS
    void resetOsiPointRegs(LSafepoint *safepoint);
    bool shouldVerifyOsiPointRegs(LSafepoint *safepoint);
    void verifyOsiPointRegs(LSafepoint *safepoint);
#endif

  public:

    // When appending to runtimeData_, the vector might realloc, leaving pointers
    // in the original vector stale and unusable. DataPtr acts like a pointer,
    // but allows safety in the face of potentially realloc'ing vector appends.
    friend class DataPtr;
    template <typename T>
    class DataPtr
    {
        CodeGeneratorShared *cg_;
        size_t index_;

        // Re-resolve the raw pointer on every access so a realloc of
        // runtimeData_ between accesses cannot leave us dangling.
        T *lookup() {
            return reinterpret_cast<T *>(&cg_->runtimeData_[index_]);
        }
      public:
        DataPtr(CodeGeneratorShared *cg, size_t index)
          : cg_(cg), index_(index) { }

        T * operator ->() {
            return lookup();
        }
        T * operator *() {
            return lookup();
        }
    };

  protected:

    // Reserves |size| zeroed bytes in runtimeData_ (size must be
    // pointer-aligned) and returns the offset of the reservation.
    size_t allocateData(size_t size) {
        JS_ASSERT(size % sizeof(void *) == 0);
        size_t dataOffset = runtimeData_.length();
        masm.propagateOOM(runtimeData_.appendN(0, size));
        return dataOffset;
    }

    // Allocates runtime-data space for |cache| and copy-constructs it there.
    // Returns SIZE_MAX on OOM.
    template <typename T>
    inline size_t allocateCache(const T &cache) {
        size_t index = allocateCache(cache, sizeof(mozilla::AlignedStorage2<T>));
        if (masm.oom())
            return SIZE_MAX;
        // Use the copy constructor on the allocated space.
        JS_ASSERT(index == cacheList_.back());
        new (&runtimeData_[index]) T(cache);
        return index;
    }

  protected:
    // Encodes an LSnapshot into the compressed snapshot buffer, returning
    // false on failure.
    bool encode(LRecoverInfo *recover);
    bool encode(LSnapshot *snapshot);
    bool encodeAllocations(LSnapshot *snapshot, MResumePoint *resumePoint, uint32_t *startIndex);

    // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
    // If the bailout table is full, this returns false, which is not a fatal
    // error (the code generator may use a slower bailout mechanism).
    bool assignBailoutId(LSnapshot *snapshot);

    // Encode all encountered safepoints in CG-order, and resolve |indices| for
    // safepoint offsets.
    void encodeSafepoints();

    // Mark the safepoint on |ins| as corresponding to the current assembler location.
    // The location should be just after a call.
    bool markSafepoint(LInstruction *ins);
    bool markSafepointAt(uint32_t offset, LInstruction *ins);

    // Mark the OSI point |ins| as corresponding to the current
    // assembler location inside the |osiIndices_|. Return the assembler
    // location for the OSI point return location within
    // |returnPointOffset|.
    bool markOsiPoint(LOsiPoint *ins, uint32_t *returnPointOffset);

    // Ensure that there is enough room between the last OSI point and the
    // current instruction, such that:
    // (1) Invalidation will not overwrite the current instruction, and
    // (2) Overwriting the current instruction will not overwrite
    //     an invalidation marker.
    void ensureOsiSpace();

    OutOfLineCode *oolTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateDouble(const FloatRegister &src, const Register &dest);
    bool emitTruncateFloat32(const FloatRegister &src, const Register &dest);

    void emitPreBarrier(Register base, const LAllocation *index, MIRType type);
    void emitPreBarrier(Address address, MIRType type);

    // True if |block| immediately follows the current block in emission order.
    inline bool isNextBlock(LBlock *block) {
        return current->mir()->id() + 1 == block->mir()->id();
    }

  public:
    // Save and restore all volatile registers to/from the stack, excluding the
    // specified register(s), before a function call made using callWithABI and
    // after storing the function call's return value to an output register.
    // (The only registers that don't need to be saved/restored are 1) the
    // temporary register used to store the return value of the function call,
    // if there is one [otherwise that stored value would be overwritten]; and
    // 2) temporary registers whose values aren't needed in the rest of the LIR
    // instruction [this is purely an optimization]. All other volatiles must
    // be saved and restored in case future LIR instructions need those values.)
    void saveVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(Register output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(FloatRegister output) {
        RegisterSet regs = RegisterSet::Volatile();
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(RegisterSet temps) {
        masm.PushRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void restoreVolatile(RegisterSet temps) {
        masm.PopRegsInMask(RegisterSet::VolatileNot(temps));
    }
    void saveVolatile() {
        masm.PushRegsInMask(RegisterSet::Volatile());
    }
    void restoreVolatile() {
        masm.PopRegsInMask(RegisterSet::Volatile());
    }

    // These functions have to be called before and after any callVM and before
    // any modifications of the stack. Modification of the stack made after
    // these calls should update the framePushed variable, needed by the exit
    // frame produced by callVM.
    inline void saveLive(LInstruction *ins);
    inline void restoreLive(LInstruction *ins);
    inline void restoreLiveIgnore(LInstruction *ins, RegisterSet reg);

    // Save/restore all registers that are both live and volatile.
    inline void saveLiveVolatile(LInstruction *ins);
    inline void restoreLiveVolatile(LInstruction *ins);

    // Pushes one argument for an upcoming VM call; debug builds count pushes.
    template <typename T>
    void pushArg(const T &t) {
        masm.Push(t);
#ifdef DEBUG
        pushedArgs_++;
#endif
    }

    void storeResultTo(const Register &reg) {
        masm.storeCallResult(reg);
    }

    void storeFloatResultTo(const FloatRegister &reg) {
        masm.storeCallFloatResult(reg);
    }

    template <typename T>
    void storeResultValueTo(const T &t) {
        masm.storeCallResultValue(t);
    }

    bool callVM(const VMFunction &f, LInstruction *ins, const Register *dynStack = nullptr);

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunction &fun, LInstruction *ins, const ArgSeq &args,
                                    const StoreOutputTo &out);

    // Modal variants: select the VMFunction for the current execution mode.
    bool callVM(const VMFunctionsModal &f, LInstruction *ins, const Register *dynStack = nullptr) {
        return callVM(f[gen->info().executionMode()], ins, dynStack);
    }

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode *oolCallVM(const VMFunctionsModal &f, LInstruction *ins,
                                    const ArgSeq &args, const StoreOutputTo &out)
    {
        return oolCallVM(f[gen->info().executionMode()], ins, args, out);
    }

    bool addCache(LInstruction *lir, size_t cacheIndex);
    size_t addCacheLocations(const CacheLocationList &locs, size_t *numLocs);
    ReciprocalMulConstants computeDivisionConstants(int d);

  protected:
    bool addOutOfLineCode(OutOfLineCode *code);
    bool hasOutOfLineCode() { return !outOfLineCode_.empty(); }
    bool generateOutOfLineCode();

    Label *labelForBackedgeWithImplicitCheck(MBasicBlock *mir);

    // Generate a jump to the start of the specified block, adding information
    // if this is a loop backedge. Use this in place of jumping directly to
    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
    // directly is needed.
    void jumpToBlock(MBasicBlock *mir);
    void jumpToBlock(MBasicBlock *mir, Assembler::Condition cond);

  private:
    void generateInvalidateEpilogue();

  public:
    CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);

  public:
    template <class ArgSeq, class StoreOutputTo>
    bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);

    bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);

    bool omitOverRecursedCheck() const;

  public:
    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = nullptr);

    // Parallel aborts:
    //
    // Parallel aborts work somewhat differently from sequential
    // bailouts. When an abort occurs, we first invoke
    // ReportAbortPar() and then we return JS_ION_ERROR. Each
    // call on the stack will check for this error return and
    // propagate it upwards until the C++ code that invoked the ion
    // code is reached.
    //
    // The snapshot that is provided to `oolAbortPar` is currently
    // only used for error reporting, so that we can provide feedback
    // to the user about which instruction aborted and (perhaps) why.
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock,
                                   jsbytecode *bytecode);
    OutOfLineAbortPar *oolAbortPar(ParallelBailoutCause cause, LInstruction *lir);
    OutOfLinePropagateAbortPar *oolPropagateAbortPar(LInstruction *lir);
    virtual bool visitOutOfLineAbortPar(OutOfLineAbortPar *ool) = 0;
    virtual bool visitOutOfLinePropagateAbortPar(OutOfLinePropagateAbortPar *ool) = 0;

#ifdef JS_TRACE_LOGGING
  protected:
    bool emitTracelogScript(bool isStart);
    bool emitTracelogTree(bool isStart, uint32_t textId);

  public:
    bool emitTracelogScriptStart() {
        return emitTracelogScript(/* isStart =*/ true);
    }
    bool emitTracelogScriptStop() {
        return emitTracelogScript(/* isStart =*/ false);
    }
    bool emitTracelogStartEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart =*/ true, textId);
    }
    bool emitTracelogStopEvent(uint32_t textId) {
        return emitTracelogTree(/* isStart =*/ false, textId);
    }
#endif
};
michael@0 | 482 | |
// An out-of-line path is generated at the end of the function.
class OutOfLineCode : public TempObject
{
    // Label bound at the start of the OOL path (see bind()).
    Label entry_;
    // Label for jumping back to the main instruction stream.
    Label rejoin_;
    // masm.framePushed() to assume while generating this path.
    uint32_t framePushed_;
    // Source location associated with this path (may stay null).
    jsbytecode *pc_;
    JSScript *script_;

  public:
    OutOfLineCode()
      : framePushed_(0),
        pc_(nullptr),
        script_(nullptr)
    { }

    // Emit the out-of-line path; implemented by each concrete subclass.
    virtual bool generate(CodeGeneratorShared *codegen) = 0;

    Label *entry() {
        return &entry_;
    }
    virtual void bind(MacroAssembler *masm) {
        masm->bind(entry());
    }
    Label *rejoin() {
        return &rejoin_;
    }
    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }
    uint32_t framePushed() const {
        return framePushed_;
    }
    void setSource(JSScript *script, jsbytecode *pc) {
        script_ = script;
        pc_ = pc;
    }
    jsbytecode *pc() {
        return pc_;
    }
    JSScript *script() {
        return script_;
    }
};
michael@0 | 527 | |
// For OOL paths that want a specific-typed code generator.
template <typename T>
class OutOfLineCodeBase : public OutOfLineCode
{
  public:
    // Downcast to the concrete code generator type T and dispatch to accept().
    virtual bool generate(CodeGeneratorShared *codegen) {
        return accept(static_cast<T *>(codegen));
    }

  public:
    virtual bool accept(T *codegen) = 0;
};
michael@0 | 540 | |
// ArgSeq stores arguments for OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of this function is an instance of a class which provides a
// "generate" function to call the "pushArg" needed by the VMFunction call.
// The list of arguments can be created by using the ArgList function, which
// creates an empty list of arguments. Arguments are added to this list by
// using the comma operator. The type of the argument list is returned by the
// comma operator, and due to template arguments it is quite painful to write
// by hand. It is recommended to use it directly as an argument of a template
// function which would get its arguments inferred by the compiler (such as
// oolCallVM). The list of arguments must be written in the same order as if
// you were calling the function in C++.
//
// Example:
//   (ArgList(), ToRegister(lir->lhs()), ToRegister(lir->rhs()))
michael@0 | 556 | |
// A cons-style argument list: each instantiation holds the preceding
// arguments (SeqType) plus one more argument (last_).
template <class SeqType, typename LastType>
class ArgSeq : public SeqType
{
  private:
    typedef ArgSeq<SeqType, LastType> ThisType;
    LastType last_;

  public:
    ArgSeq(const SeqType &seq, const LastType &last)
      : SeqType(seq),
        last_(last)
    { }

    // The comma operator appends one argument, producing a longer list type.
    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    // Pushes last_ first, then recurses on the earlier arguments, so the
    // arguments are pushed in reverse of the C++ call order.
    inline void generate(CodeGeneratorShared *codegen) const {
        codegen->pushArg(last_);
        this->SeqType::generate(codegen);
    }
};
michael@0 | 581 | |
// Mark the end of an argument list: the empty-list base case of ArgSeq.
template <>
class ArgSeq<void, void>
{
  private:
    typedef ArgSeq<void, void> ThisType;

  public:
    ArgSeq() { }
    ArgSeq(const ThisType &) { }

    // Appending to the empty list starts a one-element list.
    template <typename NextType>
    inline ArgSeq<ThisType, NextType>
    operator, (const NextType &last) const {
        return ArgSeq<ThisType, NextType>(*this, last);
    }

    // Nothing to push for an empty argument list.
    inline void generate(CodeGeneratorShared *codegen) const {
    }
};
michael@0 | 602 | |
// Creates an empty argument list; arguments are then appended with the
// comma operator (see the ArgSeq commentary above its definition).
inline ArgSeq<void, void>
ArgList()
{
    return ArgSeq<void, void>();
}
michael@0 | 608 | |
// Store wrappers, to generate the right move of data after the VM call.

// Used when the VM call's return value is ignored.
struct StoreNothing
{
    inline void generate(CodeGeneratorShared *codegen) const {
    }
    inline RegisterSet clobbered() const {
        return RegisterSet(); // No register gets clobbered
    }
};
michael@0 | 619 | |
michael@0 | 620 | class StoreRegisterTo |
michael@0 | 621 | { |
michael@0 | 622 | private: |
michael@0 | 623 | Register out_; |
michael@0 | 624 | |
michael@0 | 625 | public: |
michael@0 | 626 | StoreRegisterTo(const Register &out) |
michael@0 | 627 | : out_(out) |
michael@0 | 628 | { } |
michael@0 | 629 | |
michael@0 | 630 | inline void generate(CodeGeneratorShared *codegen) const { |
michael@0 | 631 | codegen->storeResultTo(out_); |
michael@0 | 632 | } |
michael@0 | 633 | inline RegisterSet clobbered() const { |
michael@0 | 634 | RegisterSet set = RegisterSet(); |
michael@0 | 635 | set.add(out_); |
michael@0 | 636 | return set; |
michael@0 | 637 | } |
michael@0 | 638 | }; |
michael@0 | 639 | |
michael@0 | 640 | class StoreFloatRegisterTo |
michael@0 | 641 | { |
michael@0 | 642 | private: |
michael@0 | 643 | FloatRegister out_; |
michael@0 | 644 | |
michael@0 | 645 | public: |
michael@0 | 646 | StoreFloatRegisterTo(const FloatRegister &out) |
michael@0 | 647 | : out_(out) |
michael@0 | 648 | { } |
michael@0 | 649 | |
michael@0 | 650 | inline void generate(CodeGeneratorShared *codegen) const { |
michael@0 | 651 | codegen->storeFloatResultTo(out_); |
michael@0 | 652 | } |
michael@0 | 653 | inline RegisterSet clobbered() const { |
michael@0 | 654 | RegisterSet set = RegisterSet(); |
michael@0 | 655 | set.add(out_); |
michael@0 | 656 | return set; |
michael@0 | 657 | } |
michael@0 | 658 | }; |
michael@0 | 659 | |
michael@0 | 660 | template <typename Output> |
michael@0 | 661 | class StoreValueTo_ |
michael@0 | 662 | { |
michael@0 | 663 | private: |
michael@0 | 664 | Output out_; |
michael@0 | 665 | |
michael@0 | 666 | public: |
michael@0 | 667 | StoreValueTo_(const Output &out) |
michael@0 | 668 | : out_(out) |
michael@0 | 669 | { } |
michael@0 | 670 | |
michael@0 | 671 | inline void generate(CodeGeneratorShared *codegen) const { |
michael@0 | 672 | codegen->storeResultValueTo(out_); |
michael@0 | 673 | } |
michael@0 | 674 | inline RegisterSet clobbered() const { |
michael@0 | 675 | RegisterSet set = RegisterSet(); |
michael@0 | 676 | set.add(out_); |
michael@0 | 677 | return set; |
michael@0 | 678 | } |
michael@0 | 679 | }; |
michael@0 | 680 | |
michael@0 | 681 | template <typename Output> |
michael@0 | 682 | StoreValueTo_<Output> StoreValueTo(const Output &out) |
michael@0 | 683 | { |
michael@0 | 684 | return StoreValueTo_<Output>(out); |
michael@0 | 685 | } |
michael@0 | 686 | |
michael@0 | 687 | template <class ArgSeq, class StoreOutputTo> |
michael@0 | 688 | class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared> |
michael@0 | 689 | { |
michael@0 | 690 | private: |
michael@0 | 691 | LInstruction *lir_; |
michael@0 | 692 | const VMFunction &fun_; |
michael@0 | 693 | ArgSeq args_; |
michael@0 | 694 | StoreOutputTo out_; |
michael@0 | 695 | |
michael@0 | 696 | public: |
michael@0 | 697 | OutOfLineCallVM(LInstruction *lir, const VMFunction &fun, const ArgSeq &args, |
michael@0 | 698 | const StoreOutputTo &out) |
michael@0 | 699 | : lir_(lir), |
michael@0 | 700 | fun_(fun), |
michael@0 | 701 | args_(args), |
michael@0 | 702 | out_(out) |
michael@0 | 703 | { } |
michael@0 | 704 | |
michael@0 | 705 | bool accept(CodeGeneratorShared *codegen) { |
michael@0 | 706 | return codegen->visitOutOfLineCallVM(this); |
michael@0 | 707 | } |
michael@0 | 708 | |
michael@0 | 709 | LInstruction *lir() const { return lir_; } |
michael@0 | 710 | const VMFunction &function() const { return fun_; } |
michael@0 | 711 | const ArgSeq &args() const { return args_; } |
michael@0 | 712 | const StoreOutputTo &out() const { return out_; } |
michael@0 | 713 | }; |
michael@0 | 714 | |
michael@0 | 715 | template <class ArgSeq, class StoreOutputTo> |
michael@0 | 716 | inline OutOfLineCode * |
michael@0 | 717 | CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args, |
michael@0 | 718 | const StoreOutputTo &out) |
michael@0 | 719 | { |
michael@0 | 720 | OutOfLineCode *ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out); |
michael@0 | 721 | if (!addOutOfLineCode(ool)) |
michael@0 | 722 | return nullptr; |
michael@0 | 723 | return ool; |
michael@0 | 724 | } |
michael@0 | 725 | |
michael@0 | 726 | template <class ArgSeq, class StoreOutputTo> |
michael@0 | 727 | bool |
michael@0 | 728 | CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool) |
michael@0 | 729 | { |
michael@0 | 730 | LInstruction *lir = ool->lir(); |
michael@0 | 731 | |
michael@0 | 732 | saveLive(lir); |
michael@0 | 733 | ool->args().generate(this); |
michael@0 | 734 | if (!callVM(ool->function(), lir)) |
michael@0 | 735 | return false; |
michael@0 | 736 | ool->out().generate(this); |
michael@0 | 737 | restoreLiveIgnore(lir, ool->out().clobbered()); |
michael@0 | 738 | masm.jump(ool->rejoin()); |
michael@0 | 739 | return true; |
michael@0 | 740 | } |
michael@0 | 741 | |
michael@0 | 742 | // Initiate a parallel abort. The snapshot is used to record the |
michael@0 | 743 | // cause. |
michael@0 | 744 | class OutOfLineAbortPar : public OutOfLineCode |
michael@0 | 745 | { |
michael@0 | 746 | private: |
michael@0 | 747 | ParallelBailoutCause cause_; |
michael@0 | 748 | MBasicBlock *basicBlock_; |
michael@0 | 749 | jsbytecode *bytecode_; |
michael@0 | 750 | |
michael@0 | 751 | public: |
michael@0 | 752 | OutOfLineAbortPar(ParallelBailoutCause cause, MBasicBlock *basicBlock, jsbytecode *bytecode) |
michael@0 | 753 | : cause_(cause), |
michael@0 | 754 | basicBlock_(basicBlock), |
michael@0 | 755 | bytecode_(bytecode) |
michael@0 | 756 | { } |
michael@0 | 757 | |
michael@0 | 758 | ParallelBailoutCause cause() { |
michael@0 | 759 | return cause_; |
michael@0 | 760 | } |
michael@0 | 761 | |
michael@0 | 762 | MBasicBlock *basicBlock() { |
michael@0 | 763 | return basicBlock_; |
michael@0 | 764 | } |
michael@0 | 765 | |
michael@0 | 766 | jsbytecode *bytecode() { |
michael@0 | 767 | return bytecode_; |
michael@0 | 768 | } |
michael@0 | 769 | |
michael@0 | 770 | bool generate(CodeGeneratorShared *codegen); |
michael@0 | 771 | }; |
michael@0 | 772 | |
michael@0 | 773 | // Used when some callee has aborted. |
michael@0 | 774 | class OutOfLinePropagateAbortPar : public OutOfLineCode |
michael@0 | 775 | { |
michael@0 | 776 | private: |
michael@0 | 777 | LInstruction *lir_; |
michael@0 | 778 | |
michael@0 | 779 | public: |
michael@0 | 780 | OutOfLinePropagateAbortPar(LInstruction *lir) |
michael@0 | 781 | : lir_(lir) |
michael@0 | 782 | { } |
michael@0 | 783 | |
michael@0 | 784 | LInstruction *lir() { return lir_; } |
michael@0 | 785 | |
michael@0 | 786 | bool generate(CodeGeneratorShared *codegen); |
michael@0 | 787 | }; |
michael@0 | 788 | |
michael@0 | 789 | extern const VMFunction InterruptCheckInfo; |
michael@0 | 790 | |
michael@0 | 791 | } // namespace jit |
michael@0 | 792 | } // namespace js |
michael@0 | 793 | |
michael@0 | 794 | #endif /* jit_shared_CodeGenerator_shared_h */ |