Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #ifndef jit_x64_Assembler_x64_h |
michael@0 | 8 | #define jit_x64_Assembler_x64_h |
michael@0 | 9 | |
michael@0 | 10 | #include "mozilla/ArrayUtils.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "jit/IonCode.h" |
michael@0 | 13 | #include "jit/shared/Assembler-shared.h" |
michael@0 | 14 | |
michael@0 | 15 | namespace js { |
michael@0 | 16 | namespace jit { |
michael@0 | 17 | |
// The sixteen x64 general-purpose integer registers, exposed as jit::Register
// values backed by the JSC::X86Registers enum.  The low eight share encodings
// with their 32-bit x86 counterparts (eax..ebp, esp); rsp is listed last.
michael@0 | 18 | static MOZ_CONSTEXPR_VAR Register rax = { JSC::X86Registers::eax }; |
michael@0 | 19 | static MOZ_CONSTEXPR_VAR Register rbx = { JSC::X86Registers::ebx }; |
michael@0 | 20 | static MOZ_CONSTEXPR_VAR Register rcx = { JSC::X86Registers::ecx }; |
michael@0 | 21 | static MOZ_CONSTEXPR_VAR Register rdx = { JSC::X86Registers::edx }; |
michael@0 | 22 | static MOZ_CONSTEXPR_VAR Register rsi = { JSC::X86Registers::esi }; |
michael@0 | 23 | static MOZ_CONSTEXPR_VAR Register rdi = { JSC::X86Registers::edi }; |
michael@0 | 24 | static MOZ_CONSTEXPR_VAR Register rbp = { JSC::X86Registers::ebp }; |
michael@0 | 25 | static MOZ_CONSTEXPR_VAR Register r8 = { JSC::X86Registers::r8 }; |
michael@0 | 26 | static MOZ_CONSTEXPR_VAR Register r9 = { JSC::X86Registers::r9 }; |
michael@0 | 27 | static MOZ_CONSTEXPR_VAR Register r10 = { JSC::X86Registers::r10 }; |
michael@0 | 28 | static MOZ_CONSTEXPR_VAR Register r11 = { JSC::X86Registers::r11 }; |
michael@0 | 29 | static MOZ_CONSTEXPR_VAR Register r12 = { JSC::X86Registers::r12 }; |
michael@0 | 30 | static MOZ_CONSTEXPR_VAR Register r13 = { JSC::X86Registers::r13 }; |
michael@0 | 31 | static MOZ_CONSTEXPR_VAR Register r14 = { JSC::X86Registers::r14 }; |
michael@0 | 32 | static MOZ_CONSTEXPR_VAR Register r15 = { JSC::X86Registers::r15 }; |
michael@0 | 33 | static MOZ_CONSTEXPR_VAR Register rsp = { JSC::X86Registers::esp }; |
michael@0 | 34 | |
// The sixteen SSE vector registers available in 64-bit mode (xmm8..xmm15 are
// x64-only), exposed as jit::FloatRegister values.
michael@0 | 35 | static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 }; |
michael@0 | 36 | static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 }; |
michael@0 | 37 | static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 }; |
michael@0 | 38 | static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 }; |
michael@0 | 39 | static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 }; |
michael@0 | 40 | static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 }; |
michael@0 | 41 | static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 }; |
michael@0 | 42 | static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 }; |
michael@0 | 43 | static MOZ_CONSTEXPR_VAR FloatRegister xmm8 = { JSC::X86Registers::xmm8 }; |
michael@0 | 44 | static MOZ_CONSTEXPR_VAR FloatRegister xmm9 = { JSC::X86Registers::xmm9 }; |
michael@0 | 45 | static MOZ_CONSTEXPR_VAR FloatRegister xmm10 = { JSC::X86Registers::xmm10 }; |
michael@0 | 46 | static MOZ_CONSTEXPR_VAR FloatRegister xmm11 = { JSC::X86Registers::xmm11 }; |
michael@0 | 47 | static MOZ_CONSTEXPR_VAR FloatRegister xmm12 = { JSC::X86Registers::xmm12 }; |
michael@0 | 48 | static MOZ_CONSTEXPR_VAR FloatRegister xmm13 = { JSC::X86Registers::xmm13 }; |
michael@0 | 49 | static MOZ_CONSTEXPR_VAR FloatRegister xmm14 = { JSC::X86Registers::xmm14 }; |
michael@0 | 50 | static MOZ_CONSTEXPR_VAR FloatRegister xmm15 = { JSC::X86Registers::xmm15 }; |
michael@0 | 51 | |
// 32-bit-style aliases for the low eight registers, so code shared with the
// x86 backend can refer to eax/ebx/... and still get the x64 definitions.
michael@0 | 52 | // X86-common synonyms. |
michael@0 | 53 | static MOZ_CONSTEXPR_VAR Register eax = rax; |
michael@0 | 54 | static MOZ_CONSTEXPR_VAR Register ebx = rbx; |
michael@0 | 55 | static MOZ_CONSTEXPR_VAR Register ecx = rcx; |
michael@0 | 56 | static MOZ_CONSTEXPR_VAR Register edx = rdx; |
michael@0 | 57 | static MOZ_CONSTEXPR_VAR Register esi = rsi; |
michael@0 | 58 | static MOZ_CONSTEXPR_VAR Register edi = rdi; |
michael@0 | 59 | static MOZ_CONSTEXPR_VAR Register ebp = rbp; |
michael@0 | 60 | static MOZ_CONSTEXPR_VAR Register esp = rsp; |
// Sentinel values meaning "no register"; backed by the enum's invalid codes.
michael@0 | 62 | static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg }; |
michael@0 | 63 | static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm }; |
michael@0 | 64 | |
// Registers with fixed roles in JIT-generated code.  Note that on x64 a boxed
// JS Value fits in a single 64-bit register, so JSReturnReg_Type and
// JSReturnReg_Data both alias JSReturnReg (rcx).
michael@0 | 65 | static MOZ_CONSTEXPR_VAR Register StackPointer = rsp; |
michael@0 | 66 | static MOZ_CONSTEXPR_VAR Register FramePointer = rbp; |
michael@0 | 67 | static MOZ_CONSTEXPR_VAR Register JSReturnReg = rcx; |
michael@0 | 68 | // Avoid, except for assertions. |
michael@0 | 69 | static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = JSReturnReg; |
michael@0 | 70 | static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = JSReturnReg; |
// ScratchReg (r11) is clobbered freely by macro-assembler sequences below
// (e.g. push(ImmGCPtr), movWithPatch); ScratchFloatReg plays the same role
// for SSE code.  HeapReg (r15) is pinned for asm.js heap accesses.
michael@0 | 72 | static MOZ_CONSTEXPR_VAR Register ReturnReg = rax; |
michael@0 | 73 | static MOZ_CONSTEXPR_VAR Register ScratchReg = r11; |
michael@0 | 74 | static MOZ_CONSTEXPR_VAR Register HeapReg = r15; |
michael@0 | 75 | static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = xmm0; |
michael@0 | 76 | static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = xmm15; |
michael@0 | 77 | |
michael@0 | 78 | // Avoid rbp, which is the FramePointer, which is unavailable in some modes. |
michael@0 | 79 | static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = r8; |
michael@0 | 80 | static MOZ_CONSTEXPR_VAR Register CallTempReg0 = rax; |
michael@0 | 81 | static MOZ_CONSTEXPR_VAR Register CallTempReg1 = rdi; |
michael@0 | 82 | static MOZ_CONSTEXPR_VAR Register CallTempReg2 = rbx; |
michael@0 | 83 | static MOZ_CONSTEXPR_VAR Register CallTempReg3 = rcx; |
michael@0 | 84 | static MOZ_CONSTEXPR_VAR Register CallTempReg4 = rsi; |
michael@0 | 85 | static MOZ_CONSTEXPR_VAR Register CallTempReg5 = rdx; |
michael@0 | 86 | |
// Native-ABI argument registers.  Win64 passes the first four arguments in
// rcx, rdx, r8, r9 (floats in xmm0-xmm3); the non-Windows branch matches the
// System V AMD64 convention with six integer and eight float argument
// registers.  CallTempNonArgRegs lists CallTempRegN values that are NOT also
// argument registers under the active ABI.
michael@0 | 87 | // Different argument registers for WIN64 |
michael@0 | 88 | #if defined(_WIN64) |
michael@0 | 89 | static MOZ_CONSTEXPR_VAR Register IntArgReg0 = rcx; |
michael@0 | 90 | static MOZ_CONSTEXPR_VAR Register IntArgReg1 = rdx; |
michael@0 | 91 | static MOZ_CONSTEXPR_VAR Register IntArgReg2 = r8; |
michael@0 | 92 | static MOZ_CONSTEXPR_VAR Register IntArgReg3 = r9; |
michael@0 | 93 | static MOZ_CONSTEXPR_VAR uint32_t NumIntArgRegs = 4; |
michael@0 | 94 | static MOZ_CONSTEXPR_VAR Register IntArgRegs[NumIntArgRegs] = { rcx, rdx, r8, r9 }; |
michael@0 | 95 | |
michael@0 | 96 | static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { rax, rdi, rbx, rsi }; |
michael@0 | 97 | static const uint32_t NumCallTempNonArgRegs = |
michael@0 | 98 | mozilla::ArrayLength(CallTempNonArgRegs); |
michael@0 | 99 | |
michael@0 | 100 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg0 = xmm0; |
michael@0 | 101 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg1 = xmm1; |
michael@0 | 102 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg2 = xmm2; |
michael@0 | 103 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg3 = xmm3; |
michael@0 | 104 | static const uint32_t NumFloatArgRegs = 4; |
michael@0 | 105 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3 }; |
michael@0 | 106 | #else |
michael@0 | 107 | static MOZ_CONSTEXPR_VAR Register IntArgReg0 = rdi; |
michael@0 | 108 | static MOZ_CONSTEXPR_VAR Register IntArgReg1 = rsi; |
michael@0 | 109 | static MOZ_CONSTEXPR_VAR Register IntArgReg2 = rdx; |
michael@0 | 110 | static MOZ_CONSTEXPR_VAR Register IntArgReg3 = rcx; |
michael@0 | 111 | static MOZ_CONSTEXPR_VAR Register IntArgReg4 = r8; |
michael@0 | 112 | static MOZ_CONSTEXPR_VAR Register IntArgReg5 = r9; |
michael@0 | 113 | static MOZ_CONSTEXPR_VAR uint32_t NumIntArgRegs = 6; |
michael@0 | 114 | static MOZ_CONSTEXPR_VAR Register IntArgRegs[NumIntArgRegs] = { rdi, rsi, rdx, rcx, r8, r9 }; |
michael@0 | 115 | |
michael@0 | 116 | static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { rax, rbx }; |
michael@0 | 117 | static const uint32_t NumCallTempNonArgRegs = |
michael@0 | 118 | mozilla::ArrayLength(CallTempNonArgRegs); |
michael@0 | 119 | |
michael@0 | 120 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg0 = xmm0; |
michael@0 | 121 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg1 = xmm1; |
michael@0 | 122 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg2 = xmm2; |
michael@0 | 123 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg3 = xmm3; |
michael@0 | 124 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg4 = xmm4; |
michael@0 | 125 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg5 = xmm5; |
michael@0 | 126 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg6 = xmm6; |
michael@0 | 127 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgReg7 = xmm7; |
michael@0 | 128 | static MOZ_CONSTEXPR_VAR uint32_t NumFloatArgRegs = 8; |
michael@0 | 129 | static MOZ_CONSTEXPR_VAR FloatRegister FloatArgRegs[NumFloatArgRegs] = { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 }; |
michael@0 | 130 | #endif |
michael@0 | 131 | |
// Fixed register conventions for two stubs defined elsewhere in the JIT.
michael@0 | 132 | // The convention used by the ForkJoinGetSlice stub. None of these can be rax |
michael@0 | 133 | // or rdx, which the stub also needs for cmpxchg and div, respectively. |
michael@0 | 134 | static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_cx = rdi; |
michael@0 | 135 | static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp0 = rbx; |
michael@0 | 136 | static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp1 = rcx; |
michael@0 | 137 | static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_output = rsi; |
michael@0 | 138 | |
michael@0 | 139 | // Registers used in the GenerateFFIIonExit Enable Activation block. |
michael@0 | 140 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = r10; |
michael@0 | 141 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = rax; |
michael@0 | 142 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = rdi; |
michael@0 | 143 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = rbx; |
michael@0 | 144 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = rsi; |
michael@0 | 145 | |
michael@0 | 146 | // Registers used in the GenerateFFIIonExit Disable Activation block. |
// ReturnData and ReturnType deliberately share ecx: on x64 the boxed return
// Value lives in the single register JSReturnReg (rcx) defined above.
michael@0 | 147 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = ecx; |
michael@0 | 148 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx; |
michael@0 | 149 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = rax; |
michael@0 | 150 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = rdi; |
michael@0 | 151 | static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = rbx; |
michael@0 | 152 | |
// Iterates over the native-ABI locations (register or stack slot) of a call's
// arguments: each next(type) call yields the ABIArg for the next argument and
// advances the internal cursor.  Implemented out of line.
michael@0 | 153 | class ABIArgGenerator |
michael@0 | 154 | { |
// On Windows x64, integer and float arguments consume positions in a single
// shared sequence of four slots, so one index suffices; elsewhere (System V)
// integer and float argument registers are counted independently.
michael@0 | 155 | #if defined(XP_WIN) |
michael@0 | 156 | unsigned regIndex_; |
michael@0 | 157 | #else |
michael@0 | 158 | unsigned intRegIndex_; |
michael@0 | 159 | unsigned floatRegIndex_; |
michael@0 | 160 | #endif |
michael@0 | 161 | uint32_t stackOffset_; |
michael@0 | 162 | ABIArg current_; |
michael@0 | 163 | |
michael@0 | 164 | public: |
michael@0 | 165 | ABIArgGenerator(); |
michael@0 | 166 | ABIArg next(MIRType argType); |
michael@0 | 167 | ABIArg &current() { return current_; } |
michael@0 | 168 | uint32_t stackBytesConsumedSoFar() const { return stackOffset_; } |
michael@0 | 169 | |
michael@0 | 170 | // Note: these registers are all guaranteed to be different |
michael@0 | 171 | static const Register NonArgReturnVolatileReg0; |
michael@0 | 172 | static const Register NonArgReturnVolatileReg1; |
michael@0 | 173 | static const Register NonVolatileReg; |
michael@0 | 174 | }; |
michael@0 | 175 | |
michael@0 | 176 | static MOZ_CONSTEXPR_VAR Register OsrFrameReg = IntArgReg3; |
michael@0 | 177 | |
michael@0 | 178 | static MOZ_CONSTEXPR_VAR Register PreBarrierReg = rdx; |
michael@0 | 179 | |
michael@0 | 180 | // GCC stack is aligned on 16 bytes, but we don't maintain the invariant in |
michael@0 | 181 | // jitted code. |
michael@0 | 182 | static const uint32_t StackAlignment = 16; |
michael@0 | 183 | static const bool StackKeptAligned = false; |
michael@0 | 184 | static const uint32_t CodeAlignment = 8; |
// A native frame is just the 8-byte return address pushed by 'call'; the
// prologue does not change the alignment state.
michael@0 | 185 | static const uint32_t NativeFrameSize = sizeof(void*); |
michael@0 | 186 | static const uint32_t AlignmentAtPrologue = sizeof(void*); |
michael@0 | 187 | static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue; |
michael@0 | 188 | |
// Pointer-sized elements use scale factor 8 in x64 addressing modes.
michael@0 | 189 | static const Scale ScalePointer = TimesEight; |
michael@0 | 190 | |
michael@0 | 191 | } // namespace jit |
michael@0 | 192 | } // namespace js |
michael@0 | 193 | |
michael@0 | 194 | #include "jit/shared/Assembler-x86-shared.h" |
michael@0 | 195 | |
michael@0 | 196 | namespace js { |
michael@0 | 197 | namespace jit { |
michael@0 | 198 | |
michael@0 | 199 | // Return operand from a JS -> JS call. |
// On x64 the whole boxed Value is returned in the single register
// JSReturnReg (rcx), so the ValueOperand wraps just one register.
michael@0 | 200 | static MOZ_CONSTEXPR_VAR ValueOperand JSReturnOperand = ValueOperand(JSReturnReg); |
michael@0 | 201 | |
michael@0 | 202 | class Assembler : public AssemblerX86Shared |
michael@0 | 203 | { |
michael@0 | 204 | // x64 jumps may need extra bits of relocation, because a jump may extend |
michael@0 | 205 | // beyond the signed 32-bit range. To account for this we add an extended |
michael@0 | 206 | // jump table at the bottom of the instruction stream, and if a jump |
michael@0 | 207 | // overflows its range, it will redirect here. |
michael@0 | 208 | // |
michael@0 | 209 | // In our relocation table, we store two offsets instead of one: the offset |
michael@0 | 210 | // to the original jump, and an offset to the extended jump if we will need |
michael@0 | 211 | // to use it instead. The offsets are stored as: |
michael@0 | 212 | // [unsigned] Unsigned offset to short jump, from the start of the code. |
michael@0 | 213 | // [unsigned] Unsigned offset to the extended jump, from the start of |
michael@0 | 214 | // the jump table, in units of SizeOfJumpTableEntry. |
michael@0 | 215 | // |
michael@0 | 216 | // The start of the relocation table contains the offset from the code |
michael@0 | 217 | // buffer to the start of the extended jump table. |
michael@0 | 218 | // |
michael@0 | 219 | // Each entry in this table is a jmp [rip], followed by a ud2 to hint to the |
michael@0 | 220 | // hardware branch predictor that there is no fallthrough, followed by the |
michael@0 | 221 | // eight bytes containing an immediate address. This comes out to 16 bytes. |
michael@0 | 222 | // +1 byte for opcode |
michael@0 | 223 | // +1 byte for mod r/m |
michael@0 | 224 | // +4 bytes for rip-relative offset (2) |
michael@0 | 225 | // +2 bytes for ud2 instruction |
michael@0 | 226 | // +8 bytes for 64-bit address |
michael@0 | 227 | // |
michael@0 | 228 | static const uint32_t SizeOfExtendedJump = 1 + 1 + 4 + 2 + 8; |
michael@0 | 229 | static const uint32_t SizeOfJumpTableEntry = 16; |
michael@0 | 230 | |
michael@0 | 231 | uint32_t extendedJumpTable_; |
michael@0 | 232 | |
michael@0 | 233 | static JitCode *CodeFromJump(JitCode *code, uint8_t *jump); |
michael@0 | 234 | |
michael@0 | 235 | private: |
michael@0 | 236 | void writeRelocation(JmpSrc src, Relocation::Kind reloc); |
michael@0 | 237 | void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc); |
michael@0 | 238 | |
michael@0 | 239 | protected: |
michael@0 | 240 | size_t addPatchableJump(JmpSrc src, Relocation::Kind reloc); |
michael@0 | 241 | |
michael@0 | 242 | public: |
michael@0 | 243 | using AssemblerX86Shared::j; |
michael@0 | 244 | using AssemblerX86Shared::jmp; |
michael@0 | 245 | using AssemblerX86Shared::push; |
michael@0 | 246 | using AssemblerX86Shared::pop; |
michael@0 | 247 | |
michael@0 | 248 | static uint8_t *PatchableJumpAddress(JitCode *code, size_t index); |
michael@0 | 249 | static void PatchJumpEntry(uint8_t *entry, uint8_t *target); |
michael@0 | 250 | |
michael@0 | 251 | Assembler() |
michael@0 | 252 | : extendedJumpTable_(0) |
michael@0 | 253 | { |
michael@0 | 254 | } |
michael@0 | 255 | |
michael@0 | 256 | static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader); |
michael@0 | 257 | |
michael@0 | 258 | // The buffer is about to be linked, make sure any constant pools or excess |
michael@0 | 259 | // bookkeeping has been flushed to the instruction stream. |
michael@0 | 260 | void finish(); |
michael@0 | 261 | |
michael@0 | 262 | // Copy the assembly code to the given buffer, and perform any pending |
michael@0 | 263 | // relocations relying on the target address. |
michael@0 | 264 | void executableCopy(uint8_t *buffer); |
michael@0 | 265 | |
michael@0 | 266 | // Actual assembly emitting functions. |
michael@0 | 267 | |
michael@0 | 268 | void push(const ImmGCPtr ptr) { |
michael@0 | 269 | movq(ptr, ScratchReg); |
michael@0 | 270 | push(ScratchReg); |
michael@0 | 271 | } |
michael@0 | 272 | void push(const ImmWord ptr) { |
michael@0 | 273 | // We often end up with ImmWords that actually fit into int32. |
michael@0 | 274 | // Be aware of the sign extension behavior. |
michael@0 | 275 | if (ptr.value <= INT32_MAX) { |
michael@0 | 276 | push(Imm32(ptr.value)); |
michael@0 | 277 | } else { |
michael@0 | 278 | movq(ptr, ScratchReg); |
michael@0 | 279 | push(ScratchReg); |
michael@0 | 280 | } |
michael@0 | 281 | } |
michael@0 | 282 | void push(const ImmPtr &imm) { |
michael@0 | 283 | push(ImmWord(uintptr_t(imm.value))); |
michael@0 | 284 | } |
michael@0 | 285 | void push(const FloatRegister &src) { |
michael@0 | 286 | subq(Imm32(sizeof(double)), StackPointer); |
michael@0 | 287 | movsd(src, Address(StackPointer, 0)); |
michael@0 | 288 | } |
michael@0 | 289 | CodeOffsetLabel pushWithPatch(const ImmWord &word) { |
michael@0 | 290 | CodeOffsetLabel label = movWithPatch(word, ScratchReg); |
michael@0 | 291 | push(ScratchReg); |
michael@0 | 292 | return label; |
michael@0 | 293 | } |
michael@0 | 294 | |
michael@0 | 295 | void pop(const FloatRegister &src) { |
michael@0 | 296 | movsd(Address(StackPointer, 0), src); |
michael@0 | 297 | addq(Imm32(sizeof(double)), StackPointer); |
michael@0 | 298 | } |
michael@0 | 299 | |
michael@0 | 300 | CodeOffsetLabel movWithPatch(const ImmWord &word, const Register &dest) { |
michael@0 | 301 | masm.movq_i64r(word.value, dest.code()); |
michael@0 | 302 | return masm.currentOffset(); |
michael@0 | 303 | } |
michael@0 | 304 | CodeOffsetLabel movWithPatch(const ImmPtr &imm, const Register &dest) { |
michael@0 | 305 | return movWithPatch(ImmWord(uintptr_t(imm.value)), dest); |
michael@0 | 306 | } |
michael@0 | 307 | |
michael@0 | 308 | // Load an ImmWord value into a register. Note that this instruction will |
michael@0 | 309 | // attempt to optimize its immediate field size. When a full 64-bit |
michael@0 | 310 | // immediate is needed for a relocation, use movWithPatch. |
michael@0 | 311 | void movq(ImmWord word, const Register &dest) { |
michael@0 | 312 | // Load a 64-bit immediate into a register. If the value falls into |
michael@0 | 313 | // certain ranges, we can use specialized instructions which have |
michael@0 | 314 | // smaller encodings. |
michael@0 | 315 | if (word.value <= UINT32_MAX) { |
michael@0 | 316 | // movl has a 32-bit unsigned (effectively) immediate field. |
michael@0 | 317 | masm.movl_i32r((uint32_t)word.value, dest.code()); |
michael@0 | 318 | } else if ((intptr_t)word.value >= INT32_MIN && (intptr_t)word.value <= INT32_MAX) { |
michael@0 | 319 | // movq has a 32-bit signed immediate field. |
michael@0 | 320 | masm.movq_i32r((int32_t)(intptr_t)word.value, dest.code()); |
michael@0 | 321 | } else { |
michael@0 | 322 | // Otherwise use movabs. |
michael@0 | 323 | masm.movq_i64r(word.value, dest.code()); |
michael@0 | 324 | } |
michael@0 | 325 | } |
michael@0 | 326 | void movq(ImmPtr imm, const Register &dest) { |
michael@0 | 327 | movq(ImmWord(uintptr_t(imm.value)), dest); |
michael@0 | 328 | } |
michael@0 | 329 | void movq(ImmGCPtr ptr, const Register &dest) { |
michael@0 | 330 | masm.movq_i64r(ptr.value, dest.code()); |
michael@0 | 331 | writeDataRelocation(ptr); |
michael@0 | 332 | } |
michael@0 | 333 | void movq(const Operand &src, const Register &dest) { |
michael@0 | 334 | switch (src.kind()) { |
michael@0 | 335 | case Operand::REG: |
michael@0 | 336 | masm.movq_rr(src.reg(), dest.code()); |
michael@0 | 337 | break; |
michael@0 | 338 | case Operand::MEM_REG_DISP: |
michael@0 | 339 | masm.movq_mr(src.disp(), src.base(), dest.code()); |
michael@0 | 340 | break; |
michael@0 | 341 | case Operand::MEM_SCALE: |
michael@0 | 342 | masm.movq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code()); |
michael@0 | 343 | break; |
michael@0 | 344 | case Operand::MEM_ADDRESS32: |
michael@0 | 345 | masm.movq_mr(src.address(), dest.code()); |
michael@0 | 346 | break; |
michael@0 | 347 | default: |
michael@0 | 348 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 349 | } |
michael@0 | 350 | } |
michael@0 | 351 | void movq(const Register &src, const Operand &dest) { |
michael@0 | 352 | switch (dest.kind()) { |
michael@0 | 353 | case Operand::REG: |
michael@0 | 354 | masm.movq_rr(src.code(), dest.reg()); |
michael@0 | 355 | break; |
michael@0 | 356 | case Operand::MEM_REG_DISP: |
michael@0 | 357 | masm.movq_rm(src.code(), dest.disp(), dest.base()); |
michael@0 | 358 | break; |
michael@0 | 359 | case Operand::MEM_SCALE: |
michael@0 | 360 | masm.movq_rm(src.code(), dest.disp(), dest.base(), dest.index(), dest.scale()); |
michael@0 | 361 | break; |
michael@0 | 362 | case Operand::MEM_ADDRESS32: |
michael@0 | 363 | masm.movq_rm(src.code(), dest.address()); |
michael@0 | 364 | break; |
michael@0 | 365 | default: |
michael@0 | 366 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 367 | } |
michael@0 | 368 | } |
michael@0 | 369 | void movq(Imm32 imm32, const Operand &dest) { |
michael@0 | 370 | switch (dest.kind()) { |
michael@0 | 371 | case Operand::REG: |
michael@0 | 372 | masm.movl_i32r(imm32.value, dest.reg()); |
michael@0 | 373 | break; |
michael@0 | 374 | case Operand::MEM_REG_DISP: |
michael@0 | 375 | masm.movq_i32m(imm32.value, dest.disp(), dest.base()); |
michael@0 | 376 | break; |
michael@0 | 377 | case Operand::MEM_SCALE: |
michael@0 | 378 | masm.movq_i32m(imm32.value, dest.disp(), dest.base(), dest.index(), dest.scale()); |
michael@0 | 379 | break; |
michael@0 | 380 | case Operand::MEM_ADDRESS32: |
michael@0 | 381 | masm.movq_i32m(imm32.value, dest.address()); |
michael@0 | 382 | break; |
michael@0 | 383 | default: |
michael@0 | 384 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 385 | } |
michael@0 | 386 | } |
michael@0 | 387 | void movq(const Register &src, const FloatRegister &dest) { |
michael@0 | 388 | masm.movq_rr(src.code(), dest.code()); |
michael@0 | 389 | } |
michael@0 | 390 | void movq(const FloatRegister &src, const Register &dest) { |
michael@0 | 391 | masm.movq_rr(src.code(), dest.code()); |
michael@0 | 392 | } |
michael@0 | 393 | void movq(const Register &src, const Register &dest) { |
michael@0 | 394 | masm.movq_rr(src.code(), dest.code()); |
michael@0 | 395 | } |
michael@0 | 396 | |
michael@0 | 397 | void xchgq(const Register &src, const Register &dest) { |
michael@0 | 398 | masm.xchgq_rr(src.code(), dest.code()); |
michael@0 | 399 | } |
michael@0 | 400 | |
michael@0 | 401 | void andq(const Register &src, const Register &dest) { |
michael@0 | 402 | masm.andq_rr(src.code(), dest.code()); |
michael@0 | 403 | } |
michael@0 | 404 | void andq(Imm32 imm, const Register &dest) { |
michael@0 | 405 | masm.andq_ir(imm.value, dest.code()); |
michael@0 | 406 | } |
michael@0 | 407 | |
michael@0 | 408 | void addq(Imm32 imm, const Register &dest) { |
michael@0 | 409 | masm.addq_ir(imm.value, dest.code()); |
michael@0 | 410 | } |
michael@0 | 411 | void addq(Imm32 imm, const Operand &dest) { |
michael@0 | 412 | switch (dest.kind()) { |
michael@0 | 413 | case Operand::REG: |
michael@0 | 414 | masm.addq_ir(imm.value, dest.reg()); |
michael@0 | 415 | break; |
michael@0 | 416 | case Operand::MEM_REG_DISP: |
michael@0 | 417 | masm.addq_im(imm.value, dest.disp(), dest.base()); |
michael@0 | 418 | break; |
michael@0 | 419 | case Operand::MEM_ADDRESS32: |
michael@0 | 420 | masm.addq_im(imm.value, dest.address()); |
michael@0 | 421 | break; |
michael@0 | 422 | default: |
michael@0 | 423 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 424 | } |
michael@0 | 425 | } |
michael@0 | 426 | void addq(const Register &src, const Register &dest) { |
michael@0 | 427 | masm.addq_rr(src.code(), dest.code()); |
michael@0 | 428 | } |
michael@0 | 429 | void addq(const Operand &src, const Register &dest) { |
michael@0 | 430 | switch (src.kind()) { |
michael@0 | 431 | case Operand::REG: |
michael@0 | 432 | masm.addq_rr(src.reg(), dest.code()); |
michael@0 | 433 | break; |
michael@0 | 434 | case Operand::MEM_REG_DISP: |
michael@0 | 435 | masm.addq_mr(src.disp(), src.base(), dest.code()); |
michael@0 | 436 | break; |
michael@0 | 437 | case Operand::MEM_ADDRESS32: |
michael@0 | 438 | masm.addq_mr(src.address(), dest.code()); |
michael@0 | 439 | break; |
michael@0 | 440 | default: |
michael@0 | 441 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 442 | } |
michael@0 | 443 | } |
michael@0 | 444 | |
michael@0 | 445 | void subq(Imm32 imm, const Register &dest) { |
michael@0 | 446 | masm.subq_ir(imm.value, dest.code()); |
michael@0 | 447 | } |
michael@0 | 448 | void subq(const Register &src, const Register &dest) { |
michael@0 | 449 | masm.subq_rr(src.code(), dest.code()); |
michael@0 | 450 | } |
michael@0 | 451 | void subq(const Operand &src, const Register &dest) { |
michael@0 | 452 | switch (src.kind()) { |
michael@0 | 453 | case Operand::REG: |
michael@0 | 454 | masm.subq_rr(src.reg(), dest.code()); |
michael@0 | 455 | break; |
michael@0 | 456 | case Operand::MEM_REG_DISP: |
michael@0 | 457 | masm.subq_mr(src.disp(), src.base(), dest.code()); |
michael@0 | 458 | break; |
michael@0 | 459 | case Operand::MEM_ADDRESS32: |
michael@0 | 460 | masm.subq_mr(src.address(), dest.code()); |
michael@0 | 461 | break; |
michael@0 | 462 | default: |
michael@0 | 463 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 464 | } |
michael@0 | 465 | } |
michael@0 | 466 | void subq(const Register &src, const Operand &dest) { |
michael@0 | 467 | switch (dest.kind()) { |
michael@0 | 468 | case Operand::REG: |
michael@0 | 469 | masm.subq_rr(src.code(), dest.reg()); |
michael@0 | 470 | break; |
michael@0 | 471 | case Operand::MEM_REG_DISP: |
michael@0 | 472 | masm.subq_rm(src.code(), dest.disp(), dest.base()); |
michael@0 | 473 | break; |
michael@0 | 474 | default: |
michael@0 | 475 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 476 | } |
michael@0 | 477 | } |
michael@0 | 478 | void shlq(Imm32 imm, const Register &dest) { |
michael@0 | 479 | masm.shlq_i8r(imm.value, dest.code()); |
michael@0 | 480 | } |
michael@0 | 481 | void shrq(Imm32 imm, const Register &dest) { |
michael@0 | 482 | masm.shrq_i8r(imm.value, dest.code()); |
michael@0 | 483 | } |
michael@0 | 484 | void sarq(Imm32 imm, const Register &dest) { |
michael@0 | 485 | masm.sarq_i8r(imm.value, dest.code()); |
michael@0 | 486 | } |
michael@0 | 487 | void orq(Imm32 imm, const Register &dest) { |
michael@0 | 488 | masm.orq_ir(imm.value, dest.code()); |
michael@0 | 489 | } |
michael@0 | 490 | void orq(const Register &src, const Register &dest) { |
michael@0 | 491 | masm.orq_rr(src.code(), dest.code()); |
michael@0 | 492 | } |
michael@0 | 493 | void orq(const Operand &src, const Register &dest) { |
michael@0 | 494 | switch (src.kind()) { |
michael@0 | 495 | case Operand::REG: |
michael@0 | 496 | masm.orq_rr(src.reg(), dest.code()); |
michael@0 | 497 | break; |
michael@0 | 498 | case Operand::MEM_REG_DISP: |
michael@0 | 499 | masm.orq_mr(src.disp(), src.base(), dest.code()); |
michael@0 | 500 | break; |
michael@0 | 501 | case Operand::MEM_ADDRESS32: |
michael@0 | 502 | masm.orq_mr(src.address(), dest.code()); |
michael@0 | 503 | break; |
michael@0 | 504 | default: |
michael@0 | 505 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 506 | } |
michael@0 | 507 | } |
michael@0 | 508 | void xorq(const Register &src, const Register &dest) { |
michael@0 | 509 | masm.xorq_rr(src.code(), dest.code()); |
michael@0 | 510 | } |
michael@0 | 511 | void xorq(Imm32 imm, const Register &dest) { |
michael@0 | 512 | masm.xorq_ir(imm.value, dest.code()); |
michael@0 | 513 | } |
michael@0 | 514 | |
michael@0 | 515 | void mov(ImmWord word, const Register &dest) { |
michael@0 | 516 | // Use xor for setting registers to zero, as it is specially optimized |
michael@0 | 517 | // for this purpose on modern hardware. Note that it does clobber FLAGS |
michael@0 | 518 | // though. Use xorl instead of xorq since they are functionally |
michael@0 | 519 | // equivalent (32-bit instructions zero-extend their results to 64 bits) |
michael@0 | 520 | // and xorl has a smaller encoding. |
michael@0 | 521 | if (word.value == 0) |
michael@0 | 522 | xorl(dest, dest); |
michael@0 | 523 | else |
michael@0 | 524 | movq(word, dest); |
michael@0 | 525 | } |
michael@0 | 526 | void mov(ImmPtr imm, const Register &dest) { |
michael@0 | 527 | movq(imm, dest); |
michael@0 | 528 | } |
michael@0 | 529 | void mov(AsmJSImmPtr imm, const Register &dest) { |
michael@0 | 530 | masm.movq_i64r(-1, dest.code()); |
michael@0 | 531 | enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), imm.kind())); |
michael@0 | 532 | } |
michael@0 | 533 | void mov(const Operand &src, const Register &dest) { |
michael@0 | 534 | movq(src, dest); |
michael@0 | 535 | } |
michael@0 | 536 | void mov(const Register &src, const Operand &dest) { |
michael@0 | 537 | movq(src, dest); |
michael@0 | 538 | } |
michael@0 | 539 | void mov(const Imm32 &imm32, const Operand &dest) { |
michael@0 | 540 | movq(imm32, dest); |
michael@0 | 541 | } |
michael@0 | 542 | void mov(const Register &src, const Register &dest) { |
michael@0 | 543 | movq(src, dest); |
michael@0 | 544 | } |
michael@0 | 545 | void mov(AbsoluteLabel *label, const Register &dest) { |
michael@0 | 546 | JS_ASSERT(!label->bound()); |
michael@0 | 547 | // Thread the patch list through the unpatched address word in the |
michael@0 | 548 | // instruction stream. |
michael@0 | 549 | masm.movq_i64r(label->prev(), dest.code()); |
michael@0 | 550 | label->setPrev(masm.size()); |
michael@0 | 551 | } |
michael@0 | 552 | void xchg(const Register &src, const Register &dest) { |
michael@0 | 553 | xchgq(src, dest); |
michael@0 | 554 | } |
michael@0 | 555 | void lea(const Operand &src, const Register &dest) { |
michael@0 | 556 | switch (src.kind()) { |
michael@0 | 557 | case Operand::MEM_REG_DISP: |
michael@0 | 558 | masm.leaq_mr(src.disp(), src.base(), dest.code()); |
michael@0 | 559 | break; |
michael@0 | 560 | case Operand::MEM_SCALE: |
michael@0 | 561 | masm.leaq_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code()); |
michael@0 | 562 | break; |
michael@0 | 563 | default: |
michael@0 | 564 | MOZ_ASSUME_UNREACHABLE("unexepcted operand kind"); |
michael@0 | 565 | } |
michael@0 | 566 | } |
michael@0 | 567 | |
    // The methods below emit loads/stores/lea using a RIP-relative
    // addressing mode whose displacement is filled in later. Each returns
    // a CodeOffsetLabel locating the instruction so a patcher can fix up
    // the 32-bit offset afterwards.
    CodeOffsetLabel loadRipRelativeInt32(const Register &dest) {
        return CodeOffsetLabel(masm.movl_ripr(dest.code()).offset());
    }
    CodeOffsetLabel loadRipRelativeInt64(const Register &dest) {
        return CodeOffsetLabel(masm.movq_ripr(dest.code()).offset());
    }
    CodeOffsetLabel loadRipRelativeDouble(const FloatRegister &dest) {
        return CodeOffsetLabel(masm.movsd_ripr(dest.code()).offset());
    }
    CodeOffsetLabel storeRipRelativeInt32(const Register &dest) {
        return CodeOffsetLabel(masm.movl_rrip(dest.code()).offset());
    }
    CodeOffsetLabel storeRipRelativeDouble(const FloatRegister &dest) {
        return CodeOffsetLabel(masm.movsd_rrip(dest.code()).offset());
    }
    CodeOffsetLabel leaRipRelative(const Register &dest) {
        return CodeOffsetLabel(masm.leaq_rip(dest.code()).offset());
    }
michael@0 | 586 | |
    // The cmpq methods below swap lhs and rhs when invoking the
    // macroassembler, to conform with the Intel operand-order convention.
    // When calling these methods, put the left operand on the left as you
    // would expect.
michael@0 | 590 | void cmpq(const Operand &lhs, const Register &rhs) { |
michael@0 | 591 | switch (lhs.kind()) { |
michael@0 | 592 | case Operand::REG: |
michael@0 | 593 | masm.cmpq_rr(rhs.code(), lhs.reg()); |
michael@0 | 594 | break; |
michael@0 | 595 | case Operand::MEM_REG_DISP: |
michael@0 | 596 | masm.cmpq_rm(rhs.code(), lhs.disp(), lhs.base()); |
michael@0 | 597 | break; |
michael@0 | 598 | case Operand::MEM_ADDRESS32: |
michael@0 | 599 | masm.cmpq_rm(rhs.code(), lhs.address()); |
michael@0 | 600 | break; |
michael@0 | 601 | default: |
michael@0 | 602 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 603 | } |
michael@0 | 604 | } |
michael@0 | 605 | void cmpq(const Operand &lhs, Imm32 rhs) { |
michael@0 | 606 | switch (lhs.kind()) { |
michael@0 | 607 | case Operand::REG: |
michael@0 | 608 | masm.cmpq_ir(rhs.value, lhs.reg()); |
michael@0 | 609 | break; |
michael@0 | 610 | case Operand::MEM_REG_DISP: |
michael@0 | 611 | masm.cmpq_im(rhs.value, lhs.disp(), lhs.base()); |
michael@0 | 612 | break; |
michael@0 | 613 | case Operand::MEM_ADDRESS32: |
michael@0 | 614 | masm.cmpq_im(rhs.value, lhs.address()); |
michael@0 | 615 | break; |
michael@0 | 616 | default: |
michael@0 | 617 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 618 | } |
michael@0 | 619 | } |
michael@0 | 620 | void cmpq(const Register &lhs, const Operand &rhs) { |
michael@0 | 621 | switch (rhs.kind()) { |
michael@0 | 622 | case Operand::REG: |
michael@0 | 623 | masm.cmpq_rr(rhs.reg(), lhs.code()); |
michael@0 | 624 | break; |
michael@0 | 625 | case Operand::MEM_REG_DISP: |
michael@0 | 626 | masm.cmpq_mr(rhs.disp(), rhs.base(), lhs.code()); |
michael@0 | 627 | break; |
michael@0 | 628 | default: |
michael@0 | 629 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 630 | } |
michael@0 | 631 | } |
    // Register-register compare; operands swapped for the macroassembler
    // (see the operand-order comment above).
    void cmpq(const Register &lhs, const Register &rhs) {
        masm.cmpq_rr(rhs.code(), lhs.code());
    }
    // Register-immediate compare.
    void cmpq(const Register &lhs, Imm32 rhs) {
        masm.cmpq_ir(rhs.value, lhs.code());
    }
michael@0 | 638 | |
    // 64-bit TEST of a register against a 32-bit immediate (sets flags
    // only; no destination is written).
    void testq(const Register &lhs, Imm32 rhs) {
        masm.testq_i32r(rhs.value, lhs.code());
    }
    // 64-bit TEST of two registers.
    void testq(const Register &lhs, const Register &rhs) {
        masm.testq_rr(rhs.code(), lhs.code());
    }
michael@0 | 645 | void testq(const Operand &lhs, Imm32 rhs) { |
michael@0 | 646 | switch (lhs.kind()) { |
michael@0 | 647 | case Operand::REG: |
michael@0 | 648 | masm.testq_i32r(rhs.value, lhs.reg()); |
michael@0 | 649 | break; |
michael@0 | 650 | case Operand::MEM_REG_DISP: |
michael@0 | 651 | masm.testq_i32m(rhs.value, lhs.disp(), lhs.base()); |
michael@0 | 652 | break; |
michael@0 | 653 | default: |
michael@0 | 654 | MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); |
michael@0 | 655 | break; |
michael@0 | 656 | } |
michael@0 | 657 | } |
michael@0 | 658 | |
    // Emit an unconditional jump to an absolute target; the jump is
    // recorded as pending so it can be patched/relocated later.
    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jmp();
        addPendingJump(src, target, reloc);
    }
    // Emit a conditional jump to an absolute target; recorded as pending
    // for later patching/relocation.
    void j(Condition cond, ImmPtr target,
           Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
        addPendingJump(src, target, reloc);
    }
michael@0 | 668 | |
    // Jump to the entry point of a JitCode block; uses a JITCODE
    // relocation so the GC/relocator can track the code pointer.
    void jmp(JitCode *target) {
        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
    }
    // Conditional jump to the entry point of a JitCode block.
    void j(Condition cond, JitCode *target) {
        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    // Call the entry point of a JitCode block; recorded as a pending
    // jump with a JITCODE relocation.
    void call(JitCode *target) {
        JmpSrc src = masm.call();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
    }
michael@0 | 679 | |
    // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
    // this instruction.
    CodeOffsetLabel toggledCall(JitCode *target, bool enabled) {
        // Record where the instruction starts so the patcher can find it.
        CodeOffsetLabel offset(size());
        // cmp_eax acts as the same-sized placeholder for a disabled call.
        JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
        // Both encodings must occupy exactly ToggledCallSize() bytes so
        // one can be patched over the other in place.
        JS_ASSERT(size() - offset.offset() == ToggledCallSize());
        return offset;
    }
michael@0 | 689 | |
    // Byte size of the instruction emitted by toggledCall (both the CALL
    // and its CMP placeholder encode to this size; see the assert there).
    static size_t ToggledCallSize() {
        // Size of a call instruction.
        return 5;
    }
michael@0 | 694 | |
michael@0 | 695 | // Do not mask shared implementations. |
michael@0 | 696 | using AssemblerX86Shared::call; |
michael@0 | 697 | |
    // Truncating conversion: scalar double -> signed 64-bit integer.
    void cvttsd2sq(const FloatRegister &src, const Register &dest) {
        masm.cvttsd2sq_rr(src.code(), dest.code());
    }
    // Truncating conversion: scalar single -> signed 64-bit integer.
    void cvttss2sq(const FloatRegister &src, const Register &dest) {
        masm.cvttss2sq_rr(src.code(), dest.code());
    }
    // Conversion: signed 64-bit integer -> scalar double.
    void cvtsq2sd(const Register &src, const FloatRegister &dest) {
        masm.cvtsq2sd_rr(src.code(), dest.code());
    }
    // Conversion: signed 64-bit integer -> scalar single.
    void cvtsq2ss(const Register &src, const FloatRegister &dest) {
        masm.cvtsq2ss_rr(src.code(), dest.code());
    }
michael@0 | 710 | }; |
michael@0 | 711 | |
michael@0 | 712 | static inline void |
michael@0 | 713 | PatchJump(CodeLocationJump jump, CodeLocationLabel label) |
michael@0 | 714 | { |
michael@0 | 715 | if (JSC::X86Assembler::canRelinkJump(jump.raw(), label.raw())) { |
michael@0 | 716 | JSC::X86Assembler::setRel32(jump.raw(), label.raw()); |
michael@0 | 717 | } else { |
michael@0 | 718 | JSC::X86Assembler::setRel32(jump.raw(), jump.jumpTableEntry()); |
michael@0 | 719 | Assembler::PatchJumpEntry(jump.jumpTableEntry(), label.raw()); |
michael@0 | 720 | } |
michael@0 | 721 | } |
michael@0 | 722 | |
// Map an integer-argument index to the register that carries it, storing
// the register in |out| and returning true; returns false when the
// argument is passed on the stack instead.
static inline bool
GetIntArgReg(uint32_t intArg, uint32_t floatArg, Register *out)
{
#if defined(_WIN64)
    // Win64 gives every argument (integer or float) its own slot, so the
    // slot index is the combined count.
    uint32_t arg = intArg + floatArg;
#else
    // System V passes integer and float arguments in independent register
    // sequences, so only the integer count matters here.
    uint32_t arg = intArg;
#endif
    if (arg >= NumIntArgRegs)
        return false;
    *out = IntArgRegs[arg];
    return true;
}
michael@0 | 736 | |
// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
{
    // First preference: a real argument register.
    if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
        return true;
    // Unfortunately, we have to assume things about the point at which
    // GetIntArgReg returns false, because we need to know how many registers it
    // can allocate.
#if defined(_WIN64)
    // Mirror GetIntArgReg's Win64 slot computation: int and float args
    // share one slot sequence.
    uint32_t arg = usedIntArgs + usedFloatArgs;
#else
    uint32_t arg = usedIntArgs;
#endif
    // GetIntArgReg failed, so arg >= NumIntArgRegs; index into the
    // non-argument temp registers from zero.
    arg -= NumIntArgRegs;
    if (arg >= NumCallTempNonArgRegs)
        return false;
    *out = CallTempNonArgRegs[arg];
    return true;
}
michael@0 | 761 | |
michael@0 | 762 | static inline bool |
michael@0 | 763 | GetFloatArgReg(uint32_t intArg, uint32_t floatArg, FloatRegister *out) |
michael@0 | 764 | { |
michael@0 | 765 | #if defined(_WIN64) |
michael@0 | 766 | uint32_t arg = intArg + floatArg; |
michael@0 | 767 | #else |
michael@0 | 768 | uint32_t arg = floatArg; |
michael@0 | 769 | #endif |
michael@0 | 770 | if (floatArg >= NumFloatArgRegs) |
michael@0 | 771 | return false; |
michael@0 | 772 | *out = FloatArgRegs[arg]; |
michael@0 | 773 | return true; |
michael@0 | 774 | } |
michael@0 | 775 | |
michael@0 | 776 | } // namespace jit |
michael@0 | 777 | } // namespace js |
michael@0 | 778 | |
michael@0 | 779 | #endif /* jit_x64_Assembler_x64_h */ |