Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_x86_Assembler_x86_h
#define jit_x86_Assembler_x86_h

#include "mozilla/ArrayUtils.h"

#include "assembler/assembler/X86Assembler.h"
#include "jit/CompactBuffer.h"
#include "jit/IonCode.h"
#include "jit/shared/Assembler-shared.h"

namespace js {
namespace jit {

static MOZ_CONSTEXPR_VAR Register eax = { JSC::X86Registers::eax };
static MOZ_CONSTEXPR_VAR Register ecx = { JSC::X86Registers::ecx };
static MOZ_CONSTEXPR_VAR Register edx = { JSC::X86Registers::edx };
static MOZ_CONSTEXPR_VAR Register ebx = { JSC::X86Registers::ebx };
static MOZ_CONSTEXPR_VAR Register esp = { JSC::X86Registers::esp };
static MOZ_CONSTEXPR_VAR Register ebp = { JSC::X86Registers::ebp };
static MOZ_CONSTEXPR_VAR Register esi = { JSC::X86Registers::esi };
static MOZ_CONSTEXPR_VAR Register edi = { JSC::X86Registers::edi };

static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 };

static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm };

static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = xmm0;
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = xmm7;

// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg0 = edi;
static MOZ_CONSTEXPR_VAR Register CallTempReg1 = eax;
static MOZ_CONSTEXPR_VAR Register CallTempReg2 = ebx;
static MOZ_CONSTEXPR_VAR Register CallTempReg3 = ecx;
static MOZ_CONSTEXPR_VAR Register CallTempReg4 = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg5 = edx;

// The convention used by the ForkJoinGetSlice stub. None of these can be eax
// or edx, which the stub also needs for cmpxchg and div, respectively.
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_cx = edi;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp0 = ebx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp1 = ecx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_output = esi;

// We have no arg regs, so our NonArgRegs are just our CallTempReg*.
static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

class ABIArgGenerator
{
    uint32_t stackOffset_;
    ABIArg current_;

  public:
    ABIArgGenerator();
    ABIArg next(MIRType argType);
    ABIArg &current() { return current_; }
    uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }

    // Note: these registers are all guaranteed to be different.
    static const Register NonArgReturnVolatileReg0;
    static const Register NonArgReturnVolatileReg1;
    static const Register NonVolatileReg;
};
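
// A minimal usage sketch (hypothetical caller, not part of the upstream
// header): on 32-bit x86 there are no dedicated argument registers, so next()
// is expected to hand back stack slots and advance stackOffset_ by each
// argument's size.
//
//   ABIArgGenerator abi;
//   for (size_t i = 0; i < numArgs; i++) {
//       ABIArg arg = abi.next(argTypes[i]);   // typically an ABIArg::Stack slot
//       // arg.offsetFromArgBase() would then give the outgoing stack offset.
//   }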

static MOZ_CONSTEXPR_VAR Register OsrFrameReg = edx;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = edx;

// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = ebx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = edx;

// Registers used in the GenerateFFIIonExit Disable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = edx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;

// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
#if defined(__GNUC__)
static const uint32_t StackAlignment = 16;
#else
static const uint32_t StackAlignment = 4;
#endif
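
// StackKeptAligned is false: jitted code does not keep esp 16-byte aligned
// between calls, so calls out to native ABI code presumably re-align the
// stack dynamically in the MacroAssembler rather than relying on
// StackAlignment holding at every call site.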
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue;

struct ImmTag : public Imm32
{
    ImmTag(JSValueTag mask)
      : Imm32(int32_t(mask))
    { }
};

struct ImmType : public ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSVAL_TYPE_TO_TAG(type))
    { }
};
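
// ScalePointer is TimesFour because sizeof(void*) == 4 on x86: indexing a
// table of pointers uses a scale-4 addressing mode (base + index*4).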
static const Scale ScalePointer = TimesFour;

} // namespace jit
} // namespace js

#include "jit/shared/Assembler-x86-shared.h"

namespace js {
namespace jit {

static inline void
PatchJump(CodeLocationJump jump, CodeLocationLabel label)
{
#ifdef DEBUG
    // Assert that we're overwriting a jump instruction, either:
    //   0F 80+cc <imm32>, or
    //   E9 <imm32>
    unsigned char *x = (unsigned char *)jump.raw() - 5;
    JS_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
              (*x == 0xE9));
#endif
    JSC::X86Assembler::setRel32(jump.raw(), label.raw());
}
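
// How the patch above works (a sketch, assuming the usual JSC::X86Assembler
// semantics): jump.raw() points just past a rel32-style jump, so its last four
// bytes live at jump.raw() - 4. setRel32() rewrites them to
// label.raw() - jump.raw(), i.e. the displacement from the end of the jump
// instruction to the new target.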

// Return operand from a JS -> JS call.
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);

class Assembler : public AssemblerX86Shared
{
    void writeRelocation(JmpSrc src) {
        jumpRelocations_.writeUnsigned(src.offset());
    }
    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
        enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
        if (kind == Relocation::JITCODE)
            writeRelocation(src);
    }

  public:
    using AssemblerX86Shared::movl;
    using AssemblerX86Shared::j;
    using AssemblerX86Shared::jmp;
    using AssemblerX86Shared::movsd;
    using AssemblerX86Shared::movss;
    using AssemblerX86Shared::retarget;
    using AssemblerX86Shared::cmpl;
    using AssemblerX86Shared::call;
    using AssemblerX86Shared::push;
    using AssemblerX86Shared::pop;

    static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);

    // Copy the assembly code to the given buffer, and perform any pending
    // relocations relying on the target address.
    void executableCopy(uint8_t *buffer);

    // Actual assembly emitting functions.

    void push(const ImmGCPtr &ptr) {
        push(Imm32(ptr.value));
        writeDataRelocation(ptr);
    }
    void push(const ImmWord imm) {
        push(Imm32(imm.value));
    }
    void push(const ImmPtr imm) {
        push(ImmWord(uintptr_t(imm.value)));
    }
    void push(const FloatRegister &src) {
        subl(Imm32(sizeof(double)), StackPointer);
        movsd(src, Address(StackPointer, 0));
    }

    CodeOffsetLabel pushWithPatch(const ImmWord &word) {
        push(Imm32(word.value));
        return masm.currentOffset();
    }

    void pop(const FloatRegister &src) {
        movsd(Address(StackPointer, 0), src);
        addl(Imm32(sizeof(double)), StackPointer);
    }
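
    // push/pop of a FloatRegister have no single-instruction encoding on x86,
    // so the overloads above synthesize them as an esp adjustment of
    // sizeof(double) plus an SSE movsd to or from the new stack slot.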

    CodeOffsetLabel movWithPatch(const ImmWord &word, const Register &dest) {
        movl(Imm32(word.value), dest);
        return masm.currentOffset();
    }
    CodeOffsetLabel movWithPatch(const ImmPtr &imm, const Register &dest) {
        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
    }

    void movl(const ImmGCPtr &ptr, const Register &dest) {
        masm.movl_i32r(ptr.value, dest.code());
        writeDataRelocation(ptr);
    }
    void movl(const ImmGCPtr &ptr, const Operand &dest) {
        switch (dest.kind()) {
          case Operand::REG:
            masm.movl_i32r(ptr.value, dest.reg());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_REG_DISP:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_SCALE:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base(), dest.index(), dest.scale());
            writeDataRelocation(ptr);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void movl(ImmWord imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
    }
    void movl(ImmPtr imm, Register dest) {
        movl(ImmWord(uintptr_t(imm.value)), dest);
    }
    void mov(ImmWord imm, Register dest) {
        // Use xor for setting registers to zero, as it is specially optimized
        // for this purpose on modern hardware. Note that it does clobber FLAGS
        // though.
        if (imm.value == 0)
            xorl(dest, dest);
        else
            movl(imm, dest);
    }
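    // Note on mov(ImmWord) above: xorl reg,reg is also shorter (2 bytes vs. 5
    // for movl $0,reg) and is recognized by modern CPUs as a zeroing idiom
    // that breaks the dependency on the old register value.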
    void mov(ImmPtr imm, Register dest) {
        mov(ImmWord(uintptr_t(imm.value)), dest);
    }
    void mov(AsmJSImmPtr imm, Register dest) {
        masm.movl_i32r(-1, dest.code());
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), imm.kind()));
    }
    void mov(const Operand &src, const Register &dest) {
        movl(src, dest);
    }
    void mov(const Register &src, const Operand &dest) {
        movl(src, dest);
    }
    void mov(Imm32 imm, const Operand &dest) {
        movl(imm, dest);
    }
    void mov(AbsoluteLabel *label, const Register &dest) {
        JS_ASSERT(!label->bound());
        // Thread the patch list through the unpatched address word in the
        // instruction stream.
        masm.movl_i32r(label->prev(), dest.code());
        label->setPrev(masm.size());
    }
    void mov(const Register &src, const Register &dest) {
        movl(src, dest);
    }
    void xchg(const Register &src, const Register &dest) {
        xchgl(src, dest);
    }
    void lea(const Operand &src, const Register &dest) {
        return leal(src, dest);
    }

    void fld32(const Operand &dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fld32_m(dest.disp(), dest.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    void fstp32(const Operand &src) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fstp32_m(src.disp(), src.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    void cmpl(const Register src, ImmWord ptr) {
        masm.cmpl_ir(ptr.value, src.code());
    }
    void cmpl(const Register src, ImmPtr imm) {
        cmpl(src, ImmWord(uintptr_t(imm.value)));
    }
    void cmpl(const Register src, ImmGCPtr ptr) {
        masm.cmpl_ir(ptr.value, src.code());
        writeDataRelocation(ptr);
    }
    void cmpl(const Register &lhs, const Register &rhs) {
        masm.cmpl_rr(rhs.code(), lhs.code());
    }
    void cmpl(const Operand &op, ImmGCPtr imm) {
        switch (op.kind()) {
          case Operand::REG:
            masm.cmpl_ir_force32(imm.value, op.reg());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_REG_DISP:
            masm.cmpl_im_force32(imm.value, op.disp(), op.base());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_ADDRESS32:
            masm.cmpl_im(imm.value, op.address());
            writeDataRelocation(imm);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void cmpl(const AsmJSAbsoluteAddress &lhs, const Register &rhs) {
        masm.cmpl_rm_force32(rhs.code(), (void*)-1);
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), lhs.kind()));
    }
    CodeOffsetLabel cmplWithPatch(const Register &lhs, Imm32 rhs) {
        masm.cmpl_ir_force32(rhs.value, lhs.code());
        return masm.currentOffset();
    }

    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jmp();
        addPendingJump(src, target, reloc);
    }
    void j(Condition cond, ImmPtr target,
           Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
        addPendingJump(src, target, reloc);
    }

    void jmp(JitCode *target) {
        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void j(Condition cond, JitCode *target) {
        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(JitCode *target) {
        JmpSrc src = masm.call();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(ImmWord target) {
        call(ImmPtr((void*)target.value));
    }
    void call(ImmPtr target) {
        JmpSrc src = masm.call();
        addPendingJump(src, target, Relocation::HARDCODED);
    }
    void call(AsmJSImmPtr target) {
        // Moving to a register is suboptimal. To fix (use a single
        // call-immediate instruction) we'll need to distinguish a new type of
        // relative patch to an absolute address in AsmJSAbsoluteLink.
        mov(target, eax);
        call(eax);
    }

    // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
    // this instruction.
    CodeOffsetLabel toggledCall(JitCode *target, bool enabled) {
        CodeOffsetLabel offset(size());
        JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
        JS_ASSERT(size() - offset.offset() == ToggledCallSize());
        return offset;
    }

    static size_t ToggledCallSize() {
        // Size of a call instruction.
        return 5;
    }
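
    // In toggledCall() above, the enabled form is call rel32 (E8 + imm32) and
    // the disabled form is cmp eax, imm32 (3D + imm32). Both encode in exactly
    // 5 bytes, which is what lets ToggleCall rewrite one into the other in
    // place.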

    // Re-routes pending jumps to an external target, flushing the label in the
    // process.
    void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
        JSC::MacroAssembler::Label jsclabel;
        if (label->used()) {
            bool more;
            JSC::X86Assembler::JmpSrc jmp(label->offset());
            do {
                JSC::X86Assembler::JmpSrc next;
                more = masm.nextJump(jmp, &next);
                addPendingJump(jmp, target, reloc);
                jmp = next;
            } while (more);
        }
        label->reset();
    }
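
    // Unbound jumps to a Label are chained through their not-yet-patched
    // displacement fields, with the Label holding the offset of the most
    // recent one; nextJump() presumably walks that chain, so the loop above
    // converts every queued jump into a pending absolute patch.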

    // Move a 32-bit immediate into a register where the immediate can be
    // patched.
    CodeOffsetLabel movlWithPatch(Imm32 imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
        return masm.currentOffset();
    }

    // Load from *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movsblWithPatch(Address src, Register dest) {
        masm.movsbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(Address src, Register dest) {
        masm.movzbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(Address src, Register dest) {
        masm.movswl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(Address src, Register dest) {
        masm.movzwl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Address src, Register dest) {
        masm.movl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }

    // Store to *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movbWithPatch(Register src, Address dest) {
        masm.movb_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, Address dest) {
        masm.movw_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, Address dest) {
        masm.movl_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }

    // Load from *(addr + index*scale) where addr can be patched.
    CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                                  Register dest)
    {
        masm.movl_mr(addr.addr, index.code(), scale, dest.code());
        return masm.currentOffset();
    }

    // Load from *src where src can be patched.
    CodeOffsetLabel movsblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movsbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movswl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzwl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr(src.addr, dest.code());
        return masm.currentOffset();
    }

    // Store to *dest where dest can be patched.
    CodeOffsetLabel movbWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movb_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movw_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movl_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }

};

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
{
    if (usedIntArgs >= NumCallTempNonArgRegs)
        return false;
    *out = CallTempNonArgRegs[usedIntArgs];
    return true;
}
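
// A usage sketch (hypothetical caller): request scratch registers one at a
// time, bumping usedIntArgs after each success, and only give up once the
// CallTempNonArgRegs pool is exhausted.
//
//   Register reg;
//   if (!GetTempRegForIntArg(usedIntArgs++, usedFloatArgs, &reg))
//       MOZ_ASSUME_UNREACHABLE("ran out of temp registers");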

} // namespace jit
} // namespace js

#endif /* jit_x86_Assembler_x86_h */