js/src/jit/x86/Assembler-x86.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/x86/Assembler-x86.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,564 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef jit_x86_Assembler_x86_h
    1.11 +#define jit_x86_Assembler_x86_h
    1.12 +
    1.13 +#include "mozilla/ArrayUtils.h"
    1.14 +
    1.15 +#include "assembler/assembler/X86Assembler.h"
    1.16 +#include "jit/CompactBuffer.h"
    1.17 +#include "jit/IonCode.h"
    1.18 +#include "jit/shared/Assembler-shared.h"
    1.19 +
    1.20 +namespace js {
    1.21 +namespace jit {
    1.22 +
// General-purpose registers, mapped onto the underlying JSC x86 encodings.
static MOZ_CONSTEXPR_VAR Register eax = { JSC::X86Registers::eax };
static MOZ_CONSTEXPR_VAR Register ecx = { JSC::X86Registers::ecx };
static MOZ_CONSTEXPR_VAR Register edx = { JSC::X86Registers::edx };
static MOZ_CONSTEXPR_VAR Register ebx = { JSC::X86Registers::ebx };
static MOZ_CONSTEXPR_VAR Register esp = { JSC::X86Registers::esp };
static MOZ_CONSTEXPR_VAR Register ebp = { JSC::X86Registers::ebp };
static MOZ_CONSTEXPR_VAR Register esi = { JSC::X86Registers::esi };
static MOZ_CONSTEXPR_VAR Register edi = { JSC::X86Registers::edi };

// SSE registers.
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 };

// Sentinel values used to mean "no register".
static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm };

// Fixed register assignments used by the JIT calling conventions.
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = xmm0;
// xmm7 is reserved as a scratch register and must not hold live values
// across macro-assembler operations.
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = xmm7;

// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg0 = edi;
static MOZ_CONSTEXPR_VAR Register CallTempReg1 = eax;
static MOZ_CONSTEXPR_VAR Register CallTempReg2 = ebx;
static MOZ_CONSTEXPR_VAR Register CallTempReg3 = ecx;
static MOZ_CONSTEXPR_VAR Register CallTempReg4 = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg5 = edx;

// The convention used by the ForkJoinGetSlice stub. None of these can be eax
// or edx, which the stub also needs for cmpxchg and div, respectively.
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_cx = edi;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp0 = ebx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp1 = ecx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_output = esi;

// We have no arg regs, so our NonArgRegs are just our CallTempReg*
static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);

// Assigns a location (on x86: always a stack offset, since there are no
// argument registers — see the CallTempNonArgRegs note above) to each
// successive argument of an ABI call.
class ABIArgGenerator
{
    uint32_t stackOffset_;  // Stack bytes consumed by the arguments seen so far.
    ABIArg current_;        // Location assigned by the most recent next() call.

  public:
    ABIArgGenerator();
    // Compute and return the location for the next argument of |argType|.
    ABIArg next(MIRType argType);
    ABIArg &current() { return current_; }
    uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }

    // Note: these registers are all guaranteed to be different
    static const Register NonArgReturnVolatileReg0;
    static const Register NonArgReturnVolatileReg1;
    static const Register NonVolatileReg;
};
    1.89 +
static MOZ_CONSTEXPR_VAR Register OsrFrameReg = edx;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = edx;

// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = ebx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = edx;

// Registers used in the GenerateFFIIonExit Disable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = edx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;

// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
#if defined(__GNUC__)
static const uint32_t StackAlignment = 16;
#else
static const uint32_t StackAlignment = 4;
#endif
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
// On x86 the native frame at a call site is just the pushed return address.
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue;
   1.119 +struct ImmTag : public Imm32
   1.120 +{
   1.121 +    ImmTag(JSValueTag mask)
   1.122 +      : Imm32(int32_t(mask))
   1.123 +    { }
   1.124 +};
   1.125 +
// An immediate tag derived from a JSValueType via JSVAL_TYPE_TO_TAG.
struct ImmType : public ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSVAL_TYPE_TO_TAG(type))
    { }
};

// Pointers are 4 bytes on x86, so pointer-scaled indexing uses TimesFour.
static const Scale ScalePointer = TimesFour;
   1.134 +
   1.135 +} // namespace jit
   1.136 +} // namespace js
   1.137 +
   1.138 +#include "jit/shared/Assembler-x86-shared.h"
   1.139 +
   1.140 +namespace js {
   1.141 +namespace jit {
   1.142 +
// Redirect an already-emitted jump instruction to target |label| by
// rewriting its rel32 operand in place.
static inline void
PatchJump(CodeLocationJump jump, CodeLocationLabel label)
{
#ifdef DEBUG
    // Assert that we're overwriting a jump instruction, either:
    //   0F 80+cc <imm32>, or
    //   E9 <imm32>
    // jump.raw() points just past the instruction, so the byte 5 back is
    // either the E9 opcode or the 80+cc byte of the two-byte Jcc opcode
    // (whose 0F prefix then sits one byte further back).
    unsigned char *x = (unsigned char *)jump.raw() - 5;
    JS_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
              (*x == 0xE9));
#endif
    JSC::X86Assembler::setRel32(jump.raw(), label.raw());
}

// Return operand from a JS -> JS call.
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
   1.159 +
// x86 (32-bit) assembler. Extends the shared x86/x64 assembler with
// operations that are specific to 32-bit mode, where immediates are
// pointer-sized: GC pointers and words are embedded directly in the
// instruction stream, with data relocations recorded so the GC can find and
// update them.
class Assembler : public AssemblerX86Shared
{
    // Record the offset of |src| in the jump relocation table so the jump
    // can be traced/patched when JIT code is relocated.
    void writeRelocation(JmpSrc src) {
        jumpRelocations_.writeUnsigned(src.offset());
    }
    // Queue a jump for patching to |target| during executableCopy(). JITCODE
    // jumps additionally get a jump-relocation entry.
    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
        enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
        if (kind == Relocation::JITCODE)
            writeRelocation(src);
    }

  public:
    using AssemblerX86Shared::movl;
    using AssemblerX86Shared::j;
    using AssemblerX86Shared::jmp;
    using AssemblerX86Shared::movsd;
    using AssemblerX86Shared::movss;
    using AssemblerX86Shared::retarget;
    using AssemblerX86Shared::cmpl;
    using AssemblerX86Shared::call;
    using AssemblerX86Shared::push;
    using AssemblerX86Shared::pop;

    static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);

    // Copy the assembly code to the given buffer, and perform any pending
    // relocations relying on the target address.
    void executableCopy(uint8_t *buffer);

    // Actual assembly emitting functions.

    // Push a GC pointer, recording a data relocation so the GC can trace
    // (and, if it moves the referent, update) the embedded value.
    void push(const ImmGCPtr &ptr) {
        push(Imm32(ptr.value));
        writeDataRelocation(ptr);
    }
    void push(const ImmWord imm) {
        push(Imm32(imm.value));
    }
    void push(const ImmPtr imm) {
        push(ImmWord(uintptr_t(imm.value)));
    }
    // Push a double: reserve sizeof(double) bytes of stack, then store into
    // the reserved slot.
    void push(const FloatRegister &src) {
        subl(Imm32(sizeof(double)), StackPointer);
        movsd(src, Address(StackPointer, 0));
    }

    // Push an immediate whose value will be patched later; returns the
    // offset to patch at.
    CodeOffsetLabel pushWithPatch(const ImmWord &word) {
        push(Imm32(word.value));
        return masm.currentOffset();
    }

    // Pop a double: load from the top of the stack, then release the slot.
    void pop(const FloatRegister &src) {
        movsd(Address(StackPointer, 0), src);
        addl(Imm32(sizeof(double)), StackPointer);
    }

    CodeOffsetLabel movWithPatch(const ImmWord &word, const Register &dest) {
        movl(Imm32(word.value), dest);
        return masm.currentOffset();
    }
    CodeOffsetLabel movWithPatch(const ImmPtr &imm, const Register &dest) {
        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
    }

    // GC-pointer moves: each emits the pointer as a 32-bit immediate and
    // records a data relocation for it.
    void movl(const ImmGCPtr &ptr, const Register &dest) {
        masm.movl_i32r(ptr.value, dest.code());
        writeDataRelocation(ptr);
    }
    void movl(const ImmGCPtr &ptr, const Operand &dest) {
        switch (dest.kind()) {
          case Operand::REG:
            masm.movl_i32r(ptr.value, dest.reg());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_REG_DISP:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_SCALE:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base(), dest.index(), dest.scale());
            writeDataRelocation(ptr);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void movl(ImmWord imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
    }
    void movl(ImmPtr imm, Register dest) {
        movl(ImmWord(uintptr_t(imm.value)), dest);
    }
    void mov(ImmWord imm, Register dest) {
        // Use xor for setting registers to zero, as it is specially optimized
        // for this purpose on modern hardware. Note that it does clobber FLAGS
        // though.
        if (imm.value == 0)
            xorl(dest, dest);
        else
            movl(imm, dest);
    }
    void mov(ImmPtr imm, Register dest) {
        mov(ImmWord(uintptr_t(imm.value)), dest);
    }
    // Emit a -1 placeholder immediate; the real absolute address is filled
    // in at link time via the recorded AsmJSAbsoluteLink.
    void mov(AsmJSImmPtr imm, Register dest) {
        masm.movl_i32r(-1, dest.code());
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), imm.kind()));
    }
    void mov(const Operand &src, const Register &dest) {
        movl(src, dest);
    }
    void mov(const Register &src, const Operand &dest) {
        movl(src, dest);
    }
    void mov(Imm32 imm, const Operand &dest) {
        movl(imm, dest);
    }
    void mov(AbsoluteLabel *label, const Register &dest) {
        JS_ASSERT(!label->bound());
        // Thread the patch list through the unpatched address word in the
        // instruction stream.
        masm.movl_i32r(label->prev(), dest.code());
        label->setPrev(masm.size());
    }
    void mov(const Register &src, const Register &dest) {
        movl(src, dest);
    }
    void xchg(const Register &src, const Register &dest) {
        xchgl(src, dest);
    }
    void lea(const Operand &src, const Register &dest) {
        return leal(src, dest);
    }

    // x87: load a single-precision float from memory onto the FPU stack.
    // Only base+displacement addressing is supported.
    void fld32(const Operand &dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fld32_m(dest.disp(), dest.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    // x87: pop the FPU stack top and store it to memory as single precision.
    void fstp32(const Operand &src) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fstp32_m(src.disp(), src.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    void cmpl(const Register src, ImmWord ptr) {
        masm.cmpl_ir(ptr.value, src.code());
    }
    void cmpl(const Register src, ImmPtr imm) {
        cmpl(src, ImmWord(uintptr_t(imm.value)));
    }
    void cmpl(const Register src, ImmGCPtr ptr) {
        masm.cmpl_ir(ptr.value, src.code());
        writeDataRelocation(ptr);
    }
    void cmpl(const Register &lhs, const Register &rhs) {
        masm.cmpl_rr(rhs.code(), lhs.code());
    }
    // The _force32 variants emit a full 32-bit immediate field so the value
    // can be rewritten in place later (by the GC or a patch site).
    void cmpl(const Operand &op, ImmGCPtr imm) {
        switch (op.kind()) {
          case Operand::REG:
            masm.cmpl_ir_force32(imm.value, op.reg());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_REG_DISP:
            masm.cmpl_im_force32(imm.value, op.disp(), op.base());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_ADDRESS32:
            masm.cmpl_im(imm.value, op.address());
            writeDataRelocation(imm);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Compare against *lhs, where lhs is a (void*)-1 placeholder patched at
    // link time via the recorded AsmJSAbsoluteLink.
    void cmpl(const AsmJSAbsoluteAddress &lhs, const Register &rhs) {
        masm.cmpl_rm_force32(rhs.code(), (void*)-1);
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), lhs.kind()));
    }
    CodeOffsetLabel cmplWithPatch(const Register &lhs, Imm32 rhs) {
        masm.cmpl_ir_force32(rhs.value, lhs.code());
        return masm.currentOffset();
    }

    // Jumps and calls to absolute targets are recorded as pending jumps and
    // resolved during executableCopy().
    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jmp();
        addPendingJump(src, target, reloc);
    }
    void j(Condition cond, ImmPtr target,
           Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
        addPendingJump(src, target, reloc);
    }

    void jmp(JitCode *target) {
        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void j(Condition cond, JitCode *target) {
        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(JitCode *target) {
        JmpSrc src = masm.call();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(ImmWord target) {
        call(ImmPtr((void*)target.value));
    }
    void call(ImmPtr target) {
        JmpSrc src = masm.call();
        addPendingJump(src, target, Relocation::HARDCODED);
    }
    void call(AsmJSImmPtr target) {
        // Moving to a register is suboptimal. To fix (use a single
        // call-immediate instruction) we'll need to distinguish a new type of
        // relative patch to an absolute address in AsmJSAbsoluteLink.
        mov(target, eax);
        call(eax);
    }

    // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
    // this instruction.
    CodeOffsetLabel toggledCall(JitCode *target, bool enabled) {
        CodeOffsetLabel offset(size());
        JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
        // Both encodings must occupy the same number of bytes so they can be
        // toggled in place.
        JS_ASSERT(size() - offset.offset() == ToggledCallSize());
        return offset;
    }

    static size_t ToggledCallSize() {
        // Size of a call instruction.
        return 5;
    }

    // Re-routes pending jumps to an external target, flushing the label in the
    // process.
    void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
        JSC::MacroAssembler::Label jsclabel;
        if (label->used()) {
            bool more;
            // Walk the chain of uses threaded through the label, turning each
            // into a pending jump to |target|.
            JSC::X86Assembler::JmpSrc jmp(label->offset());
            do {
                JSC::X86Assembler::JmpSrc next;
                more = masm.nextJump(jmp, &next);
                addPendingJump(jmp, target, reloc);
                jmp = next;
            } while (more);
        }
        label->reset();
    }

    // Move a 32-bit immediate into a register where the immediate can be
    // patched.
    CodeOffsetLabel movlWithPatch(Imm32 imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
        return masm.currentOffset();
    }

    // Load from *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movsblWithPatch(Address src, Register dest) {
        masm.movsbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(Address src, Register dest) {
        masm.movzbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(Address src, Register dest) {
        masm.movswl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(Address src, Register dest) {
        masm.movzwl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Address src, Register dest) {
        masm.movl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }

    // Store to *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movbWithPatch(Register src, Address dest) {
        masm.movb_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, Address dest) {
        masm.movw_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, Address dest) {
        masm.movl_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }

    // Load from *(addr + index*scale) where addr can be patched.
    CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                                  Register dest)
    {
        masm.movl_mr(addr.addr, index.code(), scale, dest.code());
        return masm.currentOffset();
    }

    // Load from *src where src can be patched.
    CodeOffsetLabel movsblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movsbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movswl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzwl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr(src.addr, dest.code());
        return masm.currentOffset();
    }

    // Store to *dest where dest can be patched.
    CodeOffsetLabel movbWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movb_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movw_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movl_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }

};
   1.549 +
   1.550 +// Get a register in which we plan to put a quantity that will be used as an
   1.551 +// integer argument.  This differs from GetIntArgReg in that if we have no more
   1.552 +// actual argument registers to use we will fall back on using whatever
   1.553 +// CallTempReg* don't overlap the argument registers, and only fail once those
   1.554 +// run out too.
   1.555 +static inline bool
   1.556 +GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
   1.557 +{
   1.558 +    if (usedIntArgs >= NumCallTempNonArgRegs)
   1.559 +        return false;
   1.560 +    *out = CallTempNonArgRegs[usedIntArgs];
   1.561 +    return true;
   1.562 +}
   1.563 +
   1.564 +} // namespace jit
   1.565 +} // namespace js
   1.566 +
   1.567 +#endif /* jit_x86_Assembler_x86_h */

mercurial