js/src/jit/x86/Assembler-x86.h

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #ifndef jit_x86_Assembler_x86_h
     8 #define jit_x86_Assembler_x86_h
    10 #include "mozilla/ArrayUtils.h"
    12 #include "assembler/assembler/X86Assembler.h"
    13 #include "jit/CompactBuffer.h"
    14 #include "jit/IonCode.h"
    15 #include "jit/shared/Assembler-shared.h"
namespace js {
namespace jit {

// The eight x86 general-purpose registers.
static MOZ_CONSTEXPR_VAR Register eax = { JSC::X86Registers::eax };
static MOZ_CONSTEXPR_VAR Register ecx = { JSC::X86Registers::ecx };
static MOZ_CONSTEXPR_VAR Register edx = { JSC::X86Registers::edx };
static MOZ_CONSTEXPR_VAR Register ebx = { JSC::X86Registers::ebx };
static MOZ_CONSTEXPR_VAR Register esp = { JSC::X86Registers::esp };
static MOZ_CONSTEXPR_VAR Register ebp = { JSC::X86Registers::ebp };
static MOZ_CONSTEXPR_VAR Register esi = { JSC::X86Registers::esi };
static MOZ_CONSTEXPR_VAR Register edi = { JSC::X86Registers::edi };

// The eight XMM floating-point registers available in 32-bit mode.
static MOZ_CONSTEXPR_VAR FloatRegister xmm0 = { JSC::X86Registers::xmm0 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm1 = { JSC::X86Registers::xmm1 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm2 = { JSC::X86Registers::xmm2 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm3 = { JSC::X86Registers::xmm3 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm4 = { JSC::X86Registers::xmm4 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm5 = { JSC::X86Registers::xmm5 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm6 = { JSC::X86Registers::xmm6 };
static MOZ_CONSTEXPR_VAR FloatRegister xmm7 = { JSC::X86Registers::xmm7 };

// Sentinel values for "no register".
static MOZ_CONSTEXPR_VAR Register InvalidReg = { JSC::X86Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { JSC::X86Registers::invalid_xmm };

// Register conventions used by JIT-compiled code. A boxed JS value is
// returned split across two registers: tag in ecx, payload in edx.
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = xmm0;
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = xmm7;

// Avoid ebp, which is the FramePointer, which is unavailable in some modes.
static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg0 = edi;
static MOZ_CONSTEXPR_VAR Register CallTempReg1 = eax;
static MOZ_CONSTEXPR_VAR Register CallTempReg2 = ebx;
static MOZ_CONSTEXPR_VAR Register CallTempReg3 = ecx;
static MOZ_CONSTEXPR_VAR Register CallTempReg4 = esi;
static MOZ_CONSTEXPR_VAR Register CallTempReg5 = edx;

// The convention used by the ForkJoinGetSlice stub. None of these can be eax
// or edx, which the stub also needs for cmpxchg and div, respectively.
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_cx = edi;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp0 = ebx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_temp1 = ecx;
static MOZ_CONSTEXPR_VAR Register ForkJoinGetSliceReg_output = esi;

// We have no arg regs, so our NonArgRegs are just our CallTempReg*
static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { edi, eax, ebx, ecx, esi, edx };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);
// Assigns locations (ABIArg) to the successive arguments of an ABI call,
// tracking how many bytes of stack the arguments consume so far.
class ABIArgGenerator
{
    uint32_t stackOffset_; // byte offset of the next stack-passed argument
    ABIArg current_;       // location most recently produced by next()

  public:
    ABIArgGenerator();
    // Advance to the next argument of the given type and return its location.
    ABIArg next(MIRType argType);
    // Location returned by the most recent call to next().
    ABIArg &current() { return current_; }
    uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }

    // Note: these registers are all guaranteed to be different
    static const Register NonArgReturnVolatileReg0;
    static const Register NonArgReturnVolatileReg1;
    static const Register NonVolatileReg;
};
// Register holding the OSR frame pointer on entry, and the register
// dedicated to the pre-barrier stub. Both conventions use edx here.
static MOZ_CONSTEXPR_VAR Register OsrFrameReg = edx;
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = edx;

// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = ebx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = edx;

// Registers used in the GenerateFFIIonExit Disable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = edx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = ecx;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = edi;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = eax;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = esi;

// GCC stack is aligned on 16 bytes, but we don't maintain the invariant in
// jitted code.
#if defined(__GNUC__)
static const uint32_t StackAlignment = 16;
#else
static const uint32_t StackAlignment = 4;
#endif
static const bool StackKeptAligned = false;
static const uint32_t CodeAlignment = 8;
// One slot (the return address) is pushed by a native call on x86.
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = sizeof(void*);
static const uint32_t AlignmentMidPrologue = AlignmentAtPrologue;
// A 32-bit immediate wrapping a JSValueTag, for emitting tag comparisons
// and stores. Intentionally implicit so a JSValueTag converts directly.
struct ImmTag : public Imm32
{
    ImmTag(JSValueTag mask)
      : Imm32(int32_t(mask))
    { }
};
// An ImmTag constructed from a JSValueType by converting it to its tag.
struct ImmType : public ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSVAL_TYPE_TO_TAG(type))
    { }
};
// Scale for indexing arrays of pointer-sized elements (4 bytes on x86).
static const Scale ScalePointer = TimesFour;

} // namespace jit
} // namespace js

// The shared assembler needs the above platform definitions; it in turn
// provides AssemblerX86Shared, which the Assembler class below extends.
#include "jit/shared/Assembler-x86-shared.h"

namespace js {
namespace jit {
   140 static inline void
   141 PatchJump(CodeLocationJump jump, CodeLocationLabel label)
   142 {
   143 #ifdef DEBUG
   144     // Assert that we're overwriting a jump instruction, either:
   145     //   0F 80+cc <imm32>, or
   146     //   E9 <imm32>
   147     unsigned char *x = (unsigned char *)jump.raw() - 5;
   148     JS_ASSERT(((*x >= 0x80 && *x <= 0x8F) && *(x - 1) == 0x0F) ||
   149               (*x == 0xE9));
   150 #endif
   151     JSC::X86Assembler::setRel32(jump.raw(), label.raw());
   152 }
// Return operand from a JS -> JS call: the boxed value is returned in the
// (JSReturnReg_Type, JSReturnReg_Data) register pair.
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
// x86-specific assembler. Extends AssemblerX86Shared with 32-bit-only
// operations: pointer-sized immediate pushes/moves, GC-pointer operations
// that record data relocations, pending-jump bookkeeping, and a family of
// *WithPatch emitters whose immediate/displacement can be rewritten after
// code generation (each returns the CodeOffsetLabel of the patch site).
class Assembler : public AssemblerX86Shared
{
    // Record the offset of |src| in the jump-relocation buffer so the GC
    // can trace jumps into JIT code.
    void writeRelocation(JmpSrc src) {
        jumpRelocations_.writeUnsigned(src.offset());
    }
    // Queue a jump to |target| to be resolved later (see executableCopy).
    // Jumps into JIT code additionally get a relocation entry.
    void addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind kind) {
        enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, kind));
        if (kind == Relocation::JITCODE)
            writeRelocation(src);
    }

  public:
    // Pull in the shared overload sets so the x86-only overloads below
    // extend rather than hide them.
    using AssemblerX86Shared::movl;
    using AssemblerX86Shared::j;
    using AssemblerX86Shared::jmp;
    using AssemblerX86Shared::movsd;
    using AssemblerX86Shared::movss;
    using AssemblerX86Shared::retarget;
    using AssemblerX86Shared::cmpl;
    using AssemblerX86Shared::call;
    using AssemblerX86Shared::push;
    using AssemblerX86Shared::pop;

    static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);

    // Copy the assembly code to the given buffer, and perform any pending
    // relocations relying on the target address.
    void executableCopy(uint8_t *buffer);

    // Actual assembly emitting functions.

    // Push a GC pointer, recording a data relocation so the GC can update
    // the embedded pointer if the referent moves.
    void push(const ImmGCPtr &ptr) {
        push(Imm32(ptr.value));
        writeDataRelocation(ptr);
    }
    // On x86 a word/pointer is 32 bits, so these reduce to a push imm32.
    void push(const ImmWord imm) {
        push(Imm32(imm.value));
    }
    void push(const ImmPtr imm) {
        push(ImmWord(uintptr_t(imm.value)));
    }
    // Push a double: make room manually, then store via movsd (there is no
    // push instruction for XMM registers).
    void push(const FloatRegister &src) {
        subl(Imm32(sizeof(double)), StackPointer);
        movsd(src, Address(StackPointer, 0));
    }

    // Push an immediate whose value can be patched later; returns the
    // offset of the patchable word.
    CodeOffsetLabel pushWithPatch(const ImmWord &word) {
        push(Imm32(word.value));
        return masm.currentOffset();
    }

    // Inverse of push(FloatRegister): load the double, then free the slot.
    void pop(const FloatRegister &src) {
        movsd(Address(StackPointer, 0), src);
        addl(Imm32(sizeof(double)), StackPointer);
    }

    // Move a patchable word/pointer immediate into a register.
    CodeOffsetLabel movWithPatch(const ImmWord &word, const Register &dest) {
        movl(Imm32(word.value), dest);
        return masm.currentOffset();
    }
    CodeOffsetLabel movWithPatch(const ImmPtr &imm, const Register &dest) {
        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
    }

    // GC-pointer moves: every emitted immediate gets a data relocation.
    void movl(const ImmGCPtr &ptr, const Register &dest) {
        masm.movl_i32r(ptr.value, dest.code());
        writeDataRelocation(ptr);
    }
    void movl(const ImmGCPtr &ptr, const Operand &dest) {
        switch (dest.kind()) {
          case Operand::REG:
            masm.movl_i32r(ptr.value, dest.reg());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_REG_DISP:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base());
            writeDataRelocation(ptr);
            break;
          case Operand::MEM_SCALE:
            masm.movl_i32m(ptr.value, dest.disp(), dest.base(), dest.index(), dest.scale());
            writeDataRelocation(ptr);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void movl(ImmWord imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
    }
    void movl(ImmPtr imm, Register dest) {
        movl(ImmWord(uintptr_t(imm.value)), dest);
    }
    void mov(ImmWord imm, Register dest) {
        // Use xor for setting registers to zero, as it is specially optimized
        // for this purpose on modern hardware. Note that it does clobber FLAGS
        // though.
        if (imm.value == 0)
            xorl(dest, dest);
        else
            movl(imm, dest);
    }
    void mov(ImmPtr imm, Register dest) {
        mov(ImmWord(uintptr_t(imm.value)), dest);
    }
    // Emit a placeholder (-1) immediate and record an asm.js absolute link
    // so the real address can be filled in at link time.
    void mov(AsmJSImmPtr imm, Register dest) {
        masm.movl_i32r(-1, dest.code());
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), imm.kind()));
    }
    // Generic mov aliases; on x86 a plain mov is always a 32-bit movl.
    void mov(const Operand &src, const Register &dest) {
        movl(src, dest);
    }
    void mov(const Register &src, const Operand &dest) {
        movl(src, dest);
    }
    void mov(Imm32 imm, const Operand &dest) {
        movl(imm, dest);
    }
    void mov(AbsoluteLabel *label, const Register &dest) {
        JS_ASSERT(!label->bound());
        // Thread the patch list through the unpatched address word in the
        // instruction stream.
        masm.movl_i32r(label->prev(), dest.code());
        label->setPrev(masm.size());
    }
    void mov(const Register &src, const Register &dest) {
        movl(src, dest);
    }
    void xchg(const Register &src, const Register &dest) {
        xchgl(src, dest);
    }
    void lea(const Operand &src, const Register &dest) {
        return leal(src, dest);
    }

    // x87 load of a 32-bit float; only a base+displacement operand is
    // supported here.
    void fld32(const Operand &dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fld32_m(dest.disp(), dest.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    // x87 store-and-pop of a 32-bit float; base+displacement operands only.
    void fstp32(const Operand &src) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            masm.fstp32_m(src.disp(), src.base());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }

    // Compare a register against word/pointer/GC-pointer immediates. The
    // GC-pointer variant records a data relocation for the immediate.
    void cmpl(const Register src, ImmWord ptr) {
        masm.cmpl_ir(ptr.value, src.code());
    }
    void cmpl(const Register src, ImmPtr imm) {
        cmpl(src, ImmWord(uintptr_t(imm.value)));
    }
    void cmpl(const Register src, ImmGCPtr ptr) {
        masm.cmpl_ir(ptr.value, src.code());
        writeDataRelocation(ptr);
    }
    void cmpl(const Register &lhs, const Register &rhs) {
        masm.cmpl_rr(rhs.code(), lhs.code());
    }
    // The *_force32 forms emit a full 32-bit immediate so the GC-pointer
    // payload stays patchable via the data relocation.
    void cmpl(const Operand &op, ImmGCPtr imm) {
        switch (op.kind()) {
          case Operand::REG:
            masm.cmpl_ir_force32(imm.value, op.reg());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_REG_DISP:
            masm.cmpl_im_force32(imm.value, op.disp(), op.base());
            writeDataRelocation(imm);
            break;
          case Operand::MEM_ADDRESS32:
            masm.cmpl_im(imm.value, op.address());
            writeDataRelocation(imm);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Compare against memory at an asm.js absolute address; the (void*)-1
    // placeholder is linked to the real address via AsmJSAbsoluteLink.
    void cmpl(const AsmJSAbsoluteAddress &lhs, const Register &rhs) {
        masm.cmpl_rm_force32(rhs.code(), (void*)-1);
        enoughMemory_ &= append(AsmJSAbsoluteLink(masm.currentOffset(), lhs.kind()));
    }
    CodeOffsetLabel cmplWithPatch(const Register &lhs, Imm32 rhs) {
        masm.cmpl_ir_force32(rhs.value, lhs.code());
        return masm.currentOffset();
    }

    // Jumps/conditional jumps to absolute targets go through the pending
    // jump list so they are fixed up with the final code address.
    void jmp(ImmPtr target, Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jmp();
        addPendingJump(src, target, reloc);
    }
    void j(Condition cond, ImmPtr target,
           Relocation::Kind reloc = Relocation::HARDCODED) {
        JmpSrc src = masm.jCC(static_cast<JSC::X86Assembler::Condition>(cond));
        addPendingJump(src, target, reloc);
    }

    void jmp(JitCode *target) {
        jmp(ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void j(Condition cond, JitCode *target) {
        j(cond, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(JitCode *target) {
        JmpSrc src = masm.call();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
    }
    void call(ImmWord target) {
        call(ImmPtr((void*)target.value));
    }
    void call(ImmPtr target) {
        JmpSrc src = masm.call();
        addPendingJump(src, target, Relocation::HARDCODED);
    }
    void call(AsmJSImmPtr target) {
        // Moving to a register is suboptimal. To fix (use a single
        // call-immediate instruction) we'll need to distinguish a new type of
        // relative patch to an absolute address in AsmJSAbsoluteLink.
        mov(target, eax);
        call(eax);
    }

    // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
    // this instruction.
    CodeOffsetLabel toggledCall(JitCode *target, bool enabled) {
        CodeOffsetLabel offset(size());
        // cmp_eax() stands in for the call when disabled; the assert below
        // checks both forms occupy exactly ToggledCallSize() bytes.
        JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
        addPendingJump(src, ImmPtr(target->raw()), Relocation::JITCODE);
        JS_ASSERT(size() - offset.offset() == ToggledCallSize());
        return offset;
    }

    static size_t ToggledCallSize() {
        // Size of a call instruction.
        return 5;
    }

    // Re-routes pending jumps to an external target, flushing the label in the
    // process.
    void retarget(Label *label, ImmPtr target, Relocation::Kind reloc) {
        JSC::MacroAssembler::Label jsclabel;
        if (label->used()) {
            bool more;
            // Walk the chain of jumps threaded through the label, queueing
            // each one as a pending jump to the external target.
            JSC::X86Assembler::JmpSrc jmp(label->offset());
            do {
                JSC::X86Assembler::JmpSrc next;
                more = masm.nextJump(jmp, &next);
                addPendingJump(jmp, target, reloc);
                jmp = next;
            } while (more);
        }
        label->reset();
    }

    // Move a 32-bit immediate into a register where the immediate can be
    // patched.
    CodeOffsetLabel movlWithPatch(Imm32 imm, Register dest) {
        masm.movl_i32r(imm.value, dest.code());
        return masm.currentOffset();
    }

    // Load from *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movsblWithPatch(Address src, Register dest) {
        masm.movsbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(Address src, Register dest) {
        masm.movzbl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(Address src, Register dest) {
        masm.movswl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(Address src, Register dest) {
        masm.movzwl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Address src, Register dest) {
        masm.movl_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(Address src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr_disp32(src.offset, src.base.code(), dest.code());
        return masm.currentOffset();
    }

    // Store to *(base + disp32) where disp32 can be patched.
    CodeOffsetLabel movbWithPatch(Register src, Address dest) {
        masm.movb_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, Address dest) {
        masm.movw_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, Address dest) {
        masm.movl_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, Address dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm_disp32(src.code(), dest.offset, dest.base.code());
        return masm.currentOffset();
    }

    // Load from *(addr + index*scale) where addr can be patched.
    CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                                  Register dest)
    {
        masm.movl_mr(addr.addr, index.code(), scale, dest.code());
        return masm.currentOffset();
    }

    // Load from *src where src can be patched.
    CodeOffsetLabel movsblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movsbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzblWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzbl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movswlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movswl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movzwlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movzwl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(const PatchedAbsoluteAddress &src, Register dest) {
        masm.movl_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_mr(src.addr, dest.code());
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(const PatchedAbsoluteAddress &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_mr(src.addr, dest.code());
        return masm.currentOffset();
    }

    // Store to *dest where dest can be patched.
    CodeOffsetLabel movbWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movb_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movwWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movw_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movlWithPatch(Register src, const PatchedAbsoluteAddress &dest) {
        masm.movl_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movssWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movss_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }
    CodeOffsetLabel movsdWithPatch(FloatRegister src, const PatchedAbsoluteAddress &dest) {
        JS_ASSERT(HasSSE2());
        masm.movsd_rm(src.code(), dest.addr);
        return masm.currentOffset();
    }

};
   547 // Get a register in which we plan to put a quantity that will be used as an
   548 // integer argument.  This differs from GetIntArgReg in that if we have no more
   549 // actual argument registers to use we will fall back on using whatever
   550 // CallTempReg* don't overlap the argument registers, and only fail once those
   551 // run out too.
   552 static inline bool
   553 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
   554 {
   555     if (usedIntArgs >= NumCallTempNonArgRegs)
   556         return false;
   557     *out = CallTempNonArgRegs[usedIntArgs];
   558     return true;
   559 }
   561 } // namespace jit
   562 } // namespace js
   564 #endif /* jit_x86_Assembler_x86_h */

mercurial