michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #ifndef jit_shared_MacroAssembler_x86_shared_h michael@0: #define jit_shared_MacroAssembler_x86_shared_h michael@0: michael@0: #include "mozilla/Casting.h" michael@0: #include "mozilla/DebugOnly.h" michael@0: michael@0: #if defined(JS_CODEGEN_X86) michael@0: # include "jit/x86/Assembler-x86.h" michael@0: #elif defined(JS_CODEGEN_X64) michael@0: # include "jit/x64/Assembler-x64.h" michael@0: #endif michael@0: michael@0: namespace js { michael@0: namespace jit { michael@0: michael@0: class MacroAssemblerX86Shared : public Assembler michael@0: { michael@0: protected: michael@0: // Bytes pushed onto the frame by the callee; includes frameDepth_. This is michael@0: // needed to compute offsets to stack slots while temporary space has been michael@0: // reserved for unexpected spills or C++ function calls. It is maintained michael@0: // by functions which track stack alignment, which for clear distinction michael@0: // use StudlyCaps (for example, Push, Pop). michael@0: uint32_t framePushed_; michael@0: michael@0: public: michael@0: using Assembler::call; michael@0: michael@0: MacroAssemblerX86Shared() michael@0: : framePushed_(0) michael@0: { } michael@0: michael@0: void compareDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs) { michael@0: if (cond & DoubleConditionBitInvert) michael@0: ucomisd(rhs, lhs); michael@0: else michael@0: ucomisd(lhs, rhs); michael@0: } michael@0: void branchDouble(DoubleCondition cond, const FloatRegister &lhs, michael@0: const FloatRegister &rhs, Label *label) michael@0: { michael@0: compareDouble(cond, lhs, rhs); michael@0: michael@0: if (cond == DoubleEqual) { michael@0: Label unordered; michael@0: j(Parity, &unordered); michael@0: j(Equal, label); michael@0: bind(&unordered); michael@0: return; michael@0: } michael@0: if (cond == DoubleNotEqualOrUnordered) { michael@0: j(NotEqual, label); michael@0: j(Parity, label); michael@0: return; michael@0: } michael@0: michael@0: JS_ASSERT(!(cond & DoubleConditionBitSpecial)); michael@0: j(ConditionFromDoubleCondition(cond), label); michael@0: } michael@0: michael@0: void compareFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs) { michael@0: if (cond & DoubleConditionBitInvert) michael@0: ucomiss(rhs, lhs); michael@0: else michael@0: ucomiss(lhs, rhs); michael@0: } michael@0: void branchFloat(DoubleCondition cond, const FloatRegister &lhs, michael@0: const FloatRegister &rhs, Label *label) michael@0: { michael@0: compareFloat(cond, lhs, rhs); michael@0: michael@0: if (cond == DoubleEqual) { michael@0: Label unordered; michael@0: j(Parity, &unordered); michael@0: j(Equal, label); michael@0: bind(&unordered); michael@0: return; michael@0: } michael@0: if (cond == DoubleNotEqualOrUnordered) { michael@0: j(NotEqual, label); michael@0: j(Parity, label); michael@0: return; michael@0: } michael@0: michael@0: JS_ASSERT(!(cond & DoubleConditionBitSpecial)); michael@0: j(ConditionFromDoubleCondition(cond), label); michael@0: } michael@0: michael@0: void branchNegativeZero(const FloatRegister ®, const Register &scratch, Label *label); michael@0: void branchNegativeZeroFloat32(const FloatRegister ®, const Register &scratch, Label *label); michael@0: michael@0: void move32(const Imm32 &imm, const Register &dest) { michael@0: // Use the ImmWord version of mov to register, which has special michael@0: // optimizations. Casting to uint32_t here ensures that the value michael@0: // is zero-extended. michael@0: mov(ImmWord(uint32_t(imm.value)), dest); michael@0: } michael@0: void move32(const Imm32 &imm, const Operand &dest) { michael@0: movl(imm, dest); michael@0: } michael@0: void move32(const Register &src, const Register &dest) { michael@0: movl(src, dest); michael@0: } michael@0: void move32(const Register &src, const Operand &dest) { michael@0: movl(src, dest); michael@0: } michael@0: void and32(const Imm32 &imm, const Register &dest) { michael@0: andl(imm, dest); michael@0: } michael@0: void and32(const Imm32 &imm, const Address &dest) { michael@0: andl(imm, Operand(dest)); michael@0: } michael@0: void or32(const Register &src, const Register &dest) { michael@0: orl(src, dest); michael@0: } michael@0: void or32(const Imm32 &imm, const Register &dest) { michael@0: orl(imm, dest); michael@0: } michael@0: void or32(const Imm32 &imm, const Address &dest) { michael@0: orl(imm, Operand(dest)); michael@0: } michael@0: void neg32(const Register ®) { michael@0: negl(reg); michael@0: } michael@0: void test32(const Register &lhs, const Register &rhs) { michael@0: testl(lhs, rhs); michael@0: } michael@0: void test32(const Address &addr, Imm32 imm) { michael@0: testl(Operand(addr), imm); michael@0: } michael@0: void test32(const Register &lhs, const Imm32 &rhs) { michael@0: testl(lhs, rhs); michael@0: } michael@0: void cmp32(const Register &lhs, const Imm32 &rhs) { michael@0: cmpl(lhs, rhs); michael@0: } michael@0: void cmp32(Register a, Register b) { michael@0: cmpl(a, b); michael@0: } michael@0: void cmp32(const Operand &lhs, const Imm32 &rhs) { michael@0: cmpl(lhs, rhs); michael@0: } michael@0: void cmp32(const Operand &lhs, const Register &rhs) { michael@0: cmpl(lhs, rhs); michael@0: } michael@0: void add32(Register src, Register dest) { michael@0: addl(src, dest); michael@0: } michael@0: void add32(Imm32 imm, Register dest) { michael@0: addl(imm, dest); michael@0: } michael@0: void add32(Imm32 imm, const Address &dest) { michael@0: addl(imm, Operand(dest)); michael@0: } michael@0: void sub32(Imm32 imm, Register dest) { michael@0: subl(imm, dest); michael@0: } michael@0: void sub32(Register src, Register dest) { michael@0: subl(src, dest); michael@0: } michael@0: template michael@0: void branchAdd32(Condition cond, T src, Register dest, Label *label) { michael@0: add32(src, dest); michael@0: j(cond, label); michael@0: } michael@0: template michael@0: void branchSub32(Condition cond, T src, Register dest, Label *label) { michael@0: sub32(src, dest); michael@0: j(cond, label); michael@0: } michael@0: void xor32(Imm32 imm, Register dest) { michael@0: xorl(imm, dest); michael@0: } michael@0: void xor32(Register src, Register dest) { michael@0: xorl(src, dest); michael@0: } michael@0: void not32(Register reg) { michael@0: notl(reg); michael@0: } michael@0: void inc32(const Operand &addr) { michael@0: incl(addr); michael@0: } michael@0: void atomic_inc32(const Operand &addr) { michael@0: lock_incl(addr); michael@0: } michael@0: void dec32(const Operand &addr) { michael@0: decl(addr); michael@0: } michael@0: void atomic_dec32(const Operand &addr) { michael@0: lock_decl(addr); michael@0: } michael@0: void atomic_cmpxchg32(const Register &src, const Operand &addr, const Register &dest) { michael@0: // %eax must be explicitly provided for calling clarity. michael@0: MOZ_ASSERT(dest.code() == JSC::X86Registers::eax); michael@0: lock_cmpxchg32(src, addr); michael@0: } michael@0: michael@0: void branch16(Condition cond, const Register &lhs, const Register &rhs, Label *label) { michael@0: cmpw(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Operand &lhs, const Register &rhs, Label *label) { michael@0: cmpl(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) { michael@0: cmpl(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Address &lhs, const Register &rhs, Label *label) { michael@0: cmpl(Operand(lhs), rhs); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Address &lhs, Imm32 imm, Label *label) { michael@0: cmpl(Operand(lhs), imm); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Register &lhs, Imm32 imm, Label *label) { michael@0: cmpl(lhs, imm); michael@0: j(cond, label); michael@0: } michael@0: void branch32(Condition cond, const Register &lhs, const Register &rhs, Label *label) { michael@0: cmpl(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branchTest16(Condition cond, const Register &lhs, const Register &rhs, Label *label) { michael@0: testw(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) { michael@0: testl(lhs, rhs); michael@0: j(cond, label); michael@0: } michael@0: void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) { michael@0: testl(lhs, imm); michael@0: j(cond, label); michael@0: } michael@0: void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) { michael@0: testl(Operand(address), imm); michael@0: j(cond, label); michael@0: } michael@0: michael@0: // The following functions are exposed for use in platform-shared code. michael@0: template michael@0: void Push(const T &t) { michael@0: push(t); michael@0: framePushed_ += sizeof(intptr_t); michael@0: } michael@0: void Push(const FloatRegister &t) { michael@0: push(t); michael@0: framePushed_ += sizeof(double); michael@0: } michael@0: CodeOffsetLabel PushWithPatch(const ImmWord &word) { michael@0: framePushed_ += sizeof(word.value); michael@0: return pushWithPatch(word); michael@0: } michael@0: CodeOffsetLabel PushWithPatch(const ImmPtr &imm) { michael@0: return PushWithPatch(ImmWord(uintptr_t(imm.value))); michael@0: } michael@0: michael@0: template michael@0: void Pop(const T &t) { michael@0: pop(t); michael@0: framePushed_ -= sizeof(intptr_t); michael@0: } michael@0: void Pop(const FloatRegister &t) { michael@0: pop(t); michael@0: framePushed_ -= sizeof(double); michael@0: } michael@0: void implicitPop(uint32_t args) { michael@0: JS_ASSERT(args % sizeof(intptr_t) == 0); michael@0: framePushed_ -= args; michael@0: } michael@0: uint32_t framePushed() const { michael@0: return framePushed_; michael@0: } michael@0: void setFramePushed(uint32_t framePushed) { michael@0: framePushed_ = framePushed; michael@0: } michael@0: michael@0: void jump(Label *label) { michael@0: jmp(label); michael@0: } michael@0: void jump(RepatchLabel *label) { michael@0: jmp(label); michael@0: } michael@0: void jump(Register reg) { michael@0: jmp(Operand(reg)); michael@0: } michael@0: void jump(const Address &addr) { michael@0: jmp(Operand(addr)); michael@0: } michael@0: michael@0: void convertInt32ToDouble(const Register &src, const FloatRegister &dest) { michael@0: // cvtsi2sd and friends write only part of their output register, which michael@0: // causes slowdowns on out-of-order processors. Explicitly break michael@0: // dependencies with xorpd (and xorps elsewhere), which are handled michael@0: // specially in modern CPUs, for this purpose. See sections 8.14, 9.8, michael@0: // 10.8, 12.9, 13.16, 14.14, and 15.8 of Agner's Microarchitecture michael@0: // document. michael@0: zeroDouble(dest); michael@0: cvtsi2sd(src, dest); michael@0: } michael@0: void convertInt32ToDouble(const Address &src, FloatRegister dest) { michael@0: convertInt32ToDouble(Operand(src), dest); michael@0: } michael@0: void convertInt32ToDouble(const Operand &src, FloatRegister dest) { michael@0: // Clear the output register first to break dependencies; see above; michael@0: zeroDouble(dest); michael@0: cvtsi2sd(Operand(src), dest); michael@0: } michael@0: void convertInt32ToFloat32(const Register &src, const FloatRegister &dest) { michael@0: // Clear the output register first to break dependencies; see above; michael@0: zeroFloat32(dest); michael@0: cvtsi2ss(src, dest); michael@0: } michael@0: void convertInt32ToFloat32(const Address &src, FloatRegister dest) { michael@0: convertInt32ToFloat32(Operand(src), dest); michael@0: } michael@0: void convertInt32ToFloat32(const Operand &src, FloatRegister dest) { michael@0: // Clear the output register first to break dependencies; see above; michael@0: zeroFloat32(dest); michael@0: cvtsi2ss(src, dest); michael@0: } michael@0: Condition testDoubleTruthy(bool truthy, const FloatRegister ®) { michael@0: zeroDouble(ScratchFloatReg); michael@0: ucomisd(ScratchFloatReg, reg); michael@0: return truthy ? NonZero : Zero; michael@0: } michael@0: void branchTestDoubleTruthy(bool truthy, const FloatRegister ®, Label *label) { michael@0: Condition cond = testDoubleTruthy(truthy, reg); michael@0: j(cond, label); michael@0: } michael@0: void load8ZeroExtend(const Address &src, const Register &dest) { michael@0: movzbl(Operand(src), dest); michael@0: } michael@0: void load8ZeroExtend(const BaseIndex &src, const Register &dest) { michael@0: movzbl(Operand(src), dest); michael@0: } michael@0: void load8SignExtend(const Address &src, const Register &dest) { michael@0: movsbl(Operand(src), dest); michael@0: } michael@0: void load8SignExtend(const BaseIndex &src, const Register &dest) { michael@0: movsbl(Operand(src), dest); michael@0: } michael@0: template michael@0: void store8(const S &src, const T &dest) { michael@0: movb(src, Operand(dest)); michael@0: } michael@0: void load16ZeroExtend(const Address &src, const Register &dest) { michael@0: movzwl(Operand(src), dest); michael@0: } michael@0: void load16ZeroExtend(const BaseIndex &src, const Register &dest) { michael@0: movzwl(Operand(src), dest); michael@0: } michael@0: template michael@0: void store16(const S &src, const T &dest) { michael@0: movw(src, Operand(dest)); michael@0: } michael@0: void load16SignExtend(const Address &src, const Register &dest) { michael@0: movswl(Operand(src), dest); michael@0: } michael@0: void load16SignExtend(const BaseIndex &src, const Register &dest) { michael@0: movswl(Operand(src), dest); michael@0: } michael@0: void load32(const Address &address, Register dest) { michael@0: movl(Operand(address), dest); michael@0: } michael@0: void load32(const BaseIndex &src, Register dest) { michael@0: movl(Operand(src), dest); michael@0: } michael@0: void load32(const Operand &src, Register dest) { michael@0: movl(src, dest); michael@0: } michael@0: template michael@0: void store32(const S &src, const T &dest) { michael@0: movl(src, Operand(dest)); michael@0: } michael@0: void loadDouble(const Address &src, FloatRegister dest) { michael@0: movsd(src, dest); michael@0: } michael@0: void loadDouble(const BaseIndex &src, FloatRegister dest) { michael@0: movsd(src, dest); michael@0: } michael@0: void loadDouble(const Operand &src, FloatRegister dest) { michael@0: switch (src.kind()) { michael@0: case Operand::MEM_REG_DISP: michael@0: loadDouble(src.toAddress(), dest); michael@0: break; michael@0: case Operand::MEM_SCALE: michael@0: loadDouble(src.toBaseIndex(), dest); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); michael@0: } michael@0: } michael@0: void storeDouble(FloatRegister src, const Address &dest) { michael@0: movsd(src, dest); michael@0: } michael@0: void storeDouble(FloatRegister src, const BaseIndex &dest) { michael@0: movsd(src, dest); michael@0: } michael@0: void storeDouble(FloatRegister src, const Operand &dest) { michael@0: switch (dest.kind()) { michael@0: case Operand::MEM_REG_DISP: michael@0: storeDouble(src, dest.toAddress()); michael@0: break; michael@0: case Operand::MEM_SCALE: michael@0: storeDouble(src, dest.toBaseIndex()); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); michael@0: } michael@0: } michael@0: void moveDouble(FloatRegister src, FloatRegister dest) { michael@0: // Use movapd instead of movsd to avoid dependencies. michael@0: movapd(src, dest); michael@0: } michael@0: void zeroDouble(FloatRegister reg) { michael@0: xorpd(reg, reg); michael@0: } michael@0: void zeroFloat32(FloatRegister reg) { michael@0: xorps(reg, reg); michael@0: } michael@0: void negateDouble(FloatRegister reg) { michael@0: // From MacroAssemblerX86Shared::maybeInlineDouble michael@0: pcmpeqw(ScratchFloatReg, ScratchFloatReg); michael@0: psllq(Imm32(63), ScratchFloatReg); michael@0: michael@0: // XOR the float in a float register with -0.0. michael@0: xorpd(ScratchFloatReg, reg); // s ^ 0x80000000000000 michael@0: } michael@0: void negateFloat(FloatRegister reg) { michael@0: pcmpeqw(ScratchFloatReg, ScratchFloatReg); michael@0: psllq(Imm32(31), ScratchFloatReg); michael@0: michael@0: // XOR the float in a float register with -0.0. michael@0: xorps(ScratchFloatReg, reg); // s ^ 0x80000000 michael@0: } michael@0: void addDouble(FloatRegister src, FloatRegister dest) { michael@0: addsd(src, dest); michael@0: } michael@0: void subDouble(FloatRegister src, FloatRegister dest) { michael@0: subsd(src, dest); michael@0: } michael@0: void mulDouble(FloatRegister src, FloatRegister dest) { michael@0: mulsd(src, dest); michael@0: } michael@0: void divDouble(FloatRegister src, FloatRegister dest) { michael@0: divsd(src, dest); michael@0: } michael@0: void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest) { michael@0: cvtss2sd(src, dest); michael@0: } michael@0: void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest) { michael@0: cvtsd2ss(src, dest); michael@0: } michael@0: void moveFloatAsDouble(const Register &src, FloatRegister dest) { michael@0: movd(src, dest); michael@0: cvtss2sd(dest, dest); michael@0: } michael@0: void loadFloatAsDouble(const Address &src, FloatRegister dest) { michael@0: movss(src, dest); michael@0: cvtss2sd(dest, dest); michael@0: } michael@0: void loadFloatAsDouble(const BaseIndex &src, FloatRegister dest) { michael@0: movss(src, dest); michael@0: cvtss2sd(dest, dest); michael@0: } michael@0: void loadFloatAsDouble(const Operand &src, FloatRegister dest) { michael@0: loadFloat32(src, dest); michael@0: cvtss2sd(dest, dest); michael@0: } michael@0: void loadFloat32(const Address &src, FloatRegister dest) { michael@0: movss(src, dest); michael@0: } michael@0: void loadFloat32(const BaseIndex &src, FloatRegister dest) { michael@0: movss(src, dest); michael@0: } michael@0: void loadFloat32(const Operand &src, FloatRegister dest) { michael@0: switch (src.kind()) { michael@0: case Operand::MEM_REG_DISP: michael@0: loadFloat32(src.toAddress(), dest); michael@0: break; michael@0: case Operand::MEM_SCALE: michael@0: loadFloat32(src.toBaseIndex(), dest); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); michael@0: } michael@0: } michael@0: void storeFloat32(FloatRegister src, const Address &dest) { michael@0: movss(src, dest); michael@0: } michael@0: void storeFloat32(FloatRegister src, const BaseIndex &dest) { michael@0: movss(src, dest); michael@0: } michael@0: void storeFloat32(FloatRegister src, const Operand &dest) { michael@0: switch (dest.kind()) { michael@0: case Operand::MEM_REG_DISP: michael@0: storeFloat32(src, dest.toAddress()); michael@0: break; michael@0: case Operand::MEM_SCALE: michael@0: storeFloat32(src, dest.toBaseIndex()); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected operand kind"); michael@0: } michael@0: } michael@0: void moveFloat32(FloatRegister src, FloatRegister dest) { michael@0: // Use movaps instead of movss to avoid dependencies. michael@0: movaps(src, dest); michael@0: } michael@0: michael@0: // Checks whether a double is representable as a 32-bit integer. If so, the michael@0: // integer is written to the output register. Otherwise, a bailout is taken to michael@0: // the given snapshot. This function overwrites the scratch float register. michael@0: void convertDoubleToInt32(FloatRegister src, Register dest, Label *fail, michael@0: bool negativeZeroCheck = true) michael@0: { michael@0: // Check for -0.0 michael@0: if (negativeZeroCheck) michael@0: branchNegativeZero(src, dest, fail); michael@0: michael@0: cvttsd2si(src, dest); michael@0: cvtsi2sd(dest, ScratchFloatReg); michael@0: ucomisd(src, ScratchFloatReg); michael@0: j(Assembler::Parity, fail); michael@0: j(Assembler::NotEqual, fail); michael@0: michael@0: } michael@0: michael@0: // Checks whether a float32 is representable as a 32-bit integer. If so, the michael@0: // integer is written to the output register. Otherwise, a bailout is taken to michael@0: // the given snapshot. This function overwrites the scratch float register. michael@0: void convertFloat32ToInt32(FloatRegister src, Register dest, Label *fail, michael@0: bool negativeZeroCheck = true) michael@0: { michael@0: // Check for -0.0 michael@0: if (negativeZeroCheck) michael@0: branchNegativeZeroFloat32(src, dest, fail); michael@0: michael@0: cvttss2si(src, dest); michael@0: convertInt32ToFloat32(dest, ScratchFloatReg); michael@0: ucomiss(src, ScratchFloatReg); michael@0: j(Assembler::Parity, fail); michael@0: j(Assembler::NotEqual, fail); michael@0: } michael@0: michael@0: void clampIntToUint8(Register reg) { michael@0: Label inRange; michael@0: branchTest32(Assembler::Zero, reg, Imm32(0xffffff00), &inRange); michael@0: { michael@0: sarl(Imm32(31), reg); michael@0: notl(reg); michael@0: andl(Imm32(255), reg); michael@0: } michael@0: bind(&inRange); michael@0: } michael@0: michael@0: bool maybeInlineDouble(double d, const FloatRegister &dest) { michael@0: uint64_t u = mozilla::BitwiseCast(d); michael@0: michael@0: // Loading zero with xor is specially optimized in hardware. michael@0: if (u == 0) { michael@0: xorpd(dest, dest); michael@0: return true; michael@0: } michael@0: michael@0: // It is also possible to load several common constants using pcmpeqw michael@0: // to get all ones and then psllq and psrlq to get zeros at the ends, michael@0: // as described in "13.4 Generating constants" of michael@0: // "2. Optimizing subroutines in assembly language" by Agner Fog, and as michael@0: // previously implemented here. However, with x86 and x64 both using michael@0: // constant pool loads for double constants, this is probably only michael@0: // worthwhile in cases where a load is likely to be delayed. michael@0: michael@0: return false; michael@0: } michael@0: michael@0: bool maybeInlineFloat(float f, const FloatRegister &dest) { michael@0: uint32_t u = mozilla::BitwiseCast(f); michael@0: michael@0: // See comment above michael@0: if (u == 0) { michael@0: xorps(dest, dest); michael@0: return true; michael@0: } michael@0: return false; michael@0: } michael@0: michael@0: void convertBoolToInt32(Register source, Register dest) { michael@0: // Note that C++ bool is only 1 byte, so zero extend it to clear the michael@0: // higher-order bits. michael@0: movzbl(source, dest); michael@0: } michael@0: michael@0: void emitSet(Assembler::Condition cond, const Register &dest, michael@0: Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond) { michael@0: if (GeneralRegisterSet(Registers::SingleByteRegs).has(dest)) { michael@0: // If the register we're defining is a single byte register, michael@0: // take advantage of the setCC instruction michael@0: setCC(cond, dest); michael@0: movzbl(dest, dest); michael@0: michael@0: if (ifNaN != Assembler::NaN_HandledByCond) { michael@0: Label noNaN; michael@0: j(Assembler::NoParity, &noNaN); michael@0: mov(ImmWord(ifNaN == Assembler::NaN_IsTrue), dest); michael@0: bind(&noNaN); michael@0: } michael@0: } else { michael@0: Label end; michael@0: Label ifFalse; michael@0: michael@0: if (ifNaN == Assembler::NaN_IsFalse) michael@0: j(Assembler::Parity, &ifFalse); michael@0: // Note a subtlety here: FLAGS is live at this point, and the michael@0: // mov interface doesn't guarantee to preserve FLAGS. Use michael@0: // movl instead of mov, because the movl instruction michael@0: // preserves FLAGS. michael@0: movl(Imm32(1), dest); michael@0: j(cond, &end); michael@0: if (ifNaN == Assembler::NaN_IsTrue) michael@0: j(Assembler::Parity, &end); michael@0: bind(&ifFalse); michael@0: mov(ImmWord(0), dest); michael@0: michael@0: bind(&end); michael@0: } michael@0: } michael@0: michael@0: template michael@0: void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest) michael@0: { michael@0: cmp32(lhs, rhs); michael@0: emitSet(cond, dest); michael@0: } michael@0: michael@0: // Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp(). michael@0: CodeOffsetLabel toggledJump(Label *label) { michael@0: CodeOffsetLabel offset(size()); michael@0: jump(label); michael@0: return offset; michael@0: } michael@0: michael@0: template michael@0: void computeEffectiveAddress(const T &address, Register dest) { michael@0: lea(Operand(address), dest); michael@0: } michael@0: michael@0: // Builds an exit frame on the stack, with a return address to an internal michael@0: // non-function. Returns offset to be passed to markSafepointAt(). michael@0: bool buildFakeExitFrame(const Register &scratch, uint32_t *offset); michael@0: void callWithExitFrame(JitCode *target); michael@0: michael@0: void callIon(const Register &callee) { michael@0: call(callee); michael@0: } michael@0: michael@0: void appendCallSite(const CallSiteDesc &desc) { michael@0: // Add an extra sizeof(void*) to include the return address that was michael@0: // pushed by the call instruction (see CallSite::stackDepth). michael@0: enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + sizeof(void*))); michael@0: } michael@0: michael@0: void call(const CallSiteDesc &desc, Label *label) { michael@0: call(label); michael@0: appendCallSite(desc); michael@0: } michael@0: void call(const CallSiteDesc &desc, const Register ®) { michael@0: call(reg); michael@0: appendCallSite(desc); michael@0: } michael@0: void callIonFromAsmJS(const Register ®) { michael@0: call(CallSiteDesc::Exit(), reg); michael@0: } michael@0: michael@0: void checkStackAlignment() { michael@0: // Exists for ARM compatibility. michael@0: } michael@0: michael@0: CodeOffsetLabel labelForPatch() { michael@0: return CodeOffsetLabel(size()); michael@0: } michael@0: michael@0: void abiret() { michael@0: ret(); michael@0: } michael@0: michael@0: protected: michael@0: bool buildOOLFakeExitFrame(void *fakeReturnAddr); michael@0: }; michael@0: michael@0: } // namespace jit michael@0: } // namespace js michael@0: michael@0: #endif /* jit_shared_MacroAssembler_x86_shared_h */