michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "jit/arm/MacroAssembler-arm.h" michael@0: michael@0: #include "mozilla/Casting.h" michael@0: #include "mozilla/DebugOnly.h" michael@0: #include "mozilla/MathAlgorithms.h" michael@0: michael@0: #include "jit/arm/Simulator-arm.h" michael@0: #include "jit/Bailouts.h" michael@0: #include "jit/BaselineFrame.h" michael@0: #include "jit/IonFrames.h" michael@0: #include "jit/MoveEmitter.h" michael@0: michael@0: using namespace js; michael@0: using namespace jit; michael@0: michael@0: using mozilla::Abs; michael@0: using mozilla::BitwiseCast; michael@0: michael@0: bool michael@0: isValueDTRDCandidate(ValueOperand &val) michael@0: { michael@0: // In order to be used for a DTRD memory function, the two target registers michael@0: // need to be a) Adjacent, with the tag larger than the payload, and michael@0: // b) Aligned to a multiple of two. michael@0: if ((val.typeReg().code() != (val.payloadReg().code() + 1))) michael@0: return false; michael@0: if ((val.payloadReg().code() & 1) != 0) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertBoolToInt32(Register source, Register dest) michael@0: { michael@0: // Note that C++ bool is only 1 byte, so zero extend it to clear the michael@0: // higher-order bits. michael@0: ma_and(Imm32(0xff), source, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertInt32ToDouble(const Register &src, const FloatRegister &dest_) michael@0: { michael@0: // direct conversions aren't possible. 
michael@0: VFPRegister dest = VFPRegister(dest_); michael@0: as_vxfer(src, InvalidReg, dest.sintOverlay(), michael@0: CoreToFloat); michael@0: as_vcvt(dest, dest.sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest) michael@0: { michael@0: ma_vldr(Operand(src), ScratchFloatReg); michael@0: as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertUInt32ToDouble(const Register &src, const FloatRegister &dest_) michael@0: { michael@0: // direct conversions aren't possible. michael@0: VFPRegister dest = VFPRegister(dest_); michael@0: as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat); michael@0: as_vcvt(dest, dest.uintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest_) michael@0: { michael@0: // direct conversions aren't possible. michael@0: VFPRegister dest = VFPRegister(dest_); michael@0: as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat); michael@0: as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay()); michael@0: } michael@0: michael@0: void MacroAssemblerARM::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest, michael@0: Condition c) michael@0: { michael@0: as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c); michael@0: } michael@0: michael@0: // there are two options for implementing emitTruncateDouble. michael@0: // 1) convert the floating point value to an integer, if it did not fit, michael@0: // then it was clamped to INT_MIN/INT_MAX, and we can test it. michael@0: // NOTE: if the value really was supposed to be INT_MAX / INT_MIN michael@0: // then it will be wrong. michael@0: // 2) convert the floating point value to an integer, if it did not fit, michael@0: // then it set one or two bits in the fpcsr. Check those. 
michael@0: void michael@0: MacroAssemblerARM::branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail) michael@0: { michael@0: ma_vcvt_F64_I32(src, ScratchFloatReg); michael@0: ma_vxfer(ScratchFloatReg, dest); michael@0: ma_cmp(dest, Imm32(0x7fffffff)); michael@0: ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); michael@0: ma_b(fail, Assembler::Equal); michael@0: } michael@0: michael@0: // Checks whether a double is representable as a 32-bit integer. If so, the michael@0: // integer is written to the output register. Otherwise, a bailout is taken to michael@0: // the given snapshot. This function overwrites the scratch float register. michael@0: void michael@0: MacroAssemblerARM::convertDoubleToInt32(const FloatRegister &src, const Register &dest, michael@0: Label *fail, bool negativeZeroCheck) michael@0: { michael@0: // convert the floating point value to an integer, if it did not fit, michael@0: // then when we convert it *back* to a float, it will have a michael@0: // different value, which we can test. michael@0: ma_vcvt_F64_I32(src, ScratchFloatReg); michael@0: // move the value into the dest register. michael@0: ma_vxfer(ScratchFloatReg, dest); michael@0: ma_vcvt_I32_F64(ScratchFloatReg, ScratchFloatReg); michael@0: ma_vcmp(src, ScratchFloatReg); michael@0: as_vmrs(pc); michael@0: ma_b(fail, Assembler::VFP_NotEqualOrUnordered); michael@0: michael@0: if (negativeZeroCheck) { michael@0: ma_cmp(dest, Imm32(0)); michael@0: // Test and bail for -0.0, when integer result is 0 michael@0: // Move the top word of the double into the output reg, if it is non-zero, michael@0: // then the original value was -0.0 michael@0: as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1); michael@0: ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); michael@0: ma_b(fail, Assembler::Equal); michael@0: } michael@0: } michael@0: michael@0: // Checks whether a float32 is representable as a 32-bit integer. 
If so, the michael@0: // integer is written to the output register. Otherwise, a bailout is taken to michael@0: // the given snapshot. This function overwrites the scratch float register. michael@0: void michael@0: MacroAssemblerARM::convertFloat32ToInt32(const FloatRegister &src, const Register &dest, michael@0: Label *fail, bool negativeZeroCheck) michael@0: { michael@0: // convert the floating point value to an integer, if it did not fit, michael@0: // then when we convert it *back* to a float, it will have a michael@0: // different value, which we can test. michael@0: ma_vcvt_F32_I32(src, ScratchFloatReg); michael@0: // move the value into the dest register. michael@0: ma_vxfer(ScratchFloatReg, dest); michael@0: ma_vcvt_I32_F32(ScratchFloatReg, ScratchFloatReg); michael@0: ma_vcmp_f32(src, ScratchFloatReg); michael@0: as_vmrs(pc); michael@0: ma_b(fail, Assembler::VFP_NotEqualOrUnordered); michael@0: michael@0: if (negativeZeroCheck) { michael@0: ma_cmp(dest, Imm32(0)); michael@0: // Test and bail for -0.0, when integer result is 0 michael@0: // Move the float into the output reg, and if it is non-zero then michael@0: // the original value was -0.0 michael@0: as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0); michael@0: ma_cmp(dest, Imm32(0x80000000), Assembler::Equal); michael@0: ma_b(fail, Assembler::Equal); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest) { michael@0: as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail) { michael@0: ma_vcvt_F32_I32(src, ScratchFloatReg); michael@0: ma_vxfer(ScratchFloatReg, dest); michael@0: ma_cmp(dest, Imm32(0x7fffffff)); michael@0: ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); michael@0: ma_b(fail, 
Assembler::Equal); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertInt32ToFloat32(const Register &src, const FloatRegister &dest_) { michael@0: // direct conversions aren't possible. michael@0: VFPRegister dest = VFPRegister(dest_).singleOverlay(); michael@0: as_vxfer(src, InvalidReg, dest.sintOverlay(), michael@0: CoreToFloat); michael@0: as_vcvt(dest, dest.sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::convertInt32ToFloat32(const Address &src, FloatRegister dest) { michael@0: ma_vldr(Operand(src), ScratchFloatReg); michael@0: as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::addDouble(FloatRegister src, FloatRegister dest) michael@0: { michael@0: ma_vadd(dest, src, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::subDouble(FloatRegister src, FloatRegister dest) michael@0: { michael@0: ma_vsub(dest, src, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::mulDouble(FloatRegister src, FloatRegister dest) michael@0: { michael@0: ma_vmul(dest, src, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::divDouble(FloatRegister src, FloatRegister dest) michael@0: { michael@0: ma_vdiv(dest, src, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::negateDouble(FloatRegister reg) michael@0: { michael@0: ma_vneg(reg, reg); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::inc64(AbsoluteAddress dest) michael@0: { michael@0: michael@0: ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex); michael@0: michael@0: ma_mov(Imm32((int32_t)dest.addr), ScratchRegister); michael@0: michael@0: ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(0)), r0, r1); michael@0: michael@0: ma_add(Imm32(1), r0, SetCond); michael@0: ma_adc(Imm32(0), r1, NoSetCond); michael@0: michael@0: ma_strd(r0, r1, EDtrAddr(ScratchRegister, 
EDtrOffImm(0))); michael@0: michael@0: ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex); michael@0: michael@0: } michael@0: michael@0: bool michael@0: MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op, michael@0: SetCond_ sc, Condition c) michael@0: { michael@0: if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op)) michael@0: return false; michael@0: ALUOp interop = getDestVariant(op); michael@0: Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value); michael@0: if (both.fst.invalid) michael@0: return false; michael@0: // for the most part, there is no good reason to set the condition michael@0: // codes for the first instruction. michael@0: // we can do better things if the second instruction doesn't michael@0: // have a dest, such as check for overflow by doing first operation michael@0: // don't do second operation if first operation overflowed. michael@0: // this preserves the overflow condition code. michael@0: // unfortunately, it is horribly brittle. michael@0: as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c); michael@0: as_alu(dest, ScratchRegister, both.snd, op, sc, c); michael@0: return true; michael@0: } michael@0: michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest, michael@0: ALUOp op, michael@0: SetCond_ sc, Condition c) michael@0: { michael@0: // As it turns out, if you ask for a compare-like instruction michael@0: // you *probably* want it to set condition codes. michael@0: if (dest == InvalidReg) michael@0: JS_ASSERT(sc == SetCond); michael@0: michael@0: // The operator gives us the ability to determine how michael@0: // this can be used. michael@0: Imm8 imm8 = Imm8(imm.value); michael@0: // ONE INSTRUCTION: michael@0: // If we can encode it using an imm8m, then do so. 
michael@0: if (!imm8.invalid) { michael@0: as_alu(dest, src1, imm8, op, sc, c); michael@0: return; michael@0: } michael@0: // ONE INSTRUCTION, NEGATED: michael@0: Imm32 negImm = imm; michael@0: Register negDest; michael@0: ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest); michael@0: Imm8 negImm8 = Imm8(negImm.value); michael@0: // add r1, r2, -15 can be replaced with michael@0: // sub r1, r2, 15 michael@0: // for bonus points, dest can be replaced (nearly always invalid => ScratchRegister) michael@0: // This is useful if we wish to negate tst. tst has an invalid (aka not used) dest, michael@0: // but its negation is bic *requires* a dest. We can accomodate, but it will need to clobber michael@0: // *something*, and the scratch register isn't being used, so... michael@0: if (negOp != op_invalid && !negImm8.invalid) { michael@0: as_alu(negDest, src1, negImm8, negOp, sc, c); michael@0: return; michael@0: } michael@0: michael@0: if (hasMOVWT()) { michael@0: // If the operation is a move-a-like then we can try to use movw to michael@0: // move the bits into the destination. Otherwise, we'll need to michael@0: // fall back on a multi-instruction format :( michael@0: // movw/movt don't set condition codes, so don't hold your breath. michael@0: if (sc == NoSetCond && (op == op_mov || op == op_mvn)) { michael@0: // ARMv7 supports movw/movt. movw zero-extends michael@0: // its 16 bit argument, so we can set the register michael@0: // this way. michael@0: // movt leaves the bottom 16 bits in tact, so michael@0: // it is unsuitable to move a constant that michael@0: if (op == op_mov && ((imm.value & ~ 0xffff) == 0)) { michael@0: JS_ASSERT(src1 == InvalidReg); michael@0: as_movw(dest, (uint16_t)imm.value, c); michael@0: return; michael@0: } michael@0: michael@0: // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits michael@0: // then do it. 
michael@0: if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) { michael@0: JS_ASSERT(src1 == InvalidReg); michael@0: as_movw(dest, (uint16_t)~imm.value, c); michael@0: return; michael@0: } michael@0: michael@0: // TODO: constant dedup may enable us to add dest, r0, 23 *if* michael@0: // we are attempting to load a constant that looks similar to one michael@0: // that already exists michael@0: // If it can't be done with a single movw michael@0: // then we *need* to use two instructions michael@0: // since this must be some sort of a move operation, we can just use michael@0: // a movw/movt pair and get the whole thing done in two moves. This michael@0: // does not work for ops like add, sinc we'd need to do michael@0: // movw tmp; movt tmp; add dest, tmp, src1 michael@0: if (op == op_mvn) michael@0: imm.value = ~imm.value; michael@0: as_movw(dest, imm.value & 0xffff, c); michael@0: as_movt(dest, (imm.value >> 16) & 0xffff, c); michael@0: return; michael@0: } michael@0: // If we weren't doing a movalike, a 16 bit immediate michael@0: // will require 2 instructions. With the same amount of michael@0: // space and (less)time, we can do two 8 bit operations, reusing michael@0: // the dest register. e.g. michael@0: // movw tmp, 0xffff; add dest, src, tmp ror 4 michael@0: // vs. michael@0: // add dest, src, 0xff0; add dest, dest, 0xf000000f michael@0: // it turns out that there are some immediates that we miss with the michael@0: // second approach. A sample value is: add dest, src, 0x1fffe michael@0: // this can be done by movw tmp, 0xffff; add dest, src, tmp lsl 1 michael@0: // since imm8m's only get even offsets, we cannot encode this. michael@0: // I'll try to encode as two imm8's first, since they are faster. michael@0: // Both operations should take 1 cycle, where as add dest, tmp ror 4 michael@0: // takes two cycles to execute. 
michael@0: } michael@0: michael@0: // Either a) this isn't ARMv7 b) this isn't a move michael@0: // start by attempting to generate a two instruction form. michael@0: // Some things cannot be made into two-inst forms correctly. michael@0: // namely, adds dest, src, 0xffff. michael@0: // Since we want the condition codes (and don't know which ones will michael@0: // be checked), we need to assume that the overflow flag will be checked michael@0: // and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not michael@0: // guaranteed to set the overflow flag the same as the (theoretical) michael@0: // one instruction variant. michael@0: if (alu_dbl(src1, imm, dest, op, sc, c)) michael@0: return; michael@0: michael@0: // And try with its negative. michael@0: if (negOp != op_invalid && michael@0: alu_dbl(src1, negImm, negDest, negOp, sc, c)) michael@0: return; michael@0: michael@0: // Well, damn. We can use two 16 bit mov's, then do the op michael@0: // or we can do a single load from a pool then op. michael@0: if (hasMOVWT()) { michael@0: // Try to load the immediate into a scratch register michael@0: // then use that michael@0: as_movw(ScratchRegister, imm.value & 0xffff, c); michael@0: if ((imm.value >> 16) != 0) michael@0: as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c); michael@0: } else { michael@0: // Going to have to use a load. If the operation is a move, then just move it into the michael@0: // destination register michael@0: if (op == op_mov) { michael@0: as_Imm32Pool(dest, imm.value, c); michael@0: return; michael@0: } else { michael@0: // If this isn't just going into a register, then stick it in a temp, and then proceed. 
michael@0: as_Imm32Pool(ScratchRegister, imm.value, c); michael@0: } michael@0: } michael@0: as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: JS_ASSERT(op2.getTag() == Operand::OP2); michael@0: as_alu(dest, src1, op2.toOp2(), op, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, op2, op, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_nop() michael@0: { michael@0: as_nop(); michael@0: } michael@0: michael@0: Instruction * michael@0: NextInst(Instruction *i) michael@0: { michael@0: if (i == nullptr) michael@0: return nullptr; michael@0: return i->next(); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c, michael@0: RelocStyle rs, Instruction *i) michael@0: { michael@0: int32_t imm = imm_.value; michael@0: if (i) { michael@0: // Make sure the current instruction is not an artificial guard michael@0: // inserted by the assembler buffer. michael@0: // The InstructionIterator already does this and handles edge cases, michael@0: // so, just asking an iterator for its current instruction should be michael@0: // enough to make sure we don't accidentally inspect an artificial guard. michael@0: i = InstructionIterator(i).cur(); michael@0: } michael@0: switch(rs) { michael@0: case L_MOVWT: michael@0: as_movw(dest, Imm16(imm & 0xffff), c, i); michael@0: // i can be nullptr here. that just means "insert in the next in sequence." michael@0: // NextInst is special cased to not do anything when it is passed nullptr, so michael@0: // two consecutive instructions will be inserted. 
michael@0: i = NextInst(i); michael@0: as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i); michael@0: break; michael@0: case L_LDR: michael@0: if(i == nullptr) michael@0: as_Imm32Pool(dest, imm, c); michael@0: else michael@0: as_WritePoolEntry(i, c, imm); michael@0: break; michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest, michael@0: Assembler::Condition c, RelocStyle rs, Instruction *i) michael@0: { michael@0: return ma_movPatchable(Imm32(int32_t(imm.value)), dest, c, rs, i); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mov(Register src, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: if (sc == SetCond || dest != src) michael@0: as_mov(dest, O2Reg(src), sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mov(Imm32 imm, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(InvalidReg, imm, dest, op_mov, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mov(ImmWord imm, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest) michael@0: { michael@0: // As opposed to x86/x64 version, the data relocation has to be executed michael@0: // before to recover the pointer, and not after. 
michael@0: writeDataRelocation(ptr); michael@0: RelocStyle rs; michael@0: if (hasMOVWT()) michael@0: rs = L_MOVWT; michael@0: else michael@0: rs = L_LDR; michael@0: michael@0: ma_movPatchable(Imm32(ptr.value), dest, Always, rs); michael@0: } michael@0: michael@0: // Shifts (just a move with a shifting op2) michael@0: void michael@0: MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, lsl(src, shift.value)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, lsr(src, shift.value)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, asr(src, shift.value)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, ror(src, shift.value)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, rol(src, shift.value)); michael@0: } michael@0: // Shifts (just a move with a shifting op2) michael@0: void michael@0: MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, lsl(src, shift)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, lsr(src, shift)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, asr(src, shift)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst) michael@0: { michael@0: as_mov(dst, ror(src, shift)); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst) michael@0: { michael@0: ma_rsb(shift, Imm32(32), 
ScratchRegister); michael@0: as_mov(dst, ror(src, ScratchRegister)); michael@0: } michael@0: michael@0: // Move not (dest <- ~src) michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(InvalidReg, imm, dest, op_mvn, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c); michael@0: } michael@0: michael@0: // Negate (dest <- -src), src is a register, rather than a general op2. michael@0: void michael@0: MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: as_rsb(dest, src1, Imm8(0), sc, c); michael@0: } michael@0: michael@0: // And. michael@0: void michael@0: MacroAssemblerARM::ma_and(Register src, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_and(dest, src, dest); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: as_and(dest, src1, O2Reg(src2), sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_and, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(src1, imm, dest, op_and, sc, c); michael@0: } michael@0: michael@0: michael@0: // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2). 
michael@0: void michael@0: MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_bic, sc, c); michael@0: } michael@0: michael@0: // Exclusive or. michael@0: void michael@0: MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_eor(dest, src, dest, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: as_eor(dest, src1, O2Reg(src2), sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_eor, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(src1, imm, dest, op_eor, sc, c); michael@0: } michael@0: michael@0: // Or. michael@0: void michael@0: MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_orr(dest, src, dest, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: as_orr(dest, src1, O2Reg(src2), sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_orr, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest, michael@0: SetCond_ sc, Assembler::Condition c) michael@0: { michael@0: ma_alu(src1, imm, dest, op_orr, sc, c); michael@0: } michael@0: michael@0: // Arithmetic-based ops. michael@0: // Add with carry. 
michael@0: void michael@0: MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_adc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, dest, O2Reg(src), op_adc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, O2Reg(src2), op_adc, sc, c); michael@0: } michael@0: michael@0: // Add. michael@0: void michael@0: MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_add, sc, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, O2Reg(src1), dest, op_add, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, O2Reg(src2), op_add, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, op, dest, op_add, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, op, dest, op_add, sc, c); michael@0: } michael@0: michael@0: // Subtract with carry. 
michael@0: void michael@0: MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_sbc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c); michael@0: } michael@0: michael@0: // Subtract. michael@0: void michael@0: MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_sub, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, Operand(src1), dest, op_sub, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, Operand(src2), dest, op_sub, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, op, dest, op_sub, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, op, dest, op_sub, sc, c); michael@0: } michael@0: michael@0: // Severse subtract. 
michael@0: void michael@0: MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_rsb, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, dest, O2Reg(src1), op_add, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(src1, op2, dest, op_rsb, sc, c); michael@0: } michael@0: michael@0: // Reverse subtract with carry. michael@0: void michael@0: MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: ma_alu(dest, imm, dest, op_rsc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c) michael@0: { michael@0: as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c); michael@0: } michael@0: michael@0: // Compares/tests. michael@0: // Compare negative (sets condition codes as src1 + src2 would). 
michael@0: void michael@0: MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c) michael@0: { michael@0: ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c) michael@0: { michael@0: as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("Feature NYI"); michael@0: } michael@0: michael@0: // Compare (src - src2). michael@0: void michael@0: MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c) michael@0: { michael@0: ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c) michael@0: { michael@0: ma_cmp(src1, Imm32(ptr.value), c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c) michael@0: { michael@0: ma_mov(ptr, ScratchRegister); michael@0: ma_cmp(src1, ScratchRegister, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c) michael@0: { michael@0: switch (op.getTag()) { michael@0: case Operand::OP2: michael@0: as_cmp(src1, op.toOp2(), c); michael@0: break; michael@0: case Operand::MEM: michael@0: ma_ldr(op, ScratchRegister); michael@0: as_cmp(src1, O2Reg(ScratchRegister), c); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("trying to compare FP and integer registers"); michael@0: } michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c) michael@0: { michael@0: as_cmp(src1, O2Reg(src2), c); michael@0: } michael@0: michael@0: // Test for equality, (src1^src2). 
michael@0: void michael@0: MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c) michael@0: { michael@0: ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) michael@0: { michael@0: as_tst(src1, O2Reg(src2), c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c) michael@0: { michael@0: as_teq(src1, op.toOp2(), c); michael@0: } michael@0: michael@0: michael@0: // Test (src1 & src2). michael@0: void michael@0: MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c) michael@0: { michael@0: ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c) michael@0: { michael@0: as_tst(src1, O2Reg(src2), c); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c) michael@0: { michael@0: as_tst(src1, op.toOp2(), c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest) michael@0: { michael@0: as_mul(dest, src1, src2); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest) michael@0: { michael@0: michael@0: ma_mov(imm, ScratchRegister); michael@0: as_mul( dest, src1, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond) michael@0: { michael@0: // TODO: this operation is illegal on armv6 and earlier if src2 == ScratchRegister michael@0: // or src2 == dest. 
michael@0: if (cond == Equal || cond == NotEqual) { michael@0: as_smull(ScratchRegister, dest, src1, src2, SetCond); michael@0: return cond; michael@0: } michael@0: michael@0: if (cond == Overflow) { michael@0: as_smull(ScratchRegister, dest, src1, src2); michael@0: as_cmp(ScratchRegister, asr(dest, 31)); michael@0: return NotEqual; michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("Condition NYI"); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond) michael@0: { michael@0: ma_mov(imm, ScratchRegister); michael@0: if (cond == Equal || cond == NotEqual) { michael@0: as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCond); michael@0: return cond; michael@0: } michael@0: michael@0: if (cond == Overflow) { michael@0: as_smull(ScratchRegister, dest, ScratchRegister, src1); michael@0: as_cmp(ScratchRegister, asr(dest, 31)); michael@0: return NotEqual; michael@0: } michael@0: michael@0: MOZ_ASSUME_UNREACHABLE("Condition NYI"); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp, michael@0: int32_t shift) michael@0: { michael@0: // MATH: michael@0: // We wish to compute x % (1< 0, store sum - C back into sum, thus performing a modulus. michael@0: ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned); michael@0: // Get rid of the bits that we extracted before, and set the condition codes michael@0: as_mov(tmp, lsr(tmp, shift), SetCond); michael@0: // If the shift produced zero, finish, otherwise, continue in the loop. michael@0: ma_b(&head, NonZero); michael@0: // Check the hold to see if we need to negate the result. Hold can only be 1 or -1, michael@0: // so this will never set the 0 flag. 
michael@0: ma_cmp(hold, Imm32(0)); michael@0: // If the hold was non-zero, negate the result to be in line with what JS wants michael@0: // this will set the condition codes if we try to negate michael@0: ma_rsb(Imm32(0), dest, SetCond, Signed); michael@0: // Since the Zero flag is not set by the compare, we can *only* set the Zero flag michael@0: // in the rsb, so Zero is set iff we negated zero (e.g. the result of the computation was -0.0). michael@0: michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_smod(Register num, Register div, Register dest) michael@0: { michael@0: as_sdiv(ScratchRegister, num, div); michael@0: as_mls(dest, num, ScratchRegister, div); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_umod(Register num, Register div, Register dest) michael@0: { michael@0: as_udiv(ScratchRegister, num, div); michael@0: as_mls(dest, num, ScratchRegister, div); michael@0: } michael@0: michael@0: // division michael@0: void michael@0: MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond) michael@0: { michael@0: as_sdiv(dest, num, div, cond); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond) michael@0: { michael@0: as_udiv(dest, num, div, cond); michael@0: } michael@0: michael@0: // Memory. michael@0: // Shortcut for when we know we're transferring 32 bits of data. 
michael@0: void michael@0: MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt, michael@0: Index mode, Assembler::Condition cc) michael@0: { michael@0: ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Register rm, Register rt, michael@0: Index mode, Assembler::Condition cc) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("Feature NYI"); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc) michael@0: { michael@0: as_dtr(IsStore, 32, mode, rt, addr, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc) michael@0: { michael@0: ma_dataTransferN(ls, 32, true, michael@0: Register::FromCode(addr.base()), Imm32(addr.disp()), michael@0: rt, mode, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_str(Register rt, const Operand &addr, Index mode, Condition cc) michael@0: { michael@0: ma_dtr(IsStore, rt, addr, mode, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_strd(Register rt, DebugOnly rt2, EDtrAddr addr, Index mode, Condition cc) michael@0: { michael@0: JS_ASSERT((rt.code() & 1) == 0); michael@0: JS_ASSERT(rt2.value.code() == rt.code() + 1); michael@0: as_extdtr(IsStore, 64, true, mode, rt, addr, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc) michael@0: { michael@0: as_dtr(IsLoad, 32, mode, rt, addr, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_ldr(const Operand &addr, Register rt, Index mode, Condition cc) michael@0: { michael@0: ma_dtr(IsLoad, rt, addr, mode, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc) michael@0: 
{ michael@0: as_dtr(IsLoad, 8, mode, rt, addr, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode, Condition cc) michael@0: { michael@0: as_extdtr(IsLoad, 16, true, mode, rt, addr, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc) michael@0: { michael@0: as_extdtr(IsLoad, 16, false, mode, rt, addr, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc) michael@0: { michael@0: as_extdtr(IsLoad, 8, true, mode, rt, addr, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly rt2, michael@0: Index mode, Condition cc) michael@0: { michael@0: JS_ASSERT((rt.code() & 1) == 0); michael@0: JS_ASSERT(rt2.value.code() == rt.code() + 1); michael@0: as_extdtr(IsLoad, 64, true, mode, rt, addr, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc) michael@0: { michael@0: as_extdtr(IsStore, 16, false, mode, rt, addr, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc) michael@0: { michael@0: as_dtr(IsStore, 8, mode, rt, addr, cc); michael@0: } michael@0: michael@0: // Specialty for moving N bits of data, where n == 8,16,32,64. 
michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, michael@0: Register rn, Register rm, Register rt, michael@0: Index mode, Assembler::Condition cc, unsigned shiftAmount) michael@0: { michael@0: if (size == 32 || (size == 8 && !IsSigned)) { michael@0: return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc); michael@0: } else { michael@0: if (shiftAmount != 0) { michael@0: JS_ASSERT(rn != ScratchRegister); michael@0: JS_ASSERT(rt != ScratchRegister); michael@0: ma_lsl(Imm32(shiftAmount), rm, ScratchRegister); michael@0: rm = ScratchRegister; michael@0: } michael@0: return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc); michael@0: } michael@0: } michael@0: michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned, michael@0: Register rn, Imm32 offset, Register rt, michael@0: Index mode, Assembler::Condition cc) michael@0: { michael@0: int off = offset.value; michael@0: // we can encode this as a standard ldr... MAKE IT SO michael@0: if (size == 32 || (size == 8 && !IsSigned) ) { michael@0: if (off < 4096 && off > -4096) { michael@0: // This encodes as a single instruction, Emulating mode's behavior michael@0: // in a multi-instruction sequence is not necessary. michael@0: return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc); michael@0: } michael@0: michael@0: // We cannot encode this offset in a a single ldr. For mode == index, michael@0: // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|. michael@0: // This does not wark for mode == PreIndex or mode == PostIndex. michael@0: // PreIndex is simple, just do the add into the base register first, then do michael@0: // a PreIndex'ed load. PostIndexed loads can be tricky. 
Normally, doing the load with michael@0: // an index of 0, then doing an add would work, but if the destination is the PC, michael@0: // you don't get to execute the instruction after the branch, which will lead to michael@0: // the base register not being updated correctly. Explicitly handle this case, without michael@0: // doing anything fancy, then handle all of the other cases. michael@0: michael@0: // mode == Offset michael@0: // add scratch, base, offset_hi michael@0: // ldr dest, [scratch, +offset_lo] michael@0: // michael@0: // mode == PreIndex michael@0: // add base, base, offset_hi michael@0: // ldr dest, [base, +offset_lo]! michael@0: // michael@0: // mode == PostIndex, dest == pc michael@0: // ldr scratch, [base] michael@0: // add base, base, offset_hi michael@0: // add base, base, offset_lo michael@0: // mov dest, scratch michael@0: // PostIndex with the pc as the destination needs to be handled michael@0: // specially, since in the code below, the write into 'dest' michael@0: // is going to alter the control flow, so the following instruction would michael@0: // never get emitted. michael@0: // michael@0: // mode == PostIndex, dest != pc michael@0: // ldr dest, [base], offset_lo michael@0: // add base, base, offset_hi michael@0: michael@0: if (rt == pc && mode == PostIndex && ls == IsLoad) { michael@0: ma_mov(rn, ScratchRegister); michael@0: ma_alu(rn, offset, rn, op_add); michael@0: return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc); michael@0: } michael@0: michael@0: int bottom = off & 0xfff; michael@0: int neg_bottom = 0x1000 - bottom; michael@0: // For a regular offset, base == ScratchRegister does what we want. Modify the michael@0: // scratch register, leaving the actual base unscathed. michael@0: Register base = ScratchRegister; michael@0: // For the preindex case, we want to just re-use rn as the base register, so when michael@0: // the base register is updated *before* the load, rn is updated. 
michael@0: if (mode == PreIndex) michael@0: base = rn; michael@0: JS_ASSERT(mode != PostIndex); michael@0: // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. michael@0: // michael@0: // Note a neg_bottom of 0x1000 can not be encoded as an immediate negative offset in the michael@0: // instruction and this occurs when bottom is zero, so this case is guarded against below. michael@0: if (off < 0) { michael@0: Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off michael@0: if (!sub_off.invalid) { michael@0: as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom michael@0: return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc); michael@0: } michael@0: sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0 michael@0: as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off michael@0: return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc); michael@0: } michael@0: } else { michael@0: Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom michael@0: if (!sub_off.invalid) { michael@0: as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom michael@0: return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc); michael@0: } michael@0: sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0 michael@0: as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off michael@0: return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc); michael@0: } michael@0: } michael@0: ma_mov(offset, ScratchRegister); michael@0: return as_dtr(ls, size, 
mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0))); michael@0: } else { michael@0: // should attempt to use the extended load/store instructions michael@0: if (off < 256 && off > -256) michael@0: return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc); michael@0: michael@0: // We cannot encode this offset in a single extldr. Try to encode it as michael@0: // an add scratch, base, imm; extldr dest, [scratch, +offset]. michael@0: int bottom = off & 0xff; michael@0: int neg_bottom = 0x100 - bottom; michael@0: // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. michael@0: // michael@0: // Note a neg_bottom of 0x100 can not be encoded as an immediate negative offset in the michael@0: // instruction and this occurs when bottom is zero, so this case is guarded against below. michael@0: if (off < 0) { michael@0: Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off michael@0: if (!sub_off.invalid) { michael@0: as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom michael@0: return as_extdtr(ls, size, IsSigned, Offset, rt, michael@0: EDtrAddr(ScratchRegister, EDtrOffImm(bottom)), michael@0: cc); michael@0: } michael@0: sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0 michael@0: as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off michael@0: return as_extdtr(ls, size, IsSigned, Offset, rt, michael@0: EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)), michael@0: cc); michael@0: } michael@0: } else { michael@0: Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom michael@0: if (!sub_off.invalid) { michael@0: as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom michael@0: return as_extdtr(ls, size, IsSigned, Offset, rt, michael@0: 
EDtrAddr(ScratchRegister, EDtrOffImm(bottom)), michael@0: cc); michael@0: } michael@0: sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0 michael@0: as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off michael@0: return as_extdtr(ls, size, IsSigned, Offset, rt, michael@0: EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)), michael@0: cc); michael@0: } michael@0: } michael@0: ma_mov(offset, ScratchRegister); michael@0: return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_pop(Register r) michael@0: { michael@0: ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex); michael@0: if (r == pc) michael@0: m_buffer.markGuard(); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_push(Register r) michael@0: { michael@0: // Pushing sp is not well defined: use two instructions. michael@0: if (r == sp) { michael@0: ma_mov(sp, ScratchRegister); michael@0: r = ScratchRegister; michael@0: } michael@0: ma_dtr(IsStore, sp,Imm32(-4), r, PreIndex); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vpop(VFPRegister r) michael@0: { michael@0: startFloatTransferM(IsLoad, sp, IA, WriteBack); michael@0: transferFloatReg(r); michael@0: finishFloatTransfer(); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vpush(VFPRegister r) michael@0: { michael@0: startFloatTransferM(IsStore, sp, DB, WriteBack); michael@0: transferFloatReg(r); michael@0: finishFloatTransfer(); michael@0: } michael@0: michael@0: // Branches when done from within arm-specific code. 
michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_b(Label *dest, Assembler::Condition c, bool isPatchable) michael@0: { michael@0: return as_b(dest, c, isPatchable); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c) michael@0: { michael@0: as_bx(dest, c); michael@0: } michael@0: michael@0: static Assembler::RelocBranchStyle michael@0: b_type() michael@0: { michael@0: return Assembler::B_LDR; michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Condition c) michael@0: { michael@0: // we know the absolute address of the target, but not our final michael@0: // location (with relocating GC, we *can't* know our final location) michael@0: // for now, I'm going to be conservative, and load this with an michael@0: // absolute address michael@0: uint32_t trg = (uint32_t)target; michael@0: switch (b_type()) { michael@0: case Assembler::B_MOVWT: michael@0: as_movw(ScratchRegister, Imm16(trg & 0xffff), c); michael@0: as_movt(ScratchRegister, Imm16(trg >> 16), c); michael@0: // this is going to get the branch predictor pissed off. michael@0: as_bx(ScratchRegister, c); michael@0: break; michael@0: case Assembler::B_LDR_BX: michael@0: as_Imm32Pool(ScratchRegister, trg, c); michael@0: as_bx(ScratchRegister, c); michael@0: break; michael@0: case Assembler::B_LDR: michael@0: as_Imm32Pool(pc, trg, c); michael@0: if (c == Always) michael@0: m_buffer.markGuard(); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Other methods of generating tracable jumps NYI"); michael@0: } michael@0: } michael@0: michael@0: // This is almost NEVER necessary: we'll basically never be calling a label, michael@0: // except possibly in the crazy bailout-table case. 
michael@0: void michael@0: MacroAssemblerARM::ma_bl(Label *dest, Assembler::Condition c) michael@0: { michael@0: as_bl(dest, c); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c) michael@0: { michael@0: as_blx(reg, c); michael@0: } michael@0: michael@0: // VFP/ALU michael@0: void michael@0: MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), michael@0: VFPRegister(src2).singleOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), michael@0: VFPRegister(src2).singleOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), michael@0: VFPRegister(src2).singleOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: 
{ michael@0: as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) michael@0: { michael@0: as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), michael@0: VFPRegister(src2).singleOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vmov(dest, src, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vneg(dest, src, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vabs(dest, src, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vsqrt(dest, src, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: 
as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: DoubleHighWord(const double value) michael@0: { michael@0: return static_cast(BitwiseCast(value) >> 32); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: DoubleLowWord(const double value) michael@0: { michael@0: return BitwiseCast(value) & uint32_t(0xffffffff); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc) michael@0: { michael@0: if (hasVFPv3()) { michael@0: if (DoubleLowWord(value) == 0) { michael@0: if (DoubleHighWord(value) == 0) { michael@0: // To zero a register, load 1.0, then execute dN <- dN - dN michael@0: as_vimm(dest, VFPImm::one, cc); michael@0: as_vsub(dest, dest, dest, cc); michael@0: return; michael@0: } michael@0: michael@0: VFPImm enc(DoubleHighWord(value)); michael@0: if (enc.isValid()) { michael@0: as_vimm(dest, enc, cc); michael@0: return; michael@0: } michael@0: michael@0: } michael@0: } michael@0: // Fall back to putting the value in a pool. 
michael@0: as_FImm64Pool(dest, value, cc); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: Float32Word(const float value) michael@0: { michael@0: return BitwiseCast(value); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc) michael@0: { michael@0: VFPRegister vd = VFPRegister(dest).singleOverlay(); michael@0: if (hasVFPv3()) { michael@0: if (Float32Word(value) == 0) { michael@0: // To zero a register, load 1.0, then execute sN <- sN - sN michael@0: as_vimm(vd, VFPImm::one, cc); michael@0: as_vsub(vd, vd, vd, cc); michael@0: return; michael@0: } michael@0: michael@0: // Note that the vimm immediate float32 instruction encoding differs from the michael@0: // vimm immediate double encoding, but this difference matches the difference michael@0: // in the floating point formats, so it is possible to convert the float32 to michael@0: // a double and then use the double encoding paths. It is still necessary to michael@0: // firstly check that the double low word is zero because some float32 michael@0: // numbers set these bits and this can not be ignored. michael@0: double doubleValue = value; michael@0: if (DoubleLowWord(value) == 0) { michael@0: VFPImm enc(DoubleHighWord(doubleValue)); michael@0: if (enc.isValid()) { michael@0: as_vimm(vd, enc, cc); michael@0: return; michael@0: } michael@0: } michael@0: } michael@0: // Fall back to putting the value in a pool. 
michael@0: as_FImm32Pool(vd, value, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc) michael@0: { michael@0: as_vcmp(VFPRegister(src1), VFPRegister(src2), cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc) michael@0: { michael@0: as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc) michael@0: { michael@0: as_vcmpz(VFPRegister(src1), cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc) michael@0: { michael@0: as_vcmpz(VFPRegister(src1).singleOverlay(), cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src), false, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src), false, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister dest, FloatRegister src, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest), VFPRegister(src).sintOverlay(), false, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister dest, FloatRegister src, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest), VFPRegister(src).uintOverlay(), false, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc); michael@0: } michael@0: void 
michael@0: MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister dest, FloatRegister src, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc); michael@0: } michael@0: void michael@0: MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister dest, FloatRegister src, Condition cc) michael@0: { michael@0: as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest, Condition cc) michael@0: { michael@0: as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc) michael@0: { michael@0: as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc) michael@0: { michael@0: as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest, Condition cc) michael@0: { michael@0: as_vxfer(dest, InvalidReg, src, FloatToCore, cc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc) michael@0: { michael@0: as_vxfer(dest1, dest2, src, FloatToCore, cc); michael@0: } michael@0: michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc) michael@0: { michael@0: int off = addr.disp(); michael@0: 
JS_ASSERT((off & 3) == 0); michael@0: Register base = Register::FromCode(addr.base()); michael@0: if (off > -1024 && off < 1024) michael@0: return as_vdtr(ls, rt, addr.toVFPAddr(), cc); michael@0: michael@0: // We cannot encode this offset in a a single ldr. Try to encode it as michael@0: // an add scratch, base, imm; ldr dest, [scratch, +offset]. michael@0: int bottom = off & (0xff << 2); michael@0: int neg_bottom = (0x100 << 2) - bottom; michael@0: // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. michael@0: // michael@0: // Note a neg_bottom of 0x400 can not be encoded as an immediate negative offset in the michael@0: // instruction and this occurs when bottom is zero, so this case is guarded against below. michael@0: if (off < 0) { michael@0: Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off michael@0: if (!sub_off.invalid) { michael@0: as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom michael@0: return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); michael@0: } michael@0: sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0 michael@0: as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off michael@0: return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); michael@0: } michael@0: } else { michael@0: Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom michael@0: if (!sub_off.invalid) { michael@0: as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom michael@0: return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); michael@0: } michael@0: sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off michael@0: if (!sub_off.invalid && bottom != 0) { michael@0: JS_ASSERT(neg_bottom < 0x400); // Guarded against by: 
bottom != 0 michael@0: as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off michael@0: return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); michael@0: } michael@0: } michael@0: ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc); michael@0: return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc); michael@0: } michael@0: michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc) michael@0: { michael@0: return as_vdtr(IsLoad, dest, addr, cc); michael@0: } michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vldr(const Operand &addr, VFPRegister dest, Condition cc) michael@0: { michael@0: return ma_vdtr(IsLoad, addr, dest, cc); michael@0: } michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc) michael@0: { michael@0: as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc); michael@0: return ma_vldr(Operand(ScratchRegister, 0), src, cc); michael@0: } michael@0: michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc) michael@0: { michael@0: return as_vdtr(IsStore, src, addr, cc); michael@0: } michael@0: michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vstr(VFPRegister src, const Operand &addr, Condition cc) michael@0: { michael@0: return ma_vdtr(IsStore, addr, src, cc); michael@0: } michael@0: BufferOffset michael@0: MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc) michael@0: { michael@0: as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc); michael@0: return ma_vstr(src, Operand(ScratchRegister, 0), cc); michael@0: } michael@0: michael@0: bool michael@0: MacroAssemblerARMCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset) michael@0: { michael@0: DebugOnly initialDepth = framePushed(); michael@0: uint32_t 
descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); michael@0: michael@0: Push(Imm32(descriptor)); // descriptor_ michael@0: michael@0: enterNoPool(); michael@0: DebugOnly offsetBeforePush = currentOffset(); michael@0: Push(pc); // actually pushes $pc + 8. michael@0: michael@0: // Consume an additional 4 bytes. The start of the next instruction will michael@0: // then be 8 bytes after the instruction for Push(pc); this offset can michael@0: // therefore be fed to the safepoint. michael@0: ma_nop(); michael@0: uint32_t pseudoReturnOffset = currentOffset(); michael@0: leaveNoPool(); michael@0: michael@0: JS_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size()); michael@0: JS_ASSERT(pseudoReturnOffset - offsetBeforePush == 8); michael@0: michael@0: *offset = pseudoReturnOffset; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: MacroAssemblerARMCompat::buildOOLFakeExitFrame(void *fakeReturnAddr) michael@0: { michael@0: DebugOnly initialDepth = framePushed(); michael@0: uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); michael@0: michael@0: Push(Imm32(descriptor)); // descriptor_ michael@0: Push(ImmPtr(fakeReturnAddr)); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::callWithExitFrame(JitCode *target) michael@0: { michael@0: uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); michael@0: Push(Imm32(descriptor)); // descriptor michael@0: michael@0: addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE); michael@0: RelocStyle rs; michael@0: if (hasMOVWT()) michael@0: rs = L_MOVWT; michael@0: else michael@0: rs = L_LDR; michael@0: michael@0: ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs); michael@0: ma_callIonHalfPush(ScratchRegister); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::callWithExitFrame(JitCode *target, Register dynStack) 
{
    // dynStack holds a dynamic stack size; fold in the static frame depth and
    // turn it into a frame descriptor before pushing it.
    ma_add(Imm32(framePushed()), dynStack);
    makeFrameDescriptor(dynStack, JitFrame_IonJS);
    Push(dynStack); // descriptor

    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
    // Choose the patchable-move encoding available on this CPU.
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
    ma_callIonHalfPush(ScratchRegister);
}

// Call into Ion code, keeping the stack 8-byte aligned at the callee.
void
MacroAssemblerARMCompat::callIon(const Register &callee)
{
    JS_ASSERT((framePushed() & 3) == 0);
    if ((framePushed() & 7) == 4) {
        // Pushing the return address (4 bytes) restores 8-byte alignment.
        ma_callIonHalfPush(callee);
    } else {
        // Pad by one word so the return-address push keeps alignment.
        adjustFrame(sizeof(void*));
        ma_callIon(callee);
    }
}

// Grow the stack by `amount` bytes and track it in framePushed_.
void
MacroAssemblerARMCompat::reserveStack(uint32_t amount)
{
    if (amount)
        ma_sub(Imm32(amount), sp);
    adjustFrame(amount);
}
// Shrink the stack by `amount` bytes and track it in framePushed_.
void
MacroAssemblerARMCompat::freeStack(uint32_t amount)
{
    JS_ASSERT(amount <= framePushed_);
    if (amount)
        ma_add(Imm32(amount), sp);
    adjustFrame(-amount);
}
// Shrink the stack by a dynamic amount; framePushed_ is deliberately not
// adjusted here (caller tracks the dynamic portion).
void
MacroAssemblerARMCompat::freeStack(Register amount)
{
    ma_add(amount, sp);
}

// Save the registers in `set` to the stack. GPRs use a single store-multiple
// when more than one is requested; FP registers always go through
// transferMultipleByRuns.
void
MacroAssembler::PushRegsInMask(RegisterSet set)
{
    int32_t diffF = set.fpus().size() * sizeof(double);
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    if (set.gprs().size() > 1) {
        // stmdb with writeback: one instruction for the whole GPR set.
        adjustFrame(diffG);
        startDataTransferM(IsStore, StackPointer, DB, WriteBack);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            transferReg(*iter);
        }
        finishDataTransfer();
    } else {
        // Single register: reserve the space, then store at its slot offset.
        reserveStack(diffG);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            storePtr(*iter, Address(StackPointer, diffG));
        }
    }
    // Every reserved GPR slot must have been consumed.
    JS_ASSERT(diffG == 0);

    adjustFrame(diffF);
    // transferMultipleByRuns returns the (negative) number of bytes
    // transferred, bringing diffF back to zero.
    diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
    JS_ASSERT(diffF == 0);
}

// Restore the registers in `set` from the stack, skipping (but still popping
// the slots of) those in `ignore`.
void
MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
{
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = set.fpus().size() * sizeof(double);
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    // ARM can load multiple registers at once, but only if we want back all
    // the registers we previously saved to the stack.
    if (ignore.empty(true)) {
        diffF -= transferMultipleByRuns(set.fpus(), IsLoad, StackPointer, IA);
        adjustFrame(-reservedF);
    } else {
        // Selective restore: load each wanted FP register from its slot.
        for (FloatRegisterBackwardIterator iter(set.fpus()); iter.more(); iter++) {
            diffF -= sizeof(double);
            if (!ignore.has(*iter))
                loadDouble(Address(StackPointer, diffF), *iter);
        }
        freeStack(reservedF);
    }
    JS_ASSERT(diffF == 0);

    if (set.gprs().size() > 1 && ignore.empty(false)) {
        // ldmia with writeback restores all GPRs in one instruction.
        startDataTransferM(IsLoad, StackPointer, IA, WriteBack);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            transferReg(*iter);
        }
        finishDataTransfer();
        adjustFrame(-reservedG);
    } else {
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            if (!ignore.has(*iter))
                loadPtr(Address(StackPointer, diffG), *iter);
        }
        freeStack(reservedG);
    }
    JS_ASSERT(diffG == 0);
}

// 32-bit add; SetCond updates the condition flags for the caller.
void
MacroAssemblerARMCompat::add32(Register src, Register dest)
{
    ma_add(src, dest, SetCond);
}

void
MacroAssemblerARMCompat::add32(Imm32 imm, Register dest)
{
    ma_add(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest)
{
    ma_eor(imm, dest, SetCond);
}

// Read-modify-write add on a memory operand. Clobbers ScratchRegister.
void
MacroAssemblerARMCompat::add32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_add(imm, ScratchRegister, SetCond);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::sub32(Imm32 imm, Register dest)
{
    // SetCond: leave the condition flags reflecting the result.
    ma_sub(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::sub32(Register src, Register dest)
{
    ma_sub(src, dest, SetCond);
}

void
MacroAssemblerARMCompat::and32(Imm32 imm, Register dest)
{
    ma_and(imm, dest, SetCond);
}

// Pointer-width ops below do not set condition flags.
void
MacroAssemblerARMCompat::addPtr(Register src, Register dest)
{
    ma_add(src, dest);
}

// Adds a loaded memory word into dest. Clobbers ScratchRegister.
// NOTE(review): unlike addPtr(Register, Register) this variant passes SetCond.
void
MacroAssemblerARMCompat::addPtr(const Address &src, Register dest)
{
    load32(src, ScratchRegister);
    ma_add(ScratchRegister, dest, SetCond);
}

// Bitwise NOT in place (mvn reg, reg).
void
MacroAssemblerARMCompat::not32(Register reg)
{
    ma_mvn(reg, reg);
}

// Read-modify-write bitwise ops on memory. Each clobbers ScratchRegister.
void
MacroAssemblerARMCompat::and32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_and(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::or32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_orr(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::xorPtr(Imm32 imm, Register dest)
{
    ma_eor(imm, dest);
}

void
MacroAssemblerARMCompat::xorPtr(Register src, Register dest)
{
    ma_eor(src, dest);
}

void
MacroAssemblerARMCompat::orPtr(Imm32 imm, Register dest)
{
    ma_orr(imm, dest);
}

void
MacroAssemblerARMCompat::orPtr(Register src, Register dest)
{
    ma_orr(src, dest);
}

void
MacroAssemblerARMCompat::andPtr(Imm32 imm, Register dest)
{
    ma_and(imm, dest);
}

void
MacroAssemblerARMCompat::andPtr(Register src, Register dest)
{
    ma_and(src, dest);
}

// Register/immediate moves; on ARM, 32-bit and pointer moves are identical.
void
MacroAssemblerARMCompat::move32(const Imm32 &imm, const Register &dest)
{
    ma_mov(imm, dest);
}

void
MacroAssemblerARMCompat::move32(const Register &src, const Register &dest) {
    ma_mov(src, dest);
}

void
MacroAssemblerARMCompat::movePtr(const Register &src, const Register &dest)
{
    ma_mov(src, dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmWord &imm, const Register &dest)
{
    ma_mov(Imm32(imm.value), dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
{
    ma_mov(imm, dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmPtr &imm, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
// Emit a patchable move (placeholder -1) and record an asm.js absolute link so
// the real address can be patched in later.
void
MacroAssemblerARMCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
{
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    enoughMemory_ &= append(AsmJSAbsoluteLink(nextOffset().getOffset(), imm.kind()));
    ma_movPatchable(Imm32(-1), dest, Always, rs);
}
void
MacroAssemblerARMCompat::load8ZeroExtend(const Address &address, const Register &dest)
{
ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest) michael@0: { michael@0: Register base = src.base; michael@0: uint32_t scale = Imm32::ShiftOf(src.scale).value; michael@0: michael@0: if (src.offset != 0) { michael@0: ma_mov(base, ScratchRegister); michael@0: base = ScratchRegister; michael@0: ma_add(base, Imm32(src.offset), base); michael@0: } michael@0: ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest); michael@0: michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::load8SignExtend(const Address &address, const Register &dest) michael@0: { michael@0: ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::load8SignExtend(const BaseIndex &src, const Register &dest) michael@0: { michael@0: Register index = src.index; michael@0: michael@0: // ARMv7 does not have LSL on an index register with an extended load. 
    if (src.scale != TimesOne) {
        // Pre-scale the index into the scratch register, since ldrsb cannot
        // shift its register offset.
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        // Fold the displacement into the (possibly already scaled) index.
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

// Zero-extending halfword load from [base + offset].
void
MacroAssemblerARMCompat::load16ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest);
}

// Zero-extending halfword load from a base+index address.
// Clobbers ScratchRegister when scaling or a displacement is needed.
void
MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;

    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

// Sign-extending halfword load from [base + offset].
void
MacroAssemblerARMCompat::load16SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest);
}

// Sign-extending halfword load from a base+index address.
void
MacroAssemblerARMCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;

    // We don't have LSL on index register yet.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

// 32-bit loads are identical to pointer loads on ARM.
void
MacroAssemblerARMCompat::load32(const Address &address, const Register &dest)
{
    loadPtr(address, dest);
}

void
MacroAssemblerARMCompat::load32(const BaseIndex &address, const Register &dest)
{
    loadPtr(address, dest);
}

void
MacroAssemblerARMCompat::load32(const AbsoluteAddress &address, const Register &dest)
{
    loadPtr(address, dest);
}
void
MacroAssemblerARMCompat::loadPtr(const Address &address, const Register &dest)
{
    ma_ldr(Operand(address), dest);
}

// Pointer load from [base + (index << scale) + offset].
// Clobbers ScratchRegister when offset != 0.
void
MacroAssemblerARMCompat::loadPtr(const BaseIndex &src, const Register &dest)
{
    Register base = src.base;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;

    if (src.offset != 0) {
        ma_mov(base, ScratchRegister);
        base = ScratchRegister;
        ma_add(Imm32(src.offset), base);
    }
    ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
}
// Load from an absolute address by first materializing it in the scratch
// register.
void
MacroAssemblerARMCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(address.addr)), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}
void
MacroAssemblerARMCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
{
    // The asm.js address is patched in later; see movePtr(AsmJSImmPtr, ...).
    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}

// Operands addressing the payload (low word) and tag (high word) of a
// nunboxed Value stored at `address` (little-endian 32-bit layout).
Operand payloadOf(const Address &address) {
    return Operand(address.base, address.offset);
}
Operand tagOf(const Address &address) {
    return Operand(address.base, address.offset + 4);
}

// Load the private (payload) word of a Value slot.
void
MacroAssemblerARMCompat::loadPrivate(const Address &address, const Register &dest)
{
    ma_ldr(payloadOf(address), dest);
}

void
MacroAssemblerARMCompat::loadDouble(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), dest);
}

void
MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index modes, so
    // just add the index, then handle the offset like normal
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    // ScratchRegister = base + (index << scale); offset handled by ma_vldr.
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), dest);
}

// Load a single-precision float and widen it to double in place.
void
MacroAssemblerARMCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
{
    VFPRegister rt = dest;
    ma_vldr(Operand(address), rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}

void
MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index modes, so
    // just add the index, then handle the offset like normal
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    VFPRegister rt = dest;
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), rt.singleOverlay());
    // Widen the loaded single to a double.
    as_vcvt(rt, rt.singleOverlay());
}

// Load a single-precision float without conversion.
void
MacroAssemblerARMCompat::loadFloat32(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), VFPRegister(dest).singleOverlay());
}

void
MacroAssemblerARMCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index modes, so
    // just add the index, then handle the offset like normal
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), VFPRegister(dest).singleOverlay());
}

// Byte store of an immediate; stages the value in the second scratch register
// so ScratchRegister stays free for address computation.
void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store8(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src);
}

void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const BaseIndex &dest)
{
    // Stage the immediate in the second scratch register; ScratchRegister is
    // reserved for the address computation in the register overload.
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, dest);
}

// Byte store to [base + (index << scale) + offset].
// Clobbers ScratchRegister when offset != 0.
void
MacroAssemblerARMCompat::store8(const Register &src, const BaseIndex &dest)
{
    Register base = dest.base;
    uint32_t scale = Imm32::ShiftOf(dest.scale).value;

    if (dest.offset != 0) {
        ma_add(base, Imm32(dest.offset), ScratchRegister);
        base = ScratchRegister;
    }
    ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
}

void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store16(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src);
}

void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, dest);
}
// Halfword store to a base+index address; strh cannot shift its register
// offset, so scale/displace the index through ScratchRegister first.
void
MacroAssemblerARMCompat::store16(const Register &src, const BaseIndex &address)
{
    Register index = address.index;

    // We don't have LSL on index register yet.
    if (address.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(address.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (address.offset != 0) {
        ma_add(index, Imm32(address.offset), ScratchRegister);
        index = ScratchRegister;
    }
    ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index)));
}
// 32-bit stores are identical to pointer stores on ARM.
void
MacroAssemblerARMCompat::store32(const Register &src, const AbsoluteAddress &address)
{
    storePtr(src, address);
}

void
MacroAssemblerARMCompat::store32(const Register &src, const Address &address)
{
    storePtr(src, address);
}

void
MacroAssemblerARMCompat::store32(const Imm32 &src, const Address &address)
{
    move32(src, secondScratchReg_);
    storePtr(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store32(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store32(secondScratchReg_, dest);
}

// Word store to [base + (index << scale) + offset].
// Clobbers ScratchRegister when offset != 0.
void
MacroAssemblerARMCompat::store32(const Register &src, const BaseIndex &dest)
{
    Register base = dest.base;
    uint32_t scale = Imm32::ShiftOf(dest.scale).value;

    if (dest.offset != 0) {
        ma_add(base, Imm32(dest.offset), ScratchRegister);
        base = ScratchRegister;
    }
    ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
}

// Immediate pointer stores stage the value in ScratchRegister.
void
MacroAssemblerARMCompat::storePtr(ImmWord imm, const Address &address)
{
    movePtr(imm, ScratchRegister);
    storePtr(ScratchRegister, address);
}

void
MacroAssemblerARMCompat::storePtr(ImmPtr
imm, const Address &address) michael@0: { michael@0: storePtr(ImmWord(uintptr_t(imm.value)), address); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const Address &address) michael@0: { michael@0: movePtr(imm, ScratchRegister); michael@0: storePtr(ScratchRegister, address); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::storePtr(Register src, const Address &address) michael@0: { michael@0: ma_str(src, Operand(address)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::storePtr(const Register &src, const AbsoluteAddress &dest) michael@0: { michael@0: movePtr(ImmWord(uintptr_t(dest.addr)), ScratchRegister); michael@0: storePtr(src, Address(ScratchRegister, 0x0)); michael@0: } michael@0: michael@0: // Note: this function clobbers the input register. michael@0: void michael@0: MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) michael@0: { michael@0: JS_ASSERT(input != ScratchFloatReg); michael@0: ma_vimm(0.5, ScratchFloatReg); michael@0: if (hasVFPv3()) { michael@0: Label notSplit; michael@0: ma_vadd(input, ScratchFloatReg, ScratchFloatReg); michael@0: // Convert the double into an unsigned fixed point value with 24 bits of michael@0: // precision. The resulting number will look like 0xII.DDDDDD michael@0: as_vcvtFixed(ScratchFloatReg, false, 24, true); michael@0: // Move the fixed point value into an integer register michael@0: as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore); michael@0: // see if this value *might* have been an exact integer after adding 0.5 michael@0: // This tests the 1/2 through 1/16,777,216th places, but 0.5 needs to be tested out to michael@0: // the 1/140,737,488,355,328th place. 
michael@0: ma_tst(output, Imm32(0x00ffffff)); michael@0: // convert to a uint8 by shifting out all of the fraction bits michael@0: ma_lsr(Imm32(24), output, output); michael@0: // If any of the bottom 24 bits were non-zero, then we're good, since this number michael@0: // can't be exactly XX.0 michael@0: ma_b(¬Split, NonZero); michael@0: as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore); michael@0: ma_cmp(ScratchRegister, Imm32(0)); michael@0: // If the lower 32 bits of the double were 0, then this was an exact number, michael@0: // and it should be even. michael@0: ma_bic(Imm32(1), output, NoSetCond, Zero); michael@0: bind(¬Split); michael@0: } else { michael@0: Label outOfRange; michael@0: ma_vcmpz(input); michael@0: // do the add, in place so we can reference it later michael@0: ma_vadd(input, ScratchFloatReg, input); michael@0: // do the conversion to an integer. michael@0: as_vcvt(VFPRegister(ScratchFloatReg).uintOverlay(), VFPRegister(input)); michael@0: // copy the converted value out michael@0: as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore); michael@0: as_vmrs(pc); michael@0: ma_mov(Imm32(0), output, NoSetCond, Overflow); // NaN => 0 michael@0: ma_b(&outOfRange, Overflow); // NaN michael@0: ma_cmp(output, Imm32(0xff)); michael@0: ma_mov(Imm32(0xff), output, NoSetCond, Above); michael@0: ma_b(&outOfRange, Above); michael@0: // convert it back to see if we got the same value back michael@0: as_vcvt(ScratchFloatReg, VFPRegister(ScratchFloatReg).uintOverlay()); michael@0: // do the check michael@0: as_vcmp(ScratchFloatReg, input); michael@0: as_vmrs(pc); michael@0: ma_bic(Imm32(1), output, NoSetCond, Zero); michael@0: bind(&outOfRange); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmp32(const Register &lhs, const Imm32 &rhs) michael@0: { michael@0: JS_ASSERT(lhs != ScratchRegister); michael@0: ma_cmp(lhs, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmp32(const 
Operand &lhs, const Register &rhs) michael@0: { michael@0: ma_cmp(lhs.toReg(), rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmp32(const Operand &lhs, const Imm32 &rhs) michael@0: { michael@0: JS_ASSERT(lhs.toReg() != ScratchRegister); michael@0: ma_cmp(lhs.toReg(), rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmp32(const Register &lhs, const Register &rhs) michael@0: { michael@0: ma_cmp(lhs, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmWord &rhs) michael@0: { michael@0: JS_ASSERT(lhs != ScratchRegister); michael@0: ma_cmp(lhs, Imm32(rhs.value)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmPtr &rhs) michael@0: { michael@0: return cmpPtr(lhs, ImmWord(uintptr_t(rhs.value))); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Register &rhs) michael@0: { michael@0: ma_cmp(lhs, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmGCPtr &rhs) michael@0: { michael@0: ma_cmp(lhs, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Imm32 &rhs) michael@0: { michael@0: ma_cmp(lhs, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const Register &rhs) michael@0: { michael@0: loadPtr(lhs, ScratchRegister); michael@0: cmpPtr(ScratchRegister, rhs); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmWord &rhs) michael@0: { michael@0: loadPtr(lhs, secondScratchReg_); michael@0: ma_cmp(secondScratchReg_, Imm32(rhs.value)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmPtr &rhs) michael@0: { michael@0: 
cmpPtr(lhs, ImmWord(uintptr_t(rhs.value))); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::setStackArg(const Register ®, uint32_t arg) michael@0: { michael@0: ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg); michael@0: michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::subPtr(Imm32 imm, const Register dest) michael@0: { michael@0: ma_sub(imm, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::subPtr(const Address &addr, const Register dest) michael@0: { michael@0: loadPtr(addr, ScratchRegister); michael@0: ma_sub(ScratchRegister, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::subPtr(const Register &src, const Register &dest) michael@0: { michael@0: ma_sub(src, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::subPtr(const Register &src, const Address &dest) michael@0: { michael@0: loadPtr(dest, ScratchRegister); michael@0: ma_sub(src, ScratchRegister); michael@0: storePtr(ScratchRegister, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::addPtr(Imm32 imm, const Register dest) michael@0: { michael@0: ma_add(imm, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::addPtr(Imm32 imm, const Address &dest) michael@0: { michael@0: loadPtr(dest, ScratchRegister); michael@0: addPtr(imm, ScratchRegister); michael@0: storePtr(ScratchRegister, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::compareDouble(FloatRegister lhs, FloatRegister rhs) michael@0: { michael@0: // Compare the doubles, setting vector status flags. michael@0: if (rhs == InvalidFloatReg) michael@0: ma_vcmpz(lhs); michael@0: else michael@0: ma_vcmp(lhs, rhs); michael@0: michael@0: // Move vector status bits to normal status flags. 
michael@0: as_vmrs(pc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs, michael@0: const FloatRegister &rhs, Label *label) michael@0: { michael@0: compareDouble(lhs, rhs); michael@0: michael@0: if (cond == DoubleNotEqual) { michael@0: // Force the unordered cases not to jump. michael@0: Label unordered; michael@0: ma_b(&unordered, VFP_Unordered); michael@0: ma_b(label, VFP_NotEqualOrUnordered); michael@0: bind(&unordered); michael@0: return; michael@0: } michael@0: michael@0: if (cond == DoubleEqualOrUnordered) { michael@0: ma_b(label, VFP_Unordered); michael@0: ma_b(label, VFP_Equal); michael@0: return; michael@0: } michael@0: michael@0: ma_b(label, ConditionFromDoubleCondition(cond)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::compareFloat(FloatRegister lhs, FloatRegister rhs) michael@0: { michael@0: // Compare the doubles, setting vector status flags. michael@0: if (rhs == InvalidFloatReg) michael@0: as_vcmpz(VFPRegister(lhs).singleOverlay()); michael@0: else michael@0: as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay()); michael@0: michael@0: // Move vector status bits to normal status flags. michael@0: as_vmrs(pc); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs, michael@0: const FloatRegister &rhs, Label *label) michael@0: { michael@0: compareFloat(lhs, rhs); michael@0: michael@0: if (cond == DoubleNotEqual) { michael@0: // Force the unordered cases not to jump. 
michael@0: Label unordered; michael@0: ma_b(&unordered, VFP_Unordered); michael@0: ma_b(label, VFP_NotEqualOrUnordered); michael@0: bind(&unordered); michael@0: return; michael@0: } michael@0: michael@0: if (cond == DoubleEqualOrUnordered) { michael@0: ma_b(label, VFP_Unordered); michael@0: ma_b(label, VFP_Equal); michael@0: return; michael@0: } michael@0: michael@0: ma_b(label, ConditionFromDoubleCondition(cond)); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN)); michael@0: return cond; michael@0: } michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: Assembler::Condition actual = (cond == Equal) ? 
Below : AboveOrEqual; michael@0: ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR)); michael@0: return actual; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testString(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: return testString(cond, value.typeReg()); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: return testObject(cond, value.typeReg()); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNumber(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: return testNumber(cond, value.typeReg()); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: return testMagic(cond, value.typeReg()); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const ValueOperand &value) michael@0: { michael@0: return testPrimitive(cond, value.typeReg()); michael@0: } michael@0: michael@0: // Register-based tests. 
michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_INT32)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const Register &tag) { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_NULL)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const Register &tag) { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testString(Assembler::Condition cond, const Register &tag) { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_STRING)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC)); michael@0: 
return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET)); michael@0: return cond == Equal ? Below : AboveOrEqual; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testGCThing(Assembler::Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); michael@0: return cond == Equal ? AboveOrEqual : Below; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testDouble(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testDouble(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testBoolean(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || 
cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testBoolean(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNull(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testNull(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testUndefined(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testUndefined(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testString(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testString(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testObject(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testObject(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNumber(Condition cond, const Address &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: return testNumber(cond, ScratchRegister); michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testDouble(Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); michael@0: Condition actual = (cond == Equal) ? 
Below : AboveOrEqual; michael@0: ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR)); michael@0: return actual; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNumber(Condition cond, const Register &tag) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: ma_cmp(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET)); michael@0: return cond == Equal ? BelowOrEqual : Above; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testUndefined(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_UNDEFINED)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testNull(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_NULL)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testBoolean(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_BOOLEAN)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testString(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_STRING)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testInt32(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond 
== NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testObject(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_OBJECT)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testDouble(Condition cond, const BaseIndex &src) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual; michael@0: extractTag(src, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_CLEAR)); michael@0: return actual; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testMagic(Condition cond, const BaseIndex &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC)); michael@0: return cond; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testGCThing(Condition cond, const BaseIndex &address) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: extractTag(address, ScratchRegister); michael@0: ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); michael@0: return cond == Equal ? AboveOrEqual : Below; michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand &value, const Value &v, michael@0: Label *label) michael@0: { michael@0: // If cond == NotEqual, branch when a.payload != b.payload || a.tag != b.tag. michael@0: // If the payloads are equal, compare the tags. 
If the payloads are not equal, michael@0: // short circuit true (NotEqual). michael@0: // michael@0: // If cand == Equal, branch when a.payload == b.payload && a.tag == b.tag. michael@0: // If the payloads are equal, compare the tags. If the payloads are not equal, michael@0: // short circuit false (NotEqual). michael@0: jsval_layout jv = JSVAL_TO_IMPL(v); michael@0: if (v.isMarkable()) michael@0: ma_cmp(value.payloadReg(), ImmGCPtr(reinterpret_cast(v.toGCThing()))); michael@0: else michael@0: ma_cmp(value.payloadReg(), Imm32(jv.s.payload.i32)); michael@0: ma_cmp(value.typeReg(), Imm32(jv.s.tag), Equal); michael@0: ma_b(label, cond); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchTestValue(Condition cond, const Address &valaddr, michael@0: const ValueOperand &value, Label *label) michael@0: { michael@0: JS_ASSERT(cond == Equal || cond == NotEqual); michael@0: michael@0: // Check payload before tag, since payload is more likely to differ. michael@0: if (cond == NotEqual) { michael@0: ma_ldr(payloadOf(valaddr), ScratchRegister); michael@0: branchPtr(NotEqual, ScratchRegister, value.payloadReg(), label); michael@0: michael@0: ma_ldr(tagOf(valaddr), ScratchRegister); michael@0: branchPtr(NotEqual, ScratchRegister, value.typeReg(), label); michael@0: michael@0: } else { michael@0: Label fallthrough; michael@0: michael@0: ma_ldr(payloadOf(valaddr), ScratchRegister); michael@0: branchPtr(NotEqual, ScratchRegister, value.payloadReg(), &fallthrough); michael@0: michael@0: ma_ldr(tagOf(valaddr), ScratchRegister); michael@0: branchPtr(Equal, ScratchRegister, value.typeReg(), label); michael@0: michael@0: bind(&fallthrough); michael@0: } michael@0: } michael@0: michael@0: // unboxing code michael@0: void michael@0: MacroAssemblerARMCompat::unboxInt32(const ValueOperand &operand, const Register &dest) michael@0: { michael@0: ma_mov(operand.payloadReg(), dest); michael@0: } michael@0: michael@0: void michael@0: 
MacroAssemblerARMCompat::unboxInt32(const Address &src, const Register &dest) michael@0: { michael@0: ma_ldr(payloadOf(src), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxBoolean(const ValueOperand &operand, const Register &dest) michael@0: { michael@0: ma_mov(operand.payloadReg(), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxBoolean(const Address &src, const Register &dest) michael@0: { michael@0: ma_ldr(payloadOf(src), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest) michael@0: { michael@0: JS_ASSERT(dest != ScratchFloatReg); michael@0: as_vxfer(operand.payloadReg(), operand.typeReg(), michael@0: VFPRegister(dest), CoreToFloat); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxDouble(const Address &src, const FloatRegister &dest) michael@0: { michael@0: ma_vldr(Operand(src), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxString(const ValueOperand &operand, const Register &dest) michael@0: { michael@0: ma_mov(operand.payloadReg(), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxString(const Address &src, const Register &dest) michael@0: { michael@0: ma_ldr(payloadOf(src), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxObject(const ValueOperand &src, const Register &dest) michael@0: { michael@0: ma_mov(src.payloadReg(), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxValue(const ValueOperand &src, AnyRegister dest) michael@0: { michael@0: if (dest.isFloat()) { michael@0: Label notInt32, end; michael@0: branchTestInt32(Assembler::NotEqual, src, ¬Int32); michael@0: convertInt32ToDouble(src.payloadReg(), dest.fpu()); michael@0: ma_b(&end); michael@0: bind(¬Int32); michael@0: unboxDouble(src, 
dest.fpu()); michael@0: bind(&end); michael@0: } else if (src.payloadReg() != dest.gpr()) { michael@0: as_mov(dest.gpr(), O2Reg(src.payloadReg())); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::unboxPrivate(const ValueOperand &src, Register dest) michael@0: { michael@0: ma_mov(src.payloadReg(), dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest) michael@0: { michael@0: as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest) { michael@0: if (src != dest.payloadReg()) michael@0: ma_mov(src, dest.payloadReg()); michael@0: ma_mov(ImmType(type), dest.typeReg()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) michael@0: { michael@0: VFPRegister d = VFPRegister(dest); michael@0: ma_vimm(1.0, dest); michael@0: ma_cmp(operand.payloadReg(), Imm32(0)); michael@0: // If the source is 0, then subtract the dest from itself, producing 0. michael@0: as_vsub(d, d, d, Equal); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) michael@0: { michael@0: // transfer the integral value to a floating point register michael@0: VFPRegister vfpdest = VFPRegister(dest); michael@0: as_vxfer(operand.payloadReg(), InvalidReg, michael@0: vfpdest.sintOverlay(), CoreToFloat); michael@0: // convert the value to a double. 
michael@0: as_vcvt(vfpdest, vfpdest.sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest) michael@0: { michael@0: VFPRegister d = VFPRegister(dest).singleOverlay(); michael@0: ma_vimm_f32(1.0, dest); michael@0: ma_cmp(operand.payloadReg(), Imm32(0)); michael@0: // If the source is 0, then subtract the dest from itself, producing 0. michael@0: as_vsub(d, d, d, Equal); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest) michael@0: { michael@0: // transfer the integral value to a floating point register michael@0: VFPRegister vfpdest = VFPRegister(dest).singleOverlay(); michael@0: as_vxfer(operand.payloadReg(), InvalidReg, michael@0: vfpdest.sintOverlay(), CoreToFloat); michael@0: // convert the value to a float. michael@0: as_vcvt(vfpdest, vfpdest.sintOverlay()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadConstantFloat32(float f, const FloatRegister &dest) michael@0: { michael@0: ma_vimm_f32(f, dest); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadInt32OrDouble(const Operand &src, const FloatRegister &dest) michael@0: { michael@0: Label notInt32, end; michael@0: // If it's an int, convert it to double. michael@0: ma_ldr(ToType(src), ScratchRegister); michael@0: branchTestInt32(Assembler::NotEqual, ScratchRegister, ¬Int32); michael@0: ma_ldr(ToPayload(src), ScratchRegister); michael@0: convertInt32ToDouble(ScratchRegister, dest); michael@0: ma_b(&end); michael@0: michael@0: // Not an int, just load as double. 
michael@0: bind(¬Int32); michael@0: ma_vldr(src, dest); michael@0: bind(&end); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, const FloatRegister &dest, int32_t shift) michael@0: { michael@0: Label notInt32, end; michael@0: michael@0: JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); michael@0: michael@0: // If it's an int, convert it to double. michael@0: ma_alu(base, lsl(index, shift), ScratchRegister, op_add); michael@0: michael@0: // Since we only have one scratch register, we need to stomp over it with the tag michael@0: ma_ldr(Address(ScratchRegister, NUNBOX32_TYPE_OFFSET), ScratchRegister); michael@0: branchTestInt32(Assembler::NotEqual, ScratchRegister, ¬Int32); michael@0: michael@0: // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset provided michael@0: ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), ScratchRegister); michael@0: convertInt32ToDouble(ScratchRegister, dest); michael@0: ma_b(&end); michael@0: michael@0: // Not an int, just load as double. michael@0: bind(¬Int32); michael@0: // First, recompute the offset that had been stored in the scratch register michael@0: // since the scratch register was overwritten loading in the type. michael@0: ma_alu(base, lsl(index, shift), ScratchRegister, op_add); michael@0: ma_vldr(Address(ScratchRegister, 0), dest); michael@0: bind(&end); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadConstantDouble(double dp, const FloatRegister &dest) michael@0: { michael@0: as_FImm64Pool(dest, dp); michael@0: } michael@0: michael@0: // treat the value as a boolean, and set condition codes accordingly michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand &operand) michael@0: { michael@0: ma_tst(operand.payloadReg(), operand.payloadReg()); michael@0: return truthy ? 
NonZero : Zero; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testBooleanTruthy(bool truthy, const ValueOperand &operand) michael@0: { michael@0: ma_tst(operand.payloadReg(), operand.payloadReg()); michael@0: return truthy ? NonZero : Zero; michael@0: } michael@0: michael@0: Assembler::Condition michael@0: MacroAssemblerARMCompat::testDoubleTruthy(bool truthy, const FloatRegister ®) michael@0: { michael@0: as_vcmpz(VFPRegister(reg)); michael@0: as_vmrs(pc); michael@0: as_cmp(r0, O2Reg(r0), Overflow); michael@0: return truthy ? NonZero : Zero; michael@0: } michael@0: michael@0: Register michael@0: MacroAssemblerARMCompat::extractObject(const Address &address, Register scratch) michael@0: { michael@0: ma_ldr(payloadOf(address), scratch); michael@0: return scratch; michael@0: } michael@0: michael@0: Register michael@0: MacroAssemblerARMCompat::extractTag(const Address &address, Register scratch) michael@0: { michael@0: ma_ldr(tagOf(address), scratch); michael@0: return scratch; michael@0: } michael@0: michael@0: Register michael@0: MacroAssemblerARMCompat::extractTag(const BaseIndex &address, Register scratch) michael@0: { michael@0: ma_alu(address.base, lsl(address.index, address.scale), scratch, op_add, NoSetCond); michael@0: return extractTag(Address(scratch, address.offset), scratch); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::moveValue(const Value &val, Register type, Register data) michael@0: { michael@0: jsval_layout jv = JSVAL_TO_IMPL(val); michael@0: ma_mov(Imm32(jv.s.tag), type); michael@0: if (val.isMarkable()) michael@0: ma_mov(ImmGCPtr(reinterpret_cast(val.toGCThing())), data); michael@0: else michael@0: ma_mov(Imm32(jv.s.payload.i32), data); michael@0: } michael@0: void michael@0: MacroAssemblerARMCompat::moveValue(const Value &val, const ValueOperand &dest) michael@0: { michael@0: moveValue(val, dest.typeReg(), dest.payloadReg()); michael@0: } michael@0: michael@0: 
///////////////////////////////////////////////////////////////// michael@0: // X86/X64-common (ARM too now) interface. michael@0: ///////////////////////////////////////////////////////////////// michael@0: void michael@0: MacroAssemblerARMCompat::storeValue(ValueOperand val, Operand dst) michael@0: { michael@0: ma_str(val.payloadReg(), ToPayload(dst)); michael@0: ma_str(val.typeReg(), ToType(dst)); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex &dest) michael@0: { michael@0: if (isValueDTRDCandidate(val) && Abs(dest.offset) <= 255) { michael@0: Register tmpIdx; michael@0: if (dest.offset == 0) { michael@0: if (dest.scale == TimesOne) { michael@0: tmpIdx = dest.index; michael@0: } else { michael@0: ma_lsl(Imm32(dest.scale), dest.index, ScratchRegister); michael@0: tmpIdx = ScratchRegister; michael@0: } michael@0: ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx))); michael@0: } else { michael@0: ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); michael@0: ma_strd(val.payloadReg(), val.typeReg(), michael@0: EDtrAddr(ScratchRegister, EDtrOffImm(dest.offset))); michael@0: } michael@0: } else { michael@0: ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); michael@0: storeValue(val, Address(ScratchRegister, dest.offset)); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadValue(const BaseIndex &addr, ValueOperand val) michael@0: { michael@0: if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) { michael@0: Register tmpIdx; michael@0: if (addr.offset == 0) { michael@0: if (addr.scale == TimesOne) { michael@0: tmpIdx = addr.index; michael@0: } else { michael@0: ma_lsl(Imm32(addr.scale), addr.index, ScratchRegister); michael@0: tmpIdx = ScratchRegister; michael@0: } michael@0: ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg()); michael@0: } else { 
michael@0: ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); michael@0: ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(addr.offset)), michael@0: val.payloadReg(), val.typeReg()); michael@0: } michael@0: } else { michael@0: ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); michael@0: loadValue(Address(ScratchRegister, addr.offset), val); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val) michael@0: { michael@0: Operand srcOp = Operand(src); michael@0: Operand payload = ToPayload(srcOp); michael@0: Operand type = ToType(srcOp); michael@0: // TODO: copy this code into a generic function that acts on all sequences of memory accesses michael@0: if (isValueDTRDCandidate(val)) { michael@0: // If the value we want is in two consecutive registers starting with an even register, michael@0: // they can be combined as a single ldrd. michael@0: int offset = srcOp.disp(); michael@0: if (offset < 256 && offset > -256) { michael@0: ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())), val.payloadReg(), val.typeReg()); michael@0: return; michael@0: } michael@0: } michael@0: // if the value is lower than the type, then we may be able to use an ldm instruction michael@0: michael@0: if (val.payloadReg().code() < val.typeReg().code()) { michael@0: if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) { michael@0: // turns out each of the 4 value -8, -4, 0, 4 corresponds exactly with one of michael@0: // LDM{DB, DA, IA, IB} michael@0: DTMMode mode; michael@0: switch(srcOp.disp()) { michael@0: case -8: michael@0: mode = DB; michael@0: break; michael@0: case -4: michael@0: mode = DA; michael@0: break; michael@0: case 0: michael@0: mode = IA; michael@0: break; michael@0: case 4: michael@0: mode = IB; michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Bogus Offset for LoadValue as DTM"); michael@0: } 
michael@0: startDataTransferM(IsLoad, Register::FromCode(srcOp.base()), mode); michael@0: transferReg(val.payloadReg()); michael@0: transferReg(val.typeReg()); michael@0: finishDataTransfer(); michael@0: return; michael@0: } michael@0: } michael@0: // Ensure that loading the payload does not erase the pointer to the michael@0: // Value in memory. michael@0: if (Register::FromCode(type.base()) != val.payloadReg()) { michael@0: ma_ldr(payload, val.payloadReg()); michael@0: ma_ldr(type, val.typeReg()); michael@0: } else { michael@0: ma_ldr(type, val.typeReg()); michael@0: ma_ldr(payload, val.payloadReg()); michael@0: } michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::tagValue(JSValueType type, Register payload, ValueOperand dest) michael@0: { michael@0: JS_ASSERT(dest.typeReg() != dest.payloadReg()); michael@0: if (payload != dest.payloadReg()) michael@0: ma_mov(payload, dest.payloadReg()); michael@0: ma_mov(ImmType(type), dest.typeReg()); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::pushValue(ValueOperand val) { michael@0: ma_push(val.typeReg()); michael@0: ma_push(val.payloadReg()); michael@0: } michael@0: void michael@0: MacroAssemblerARMCompat::pushValue(const Address &addr) michael@0: { michael@0: JS_ASSERT(addr.base != StackPointer); michael@0: Operand srcOp = Operand(addr); michael@0: Operand payload = ToPayload(srcOp); michael@0: Operand type = ToType(srcOp); michael@0: michael@0: ma_ldr(type, ScratchRegister); michael@0: ma_push(ScratchRegister); michael@0: ma_ldr(payload, ScratchRegister); michael@0: ma_push(ScratchRegister); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::popValue(ValueOperand val) { michael@0: ma_pop(val.payloadReg()); michael@0: ma_pop(val.typeReg()); michael@0: } michael@0: void michael@0: MacroAssemblerARMCompat::storePayload(const Value &val, Operand dest) michael@0: { michael@0: jsval_layout jv = JSVAL_TO_IMPL(val); michael@0: if (val.isMarkable()) 
michael@0: ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), secondScratchReg_);
michael@0: else
michael@0: ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
michael@0: ma_str(secondScratchReg_, ToPayload(dest));
michael@0: }
michael@0: void
michael@0: MacroAssemblerARMCompat::storePayload(Register src, Operand dest)
michael@0: {
michael@0: if (dest.getTag() == Operand::MEM) {
michael@0: ma_str(src, ToPayload(dest));
michael@0: return;
michael@0: }
michael@0: MOZ_ASSUME_UNREACHABLE("why do we do all of these things?");
michael@0: 
michael@0: }
michael@0: 
michael@0: // Store an immediate Value's payload at [base + (index << shift)].
michael@0: void
michael@0: MacroAssemblerARMCompat::storePayload(const Value &val, Register base, Register index, int32_t shift)
michael@0: {
michael@0: jsval_layout jv = JSVAL_TO_IMPL(val);
michael@0: if (val.isMarkable())
michael@0: ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister);
michael@0: else
michael@0: ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
michael@0: JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
michael@0: // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm]
michael@0: // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call.
michael@0: as_dtr(IsStore, 32, Offset, ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
michael@0: }
michael@0: void
michael@0: MacroAssemblerARMCompat::storePayload(Register src, Register base, Register index, int32_t shift)
michael@0: {
michael@0: JS_ASSERT((shift < 32) && (shift >= 0));
michael@0: // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm]
michael@0: // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call.
michael@0: JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
michael@0: // Technically, shift > -32 can be handled by changing LSL to ASR, but should never come up,
michael@0: // and this is one less code path to get wrong.
michael@0: as_dtr(IsStore, 32, Offset, src, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) {
michael@0: if (dest.getTag() == Operand::MEM) {
michael@0: ma_mov(tag, secondScratchReg_);
michael@0: ma_str(secondScratchReg_, ToType(dest));
michael@0: return;
michael@0: }
michael@0: 
michael@0: MOZ_ASSUME_UNREACHABLE("why do we do all of these things?");
michael@0: 
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift) {
michael@0: JS_ASSERT(base != ScratchRegister);
michael@0: JS_ASSERT(index != ScratchRegister);
michael@0: // The tag needs to be stored at base + index << shift + NUNBOX32_TYPE_OFFSET (4).
michael@0: // Arm cannot handle this in a single operand, so a temp register is required.
michael@0: // However, the scratch register is presently in use to hold the immediate that
michael@0: // is being stored into said memory location. Work around this by modifying
michael@0: // the base so the valid [base + index << shift] format can be used, then
michael@0: // restore it.
michael@0: ma_add(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
michael@0: ma_mov(tag, ScratchRegister);
michael@0: ma_str(ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
michael@0: ma_sub(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
michael@0: }
michael@0: 
michael@0: // Publish the current sp to the runtime's ionTop slot so the exit frame
michael@0: // can be found by stack walkers.
michael@0: void
michael@0: MacroAssemblerARMCompat::linkExitFrame() {
michael@0: uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
michael@0: movePtr(ImmPtr(dest), ScratchRegister);
michael@0: ma_str(StackPointer, Operand(ScratchRegister, 0));
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::linkParallelExitFrame(const Register &pt)
michael@0: {
michael@0: ma_str(StackPointer, Operand(pt, offsetof(PerThreadData, ionTop)));
michael@0: }
michael@0: 
michael@0: // ARM says that all reads of pc will return 8 higher than the
michael@0: // address of the currently executing instruction. This means we are
michael@0: // correctly storing the address of the instruction after the call
michael@0: // in the register.
michael@0: // Also ION is breaking the ARM EABI here (sort of). The ARM EABI
michael@0: // says that a function call should move the pc into the link register,
michael@0: // then branch to the function, and *sp is data that is owned by the caller,
michael@0: // not the callee. The ION ABI says *sp should be the address that
michael@0: // we will return to when leaving this function
michael@0: void
michael@0: MacroAssemblerARM::ma_callIon(const Register r)
michael@0: {
michael@0: // When the stack is 8 byte aligned,
michael@0: // we want to decrement sp by 8, and write pc+8 into the new sp.
michael@0: // when we return from this call, sp will be its present value minus 4.
michael@0: AutoForbidPools afp(this);
michael@0: as_dtr(IsStore, 32, PreIndex, pc, DTRAddr(sp, DtrOffImm(-8)));
michael@0: as_blx(r);
michael@0: }
michael@0: void
michael@0: MacroAssemblerARM::ma_callIonNoPush(const Register r)
michael@0: {
michael@0: // Since we just write the return address into the stack, which is
michael@0: // popped on return, the net effect is removing 4 bytes from the stack
michael@0: AutoForbidPools afp(this);
michael@0: as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
michael@0: as_blx(r);
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARM::ma_callIonHalfPush(const Register r)
michael@0: {
michael@0: // The stack is unaligned by 4 bytes.
michael@0: // We push the pc to the stack to align the stack before the call, when we
michael@0: // return the pc is poped and the stack is restored to its unaligned state.
michael@0: AutoForbidPools afp(this);
michael@0: ma_push(pc);
michael@0: as_blx(r);
michael@0: }
michael@0: 
michael@0: // Call an absolute address, materializing it into CallReg with a patchable
michael@0: // sequence (movw/movt when available, otherwise a constant-pool ldr).
michael@0: void
michael@0: MacroAssemblerARM::ma_call(ImmPtr dest)
michael@0: {
michael@0: RelocStyle rs;
michael@0: if (hasMOVWT())
michael@0: rs = L_MOVWT;
michael@0: else
michael@0: rs = L_LDR;
michael@0: 
michael@0: ma_movPatchable(dest, CallReg, Always, rs);
michael@0: as_blx(CallReg);
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARM::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes)
michael@0: {
michael@0: // Note: this function stores the return address to sp[0]. The caller must
michael@0: // anticipate this by pushing additional space on the stack. The ABI does
michael@0: // not provide space for a return address so this function may only be
michael@0: // called if no argument are passed.
michael@0: JS_ASSERT(stackArgBytes == 0);
michael@0: AutoForbidPools afp(this);
michael@0: as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
michael@0: as_blx(r);
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::breakpoint()
michael@0: {
michael@0: as_bkpt();
michael@0: }
michael@0: 
michael@0: // Ensure |source| ends up in |dest| as a double: doubles are unboxed,
michael@0: // int32s are converted, anything else jumps to |failure|.
michael@0: void
michael@0: MacroAssemblerARMCompat::ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure)
michael@0: {
michael@0: Label isDouble, done;
michael@0: branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
michael@0: branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
michael@0: 
michael@0: convertInt32ToDouble(source.payloadReg(), dest);
michael@0: jump(&done);
michael@0: 
michael@0: bind(&isDouble);
michael@0: unboxDouble(source, dest);
michael@0: 
michael@0: bind(&done);
michael@0: }
michael@0: 
michael@0: // NOTE(review): conditional "breakpoint" — emits a load from [r12 - r12]
michael@0: // (i.e. address 0) under |cc|, presumably to fault when the condition
michael@0: // holds; confirm this is the intended trap mechanism.
michael@0: void
michael@0: MacroAssemblerARMCompat::breakpoint(Condition cc)
michael@0: {
michael@0: ma_ldr(DTRAddr(r12, DtrRegImmShift(r12, LSL, 0, IsDown)), r12, Offset, cc);
michael@0: }
michael@0: 
michael@0: // Reset all per-call ABI bookkeeping before arguments are passed.
michael@0: void
michael@0: MacroAssemblerARMCompat::setupABICall(uint32_t args)
michael@0: {
michael@0: JS_ASSERT(!inCall_);
michael@0: inCall_ = true;
michael@0: args_ = args;
michael@0: passedArgs_ = 0;
michael@0: passedArgTypes_ = 0;
michael@0: usedIntSlots_ = 0;
michael@0: #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
michael@0: usedFloatSlots_ = 0;
michael@0: usedFloat32_ = false;
michael@0: padding_ = 0;
michael@0: #endif
michael@0: floatArgsInGPR[0] = MoveOperand();
michael@0: floatArgsInGPR[1] = MoveOperand();
michael@0: floatArgsInGPRValid[0] = false;
michael@0: floatArgsInGPRValid[1] = false;
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::setupAlignedABICall(uint32_t args)
michael@0: {
michael@0: setupABICall(args);
michael@0: 
michael@0: dynamicAlignment_ = false;
michael@0: }
michael@0: 
michael@0: void
michael@0: 
MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, const Register &scratch)
michael@0: {
michael@0: setupABICall(args);
michael@0: dynamicAlignment_ = true;
michael@0: 
michael@0: // Save the original sp in |scratch|, align, then push the saved sp so
michael@0: // callWithABIPost can restore it.
michael@0: ma_mov(sp, scratch);
michael@0: 
michael@0: // Force sp to be aligned
michael@0: ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
michael@0: ma_push(scratch);
michael@0: }
michael@0: 
michael@0: #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
michael@0: // Route one argument for a hard-float ABI call: floats go to VFP argument
michael@0: // registers (or stack), integers to core registers (or stack); each routed
michael@0: // move is recorded in moveResolver_ and the arg type in passedArgTypes_.
michael@0: void
michael@0: MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type type)
michael@0: {
michael@0: MoveOperand to;
michael@0: ++passedArgs_;
michael@0: if (!enoughMemory_)
michael@0: return;
michael@0: switch (type) {
michael@0: case MoveOp::FLOAT32:
michael@0: case MoveOp::DOUBLE: {
michael@0: // N.B. this isn't a limitation of the ABI, it is a limitation of the compiler right now.
michael@0: // There isn't a good way to handle odd numbered single registers, so everything goes to hell
michael@0: // when we try. Current fix is to never use more than one float in a function call.
michael@0: // Fix coming along with complete float32 support in bug 957504.
michael@0: JS_ASSERT(!usedFloat32_);
michael@0: if (type == MoveOp::FLOAT32)
michael@0: usedFloat32_ = true;
michael@0: FloatRegister fr;
michael@0: if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
michael@0: if (from.isFloatReg() && from.floatReg() == fr) {
michael@0: // Nothing to do; the value is in the right register already
michael@0: usedFloatSlots_++;
michael@0: if (type == MoveOp::FLOAT32)
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
michael@0: else
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
michael@0: return;
michael@0: }
michael@0: to = MoveOperand(fr);
michael@0: } else {
michael@0: // If (and only if) the integer registers have started spilling, do we
michael@0: // need to take the register's alignment into account
michael@0: uint32_t disp = INT_MAX;
michael@0: if (type == MoveOp::FLOAT32)
michael@0: disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
michael@0: else
michael@0: disp = GetDoubleArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
michael@0: to = MoveOperand(sp, disp);
michael@0: }
michael@0: usedFloatSlots_++;
michael@0: if (type == MoveOp::FLOAT32)
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
michael@0: else
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
michael@0: break;
michael@0: }
michael@0: case MoveOp::GENERAL: {
michael@0: Register r;
michael@0: if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
michael@0: if (from.isGeneralReg() && from.reg() == r) {
michael@0: // Nothing to do; the value is in the right register already
michael@0: usedIntSlots_++;
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
michael@0: return;
michael@0: }
michael@0: to = MoveOperand(r);
michael@0: } else {
michael@0: uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
michael@0: to = MoveOperand(sp, disp);
michael@0: }
michael@0: usedIntSlots_++;
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
michael@0: break;
michael@0: }
michael@0: default:
michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
michael@0: }
michael@0: 
michael@0: enoughMemory_ = moveResolver_.addMove(from, to, type);
michael@0: }
michael@0: #endif
michael@0: 
michael@0: #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
michael@0: // Route one argument for a soft-float ABI call: everything travels in core
michael@0: // registers / stack slots; doubles are 8-byte aligned and consume two slots.
michael@0: // Floats destined for a GPR pair are deferred via floatArgsInGPR[] and
michael@0: // transferred in callWithABIPre.
michael@0: void
michael@0: MacroAssemblerARMCompat::passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type)
michael@0: {
michael@0: MoveOperand to;
michael@0: uint32_t increment = 1;
michael@0: bool useResolver = true;
michael@0: ++passedArgs_;
michael@0: switch (type) {
michael@0: case MoveOp::DOUBLE:
michael@0: // Double arguments need to be rounded up to the nearest doubleword
michael@0: // boundary, even if it is in a register!
michael@0: usedIntSlots_ = (usedIntSlots_ + 1) & ~1;
michael@0: increment = 2;
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
michael@0: break;
michael@0: case MoveOp::FLOAT32:
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
michael@0: break;
michael@0: case MoveOp::GENERAL:
michael@0: passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
michael@0: break;
michael@0: default:
michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
michael@0: }
michael@0: 
michael@0: Register destReg;
michael@0: MoveOperand dest;
michael@0: if (GetIntArgReg(usedIntSlots_, 0, &destReg)) {
michael@0: if (type == MoveOp::DOUBLE || type == MoveOp::FLOAT32) {
michael@0: floatArgsInGPR[destReg.code() >> 1] = from;
michael@0: floatArgsInGPRValid[destReg.code() >> 1] = true;
michael@0: useResolver = false;
michael@0: } else if (from.isGeneralReg() && from.reg() == destReg) {
michael@0: // No need to move anything
michael@0: useResolver = false;
michael@0: } else {
michael@0: dest = MoveOperand(destReg);
michael@0: }
michael@0: }
else { michael@0: uint32_t disp = GetArgStackDisp(usedIntSlots_); michael@0: dest = MoveOperand(sp, disp); michael@0: } michael@0: michael@0: if (useResolver) michael@0: enoughMemory_ = enoughMemory_ && moveResolver_.addMove(from, dest, type); michael@0: usedIntSlots_ += increment; michael@0: } michael@0: #endif michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::passABIArg(const MoveOperand &from, MoveOp::Type type) michael@0: { michael@0: #if defined(JS_ARM_SIMULATOR) michael@0: if (useHardFpABI()) michael@0: MacroAssemblerARMCompat::passHardFpABIArg(from, type); michael@0: else michael@0: MacroAssemblerARMCompat::passSoftFpABIArg(from, type); michael@0: #elif defined(JS_CODEGEN_ARM_HARDFP) michael@0: MacroAssemblerARMCompat::passHardFpABIArg(from, type); michael@0: #else michael@0: MacroAssemblerARMCompat::passSoftFpABIArg(from, type); michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::passABIArg(const Register ®) michael@0: { michael@0: passABIArg(MoveOperand(reg), MoveOp::GENERAL); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type) michael@0: { michael@0: passABIArg(MoveOperand(freg), type); michael@0: } michael@0: michael@0: void MacroAssemblerARMCompat::checkStackAlignment() michael@0: { michael@0: #ifdef DEBUG michael@0: ma_tst(sp, Imm32(StackAlignment - 1)); michael@0: breakpoint(NonZero); michael@0: #endif michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust) michael@0: { michael@0: JS_ASSERT(inCall_); michael@0: michael@0: *stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t); michael@0: #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) michael@0: if (useHardFpABI()) michael@0: *stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? 
usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
michael@0: #endif
michael@0: if (!dynamicAlignment_) {
michael@0: *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment);
michael@0: } else {
michael@0: // sizeof(intptr_t) account for the saved stack pointer pushed by setupUnalignedABICall
michael@0: *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
michael@0: }
michael@0: 
michael@0: reserveStack(*stackAdjust);
michael@0: 
michael@0: // Position all arguments.
michael@0: {
michael@0: enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
michael@0: if (!enoughMemory_)
michael@0: return;
michael@0: 
michael@0: MoveEmitter emitter(*this);
michael@0: emitter.emit(moveResolver_);
michael@0: emitter.finish();
michael@0: }
michael@0: // Transfer any float args deferred to GPR pairs (r0/r1 or r2/r3) by
michael@0: // passSoftFpABIArg.
michael@0: for (int i = 0; i < 2; i++) {
michael@0: if (floatArgsInGPRValid[i]) {
michael@0: MoveOperand from = floatArgsInGPR[i];
michael@0: Register to0 = Register::FromCode(i * 2), to1 = Register::FromCode(i * 2 + 1);
michael@0: 
michael@0: if (from.isFloatReg()) {
michael@0: ma_vxfer(VFPRegister(from.floatReg()), to0, to1);
michael@0: } else {
michael@0: JS_ASSERT(from.isMemory());
michael@0: // Note: We can safely use the MoveOperand's displacement here,
michael@0: // even if the base is SP: MoveEmitter::toOperand adjusts
michael@0: // SP-relative operands by the difference between the current
michael@0: // stack usage and stackAdjust, which emitter.finish() resets
michael@0: // to 0.
michael@0: //
michael@0: // Warning: if the offset isn't within [-255,+255] then this
michael@0: // will assert-fail (or, if non-debug, load the wrong words).
michael@0: // Nothing uses such an offset at the time of this writing.
michael@0: ma_ldrd(EDtrAddr(from.base(), EDtrOffImm(from.disp())), to0, to1);
michael@0: }
michael@0: }
michael@0: }
michael@0: checkStackAlignment();
michael@0: 
michael@0: // Save the lr register if we need to preserve it.
michael@0: if (secondScratchReg_ != lr)
michael@0: ma_mov(lr, secondScratchReg_);
michael@0: }
michael@0: 
michael@0: // Restore lr, marshal the return value out of r0/r1 for soft-float
michael@0: // results, and undo the stack adjustment made by callWithABIPre.
michael@0: void
michael@0: MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
michael@0: {
michael@0: if (secondScratchReg_ != lr)
michael@0: ma_mov(secondScratchReg_, lr);
michael@0: 
michael@0: switch (result) {
michael@0: case MoveOp::DOUBLE:
michael@0: if (!useHardFpABI()) {
michael@0: // Move double from r0/r1 to ReturnFloatReg.
michael@0: as_vxfer(r0, r1, ReturnFloatReg, CoreToFloat);
michael@0: break;
michael@0: }
michael@0: // NOTE(review): no break here — apparently an intentional fallthrough
michael@0: // when useHardFpABI() (result is already in a VFP register); confirm.
michael@0: case MoveOp::FLOAT32:
michael@0: if (!useHardFpABI()) {
michael@0: // Move float32 from r0 to ReturnFloatReg.
michael@0: as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), CoreToFloat);
michael@0: break;
michael@0: }
michael@0: case MoveOp::GENERAL:
michael@0: break;
michael@0: 
michael@0: default:
michael@0: MOZ_ASSUME_UNREACHABLE("unexpected callWithABI result");
michael@0: }
michael@0: 
michael@0: freeStack(stackAdjust);
michael@0: 
michael@0: if (dynamicAlignment_) {
michael@0: // x86 supports pop esp.
on arm, that isn't well defined, so just
michael@0: // do it manually
michael@0: as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
michael@0: }
michael@0: 
michael@0: JS_ASSERT(inCall_);
michael@0: inCall_ = false;
michael@0: }
michael@0: 
michael@0: #if defined(DEBUG) && defined(JS_ARM_SIMULATOR)
michael@0: // Sanity-check that the accumulated argument/return signature is one the
michael@0: // simulator knows how to redirect.
michael@0: static void
michael@0: AssertValidABIFunctionType(uint32_t passedArgTypes)
michael@0: {
michael@0: switch (passedArgTypes) {
michael@0: case Args_General0:
michael@0: case Args_General1:
michael@0: case Args_General2:
michael@0: case Args_General3:
michael@0: case Args_General4:
michael@0: case Args_General5:
michael@0: case Args_General6:
michael@0: case Args_General7:
michael@0: case Args_General8:
michael@0: case Args_Double_None:
michael@0: case Args_Int_Double:
michael@0: case Args_Float32_Float32:
michael@0: case Args_Double_Double:
michael@0: case Args_Double_Int:
michael@0: case Args_Double_DoubleInt:
michael@0: case Args_Double_DoubleDouble:
michael@0: case Args_Double_IntDouble:
michael@0: case Args_Int_IntDouble:
michael@0: break;
michael@0: default:
michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected type");
michael@0: }
michael@0: }
michael@0: #endif
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::callWithABI(void *fun, MoveOp::Type result)
michael@0: {
michael@0: #ifdef JS_ARM_SIMULATOR
michael@0: // Under the simulator, fold the return type into passedArgTypes_ and
michael@0: // swap the target for a simulator redirection trampoline.
michael@0: MOZ_ASSERT(passedArgs_ <= 15);
michael@0: passedArgTypes_ <<= ArgType_Shift;
michael@0: switch (result) {
michael@0: case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
michael@0: case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break;
michael@0: case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
michael@0: default: MOZ_ASSUME_UNREACHABLE("Invalid return type");
michael@0: }
michael@0: #ifdef DEBUG
michael@0: AssertValidABIFunctionType(passedArgTypes_);
michael@0: #endif
michael@0: ABIFunctionType type = ABIFunctionType(passedArgTypes_);
michael@0: fun = Simulator::RedirectNativeFunction(fun, type);
michael@0: #endif
michael@0: 
michael@0: uint32_t stackAdjust;
michael@0: callWithABIPre(&stackAdjust);
michael@0: ma_call(ImmPtr(fun));
michael@0: callWithABIPost(stackAdjust, result);
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
michael@0: {
michael@0: uint32_t stackAdjust;
michael@0: callWithABIPre(&stackAdjust);
michael@0: call(imm);
michael@0: callWithABIPost(stackAdjust, result);
michael@0: }
michael@0: 
michael@0: void
michael@0: MacroAssemblerARMCompat::callWithABI(const Address &fun, MoveOp::Type result)
michael@0: {
michael@0: // Load the callee in r12, no instruction between the ldr and call
michael@0: // should clobber it. Note that we can't use fun.base because it may
michael@0: // be one of the IntArg registers clobbered before the call.
michael@0: ma_ldr(fun, r12);
michael@0: uint32_t stackAdjust;
michael@0: callWithABIPre(&stackAdjust);
michael@0: call(r12);
michael@0: callWithABIPost(stackAdjust, result);
michael@0: }
michael@0: 
michael@0: // Carve out an 8-byte-aligned ResumeFromException on the stack and call the
michael@0: // C++ exception handler with its address; then jump to the exception tail.
michael@0: void
michael@0: MacroAssemblerARMCompat::handleFailureWithHandler(void *handler)
michael@0: {
michael@0: // Reserve space for exception information.
michael@0: int size = (sizeof(ResumeFromException) + 7) & ~7;
michael@0: ma_sub(Imm32(size), sp);
michael@0: ma_mov(sp, r0);
michael@0: 
michael@0: // Ask for an exception handler.
michael@0: setupUnalignedABICall(1, r1);
michael@0: passABIArg(r0);
michael@0: callWithABI(handler);
michael@0: 
michael@0: JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
michael@0: branch(excTail);
michael@0: }
michael@0: 
michael@0: // Dispatch on the ResumeFromException::kind left on the stack by the C++
michael@0: // handler and resume execution accordingly.
michael@0: void
michael@0: MacroAssemblerARMCompat::handleFailureWithHandlerTail()
michael@0: {
michael@0: Label entryFrame;
michael@0: Label catch_;
michael@0: Label finally;
michael@0: Label return_;
michael@0: Label bailout;
michael@0: 
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, kind)), r0);
michael@0: branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
michael@0: branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
michael@0: branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
michael@0: branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
michael@0: branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
michael@0: 
michael@0: breakpoint(); // Invalid kind.
michael@0: 
michael@0: // No exception handler. Load the error value, load the new stack pointer
michael@0: // and return from the entry frame.
michael@0: bind(&entryFrame);
michael@0: moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
michael@0: 
michael@0: // We're going to be returning by the ion calling convention, which returns
michael@0: // by ??? (for now, I think ldr pc, [sp]!)
michael@0: as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
michael@0: 
michael@0: // If we found a catch handler, this must be a baseline frame. Restore state
michael@0: // and jump to the catch block.
michael@0: bind(&catch_);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
michael@0: jump(r0);
michael@0: 
michael@0: // If we found a finally block, this must be a baseline frame. Push
michael@0: // two values expected by JSOP_RETSUB: BooleanValue(true) and the
michael@0: // exception.
michael@0: bind(&finally);
michael@0: ValueOperand exception = ValueOperand(r1, r2);
michael@0: loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);
michael@0: 
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
michael@0: 
michael@0: pushValue(BooleanValue(true));
michael@0: pushValue(exception);
michael@0: jump(r0);
michael@0: 
michael@0: // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
michael@0: bind(&return_);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
michael@0: loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
michael@0: ma_mov(r11, sp);
michael@0: pop(r11);
michael@0: ret();
michael@0: 
michael@0: // If we are bailing out to baseline to handle an exception, jump to
michael@0: // the bailout tail stub.
michael@0: bind(&bailout);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
michael@0: ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
michael@0: ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r1);
michael@0: jump(r1);
michael@0: }
michael@0: 
michael@0: // A string is truthy iff its length is non-zero: mask the flags out of the
michael@0: // lengthAndFlags word and set condition codes on the remainder.
michael@0: Assembler::Condition
michael@0: MacroAssemblerARMCompat::testStringTruthy(bool truthy, const ValueOperand &value)
michael@0: {
michael@0: Register string = value.payloadReg();
michael@0: 
michael@0: size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
michael@0: ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLengthAndFlags()), ScratchRegister);
michael@0: // Bit clear into the scratch register. This is done because there is performs the operation
michael@0: // dest <- src1 & ~ src2. There is no instruction that does this without writing
michael@0: // the result somewhere, so the Scratch Register is sacrificed.
michael@0: ma_bic(Imm32(~mask), ScratchRegister, SetCond);
michael@0: return truthy ? Assembler::NonZero : Assembler::Zero;
michael@0: }
michael@0: 
michael@0: // floor(input) into output; bails out (to the interpreter) for NaN, -0,
michael@0: // and results outside the int32 range.
michael@0: void
michael@0: MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label *bail)
michael@0: {
michael@0: Label handleZero;
michael@0: Label handleNeg;
michael@0: Label fin;
michael@0: compareDouble(input, InvalidFloatReg);
michael@0: ma_b(&handleZero, Assembler::Equal);
michael@0: ma_b(&handleNeg, Assembler::Signed);
michael@0: // NaN is always a bail condition, just bail directly.
michael@0: ma_b(bail, Assembler::Overflow); michael@0: michael@0: // The argument is a positive number, truncation is the path to glory; michael@0: // Since it is known to be > 0.0, explicitly convert to a larger range, michael@0: // then a value that rounds to INT_MAX is explicitly different from an michael@0: // argument that clamps to INT_MAX michael@0: ma_vcvt_F64_U32(input, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_mov(output, output, SetCond); michael@0: ma_b(bail, Signed); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleZero); michael@0: // Move the top word of the double into the output reg, if it is non-zero, michael@0: // then the original value was -0.0 michael@0: as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); michael@0: ma_cmp(output, Imm32(0)); michael@0: ma_b(bail, NonZero); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleNeg); michael@0: // Negative case, negate, then start dancing michael@0: ma_vneg(input, input); michael@0: ma_vcvt_F64_U32(input, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg); michael@0: compareDouble(ScratchFloatReg, input); michael@0: ma_add(output, Imm32(1), output, NoSetCond, NotEqual); michael@0: // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, michael@0: // the result will still be a negative number michael@0: ma_rsb(output, Imm32(0), output, SetCond); michael@0: // Flip the negated input back to its original value. michael@0: ma_vneg(input, input); michael@0: // If the result looks non-negative, then this value didn't actually fit into michael@0: // the int range, and special handling is required. michael@0: // zero is also caught by this case, but floor of a negative number michael@0: // should never be zero. 
michael@0: ma_b(bail, NotSigned); michael@0: michael@0: bind(&fin); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bail) michael@0: { michael@0: Label handleZero; michael@0: Label handleNeg; michael@0: Label fin; michael@0: compareFloat(input, InvalidFloatReg); michael@0: ma_b(&handleZero, Assembler::Equal); michael@0: ma_b(&handleNeg, Assembler::Signed); michael@0: // NaN is always a bail condition, just bail directly. michael@0: ma_b(bail, Assembler::Overflow); michael@0: michael@0: // The argument is a positive number, truncation is the path to glory; michael@0: // Since it is known to be > 0.0, explicitly convert to a larger range, michael@0: // then a value that rounds to INT_MAX is explicitly different from an michael@0: // argument that clamps to INT_MAX michael@0: ma_vcvt_F32_U32(input, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_mov(output, output, SetCond); michael@0: ma_b(bail, Signed); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleZero); michael@0: // Move the top word of the double into the output reg, if it is non-zero, michael@0: // then the original value was -0.0 michael@0: as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0); michael@0: ma_cmp(output, Imm32(0)); michael@0: ma_b(bail, NonZero); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleNeg); michael@0: // Negative case, negate, then start dancing michael@0: ma_vneg_f32(input, input); michael@0: ma_vcvt_F32_U32(input, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg); michael@0: compareFloat(ScratchFloatReg, input); michael@0: ma_add(output, Imm32(1), output, NoSetCond, NotEqual); michael@0: // Negate the output. 
Since INT_MIN < -INT_MAX, even after adding 1, michael@0: // the result will still be a negative number michael@0: ma_rsb(output, Imm32(0), output, SetCond); michael@0: // Flip the negated input back to its original value. michael@0: ma_vneg_f32(input, input); michael@0: // If the result looks non-negative, then this value didn't actually fit into michael@0: // the int range, and special handling is required. michael@0: // zero is also caught by this case, but floor of a negative number michael@0: // should never be zero. michael@0: ma_b(bail, NotSigned); michael@0: michael@0: bind(&fin); michael@0: } michael@0: michael@0: CodeOffsetLabel michael@0: MacroAssemblerARMCompat::toggledJump(Label *label) michael@0: { michael@0: // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp(). michael@0: michael@0: BufferOffset b = ma_b(label, Always, true); michael@0: CodeOffsetLabel ret(b.getOffset()); michael@0: return ret; michael@0: } michael@0: michael@0: CodeOffsetLabel michael@0: MacroAssemblerARMCompat::toggledCall(JitCode *target, bool enabled) michael@0: { michael@0: BufferOffset bo = nextOffset(); michael@0: CodeOffsetLabel offset(bo.getOffset()); michael@0: addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE); michael@0: ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, hasMOVWT() ? L_MOVWT : L_LDR); michael@0: if (enabled) michael@0: ma_blx(ScratchRegister); michael@0: else michael@0: ma_nop(); michael@0: JS_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize()); michael@0: return offset; michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail, FloatRegister tmp) michael@0: { michael@0: Label handleZero; michael@0: Label handleNeg; michael@0: Label fin; michael@0: // Do a compare based on the original value, then do most other things based on the michael@0: // shifted value. 
michael@0: ma_vcmpz(input); michael@0: // Adding 0.5 is technically incorrect! michael@0: // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers. michael@0: ma_vimm(0.5, ScratchFloatReg); michael@0: // Since we already know the sign bit, flip all numbers to be positive, stored in tmp. michael@0: ma_vabs(input, tmp); michael@0: // Add 0.5, storing the result into tmp. michael@0: ma_vadd(ScratchFloatReg, tmp, tmp); michael@0: as_vmrs(pc); michael@0: ma_b(&handleZero, Assembler::Equal); michael@0: ma_b(&handleNeg, Assembler::Signed); michael@0: // NaN is always a bail condition, just bail directly. michael@0: ma_b(bail, Assembler::Overflow); michael@0: michael@0: // The argument is a positive number, truncation is the path to glory; michael@0: // Since it is known to be > 0.0, explicitly convert to a larger range, michael@0: // then a value that rounds to INT_MAX is explicitly different from an michael@0: // argument that clamps to INT_MAX michael@0: ma_vcvt_F64_U32(tmp, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_mov(output, output, SetCond); michael@0: ma_b(bail, Signed); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleZero); michael@0: // Move the top word of the double into the output reg, if it is non-zero, michael@0: // then the original value was -0.0 michael@0: as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); michael@0: ma_cmp(output, Imm32(0)); michael@0: ma_b(bail, NonZero); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleNeg); michael@0: // Negative case, negate, then start dancing. 
This number may be positive, since we added 0.5 michael@0: ma_vcvt_F64_U32(tmp, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: michael@0: // -output is now a correctly rounded value, unless the original value was exactly michael@0: // halfway between two integers, at which point, it has been rounded away from zero, when michael@0: // it should be rounded towards \infty. michael@0: ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg); michael@0: compareDouble(ScratchFloatReg, tmp); michael@0: ma_sub(output, Imm32(1), output, NoSetCond, Equal); michael@0: // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, michael@0: // the result will still be a negative number michael@0: ma_rsb(output, Imm32(0), output, SetCond); michael@0: michael@0: // If the result looks non-negative, then this value didn't actually fit into michael@0: // the int range, and special handling is required, or it was zero, which means michael@0: // the result is actually -0.0 which also requires special handling. michael@0: ma_b(bail, NotSigned); michael@0: michael@0: bind(&fin); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bail, FloatRegister tmp) michael@0: { michael@0: Label handleZero; michael@0: Label handleNeg; michael@0: Label fin; michael@0: // Do a compare based on the original value, then do most other things based on the michael@0: // shifted value. michael@0: ma_vcmpz_f32(input); michael@0: // Adding 0.5 is technically incorrect! michael@0: // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers. michael@0: ma_vimm_f32(0.5f, ScratchFloatReg); michael@0: // Since we already know the sign bit, flip all numbers to be positive, stored in tmp. michael@0: ma_vabs_f32(input, tmp); michael@0: // Add 0.5, storing the result into tmp. 
michael@0: ma_vadd_f32(ScratchFloatReg, tmp, tmp); michael@0: as_vmrs(pc); michael@0: ma_b(&handleZero, Assembler::Equal); michael@0: ma_b(&handleNeg, Assembler::Signed); michael@0: // NaN is always a bail condition, just bail directly. michael@0: ma_b(bail, Assembler::Overflow); michael@0: michael@0: // The argument is a positive number, truncation is the path to glory; michael@0: // Since it is known to be > 0.0, explicitly convert to a larger range, michael@0: // then a value that rounds to INT_MAX is explicitly different from an michael@0: // argument that clamps to INT_MAX michael@0: ma_vcvt_F32_U32(tmp, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: ma_mov(output, output, SetCond); michael@0: ma_b(bail, Signed); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleZero); michael@0: // Move the top word of the double into the output reg, if it is non-zero, michael@0: // then the original value was -0.0 michael@0: as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); michael@0: ma_cmp(output, Imm32(0)); michael@0: ma_b(bail, NonZero); michael@0: ma_b(&fin); michael@0: michael@0: bind(&handleNeg); michael@0: // Negative case, negate, then start dancing. This number may be positive, since we added 0.5 michael@0: ma_vcvt_F32_U32(tmp, ScratchFloatReg); michael@0: ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); michael@0: michael@0: // -output is now a correctly rounded value, unless the original value was exactly michael@0: // halfway between two integers, at which point, it has been rounded away from zero, when michael@0: // it should be rounded towards \infty. michael@0: ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg); michael@0: compareFloat(ScratchFloatReg, tmp); michael@0: ma_sub(output, Imm32(1), output, NoSetCond, Equal); michael@0: // Negate the output. 
Since INT_MIN < -INT_MAX, even after adding 1, michael@0: // the result will still be a negative number michael@0: ma_rsb(output, Imm32(0), output, SetCond); michael@0: michael@0: // If the result looks non-negative, then this value didn't actually fit into michael@0: // the int range, and special handling is required, or it was zero, which means michael@0: // the result is actually -0.0 which also requires special handling. michael@0: ma_b(bail, NotSigned); michael@0: michael@0: bind(&fin); michael@0: } michael@0: michael@0: CodeOffsetJump michael@0: MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond) michael@0: { michael@0: ARMBuffer::PoolEntry pe; michael@0: BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond); michael@0: // Fill in a new CodeOffset with both the load and the michael@0: // pool entry that the instruction loads from. michael@0: CodeOffsetJump ret(bo.getOffset(), pe.encode()); michael@0: return ret; michael@0: } michael@0: michael@0: #ifdef JSGC_GENERATIONAL michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchPtrInNurseryRange(Register ptr, Register temp, Label *label) michael@0: { michael@0: JS_ASSERT(ptr != temp); michael@0: JS_ASSERT(ptr != secondScratchReg_); michael@0: michael@0: const Nursery &nursery = GetIonContext()->runtime->gcNursery(); michael@0: uintptr_t startChunk = nursery.start() >> Nursery::ChunkShift; michael@0: michael@0: ma_mov(Imm32(startChunk), secondScratchReg_); michael@0: as_rsb(secondScratchReg_, secondScratchReg_, lsr(ptr, Nursery::ChunkShift)); michael@0: branch32(Assembler::Below, secondScratchReg_, Imm32(Nursery::NumNurseryChunks), label); michael@0: } michael@0: michael@0: void michael@0: MacroAssemblerARMCompat::branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label) michael@0: { michael@0: Label done; michael@0: michael@0: branchTestObject(Assembler::NotEqual, value, &done); michael@0: branchPtrInNurseryRange(value.payloadReg(), temp, label); 
michael@0: michael@0: bind(&done); michael@0: } michael@0: michael@0: #endif