js/src/jit/arm/MacroAssembler-arm.h

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/arm/MacroAssembler-arm.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1612 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#ifndef jit_arm_MacroAssembler_arm_h
    1.11 +#define jit_arm_MacroAssembler_arm_h
    1.12 +
    1.13 +#include "mozilla/DebugOnly.h"
    1.14 +
    1.15 +#include "jsopcode.h"
    1.16 +
    1.17 +#include "jit/arm/Assembler-arm.h"
    1.18 +#include "jit/IonCaches.h"
    1.19 +#include "jit/IonFrames.h"
    1.20 +#include "jit/MoveResolver.h"
    1.21 +
    1.22 +using mozilla::DebugOnly;
    1.23 +
    1.24 +namespace js {
    1.25 +namespace jit {
    1.26 +
    1.27 +static Register CallReg = ip;
    1.28 +static const int defaultShift = 3;
    1.29 +JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));
    1.30 +
     1.31 +// MacroAssemblerARM inherits from Assembler, defined in Assembler-arm.{h,cpp}.
    1.32 +class MacroAssemblerARM : public Assembler
    1.33 +{
    1.34 +  protected:
    1.35 +    // On ARM, some instructions require a second scratch register. This register
    1.36 +    // defaults to lr, since it's non-allocatable (as it can be clobbered by some
    1.37 +    // instructions). Allow the baseline compiler to override this though, since
    1.38 +    // baseline IC stubs rely on lr holding the return address.
    1.39 +    Register secondScratchReg_;
    1.40 +
    1.41 +    // higher level tag testing code
    1.42 +    Operand ToPayload(Operand base) {
    1.43 +        return Operand(Register::FromCode(base.base()), base.disp());
    1.44 +    }
    1.45 +    Address ToPayload(Address base) {
    1.46 +        return ToPayload(Operand(base)).toAddress();
    1.47 +    }
    1.48 +    Operand ToType(Operand base) {
    1.49 +        return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void *));
    1.50 +    }
    1.51 +    Address ToType(Address base) {
    1.52 +        return ToType(Operand(base)).toAddress();
    1.53 +    }
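          +    // A minimal sketch of the layout these helpers assume (little-endian
          +    // nunbox32: the payload word sits at the base offset, the type tag in
          +    // the word just above it):
          +    //
          +    //   Address val(sp, 8);
          +    //   Address payload = ToPayload(val);   // sp + 8
          +    //   Address type    = ToType(val);      // sp + 8 + sizeof(void *)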
    1.54 +
    1.55 +  public:
    1.56 +    MacroAssemblerARM()
    1.57 +      : secondScratchReg_(lr)
    1.58 +    { }
    1.59 +
    1.60 +    void setSecondScratchReg(Register reg) {
    1.61 +        JS_ASSERT(reg != ScratchRegister);
    1.62 +        secondScratchReg_ = reg;
    1.63 +    }
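          +    // A usage sketch: a baseline IC stub that must keep its return address
          +    // live in lr could switch the second scratch register to another
          +    // register of its choosing (r6 here is only an illustrative pick):
          +    //
          +    //   masm.setSecondScratchReg(r6);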
    1.64 +
    1.65 +    void convertBoolToInt32(Register source, Register dest);
    1.66 +    void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
    1.67 +    void convertInt32ToDouble(const Address &src, FloatRegister dest);
    1.68 +    void convertUInt32ToFloat32(const Register &src, const FloatRegister &dest);
    1.69 +    void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
    1.70 +    void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest,
    1.71 +                                Condition c = Always);
    1.72 +    void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
    1.73 +    void convertDoubleToInt32(const FloatRegister &src, const Register &dest, Label *fail,
    1.74 +                              bool negativeZeroCheck = true);
    1.75 +    void convertFloat32ToInt32(const FloatRegister &src, const Register &dest, Label *fail,
    1.76 +                               bool negativeZeroCheck = true);
    1.77 +
    1.78 +    void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest);
    1.79 +    void branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail);
    1.80 +    void convertInt32ToFloat32(const Register &src, const FloatRegister &dest);
    1.81 +    void convertInt32ToFloat32(const Address &src, FloatRegister dest);
    1.82 +
    1.83 +    void addDouble(FloatRegister src, FloatRegister dest);
    1.84 +    void subDouble(FloatRegister src, FloatRegister dest);
    1.85 +    void mulDouble(FloatRegister src, FloatRegister dest);
    1.86 +    void divDouble(FloatRegister src, FloatRegister dest);
    1.87 +
    1.88 +    void negateDouble(FloatRegister reg);
    1.89 +    void inc64(AbsoluteAddress dest);
    1.90 +
     1.91 +    // somewhat direct wrappers for the low-level assembler functions
     1.92 +    // bitops
     1.93 +    // attempt to encode a virtual ALU instruction using
     1.94 +    // two real instructions.
    1.95 +  private:
    1.96 +    bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
    1.97 +                 SetCond_ sc, Condition c);
    1.98 +
    1.99 +  public:
   1.100 +    void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
   1.101 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.102 +    void ma_alu(Register src1, Imm32 imm, Register dest,
   1.103 +                ALUOp op,
   1.104 +                SetCond_ sc =  NoSetCond, Condition c = Always);
   1.105 +
   1.106 +    void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
   1.107 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.108 +    void ma_nop();
   1.109 +    void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
   1.110 +                         RelocStyle rs, Instruction *i = nullptr);
   1.111 +    void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
   1.112 +                         RelocStyle rs, Instruction *i = nullptr);
    1.113 +    // These should likely be wrapped up as a set of macros
    1.114 +    // or something like that; there is no good reason
    1.115 +    // to spell all of this code out explicitly.
   1.116 +    // ALU based ops
   1.117 +    // mov
   1.118 +    void ma_mov(Register src, Register dest,
   1.119 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.120 +
   1.121 +    void ma_mov(Imm32 imm, Register dest,
   1.122 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.123 +    void ma_mov(ImmWord imm, Register dest,
   1.124 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.125 +
   1.126 +    void ma_mov(const ImmGCPtr &ptr, Register dest);
   1.127 +
   1.128 +    // Shifts (just a move with a shifting op2)
   1.129 +    void ma_lsl(Imm32 shift, Register src, Register dst);
   1.130 +    void ma_lsr(Imm32 shift, Register src, Register dst);
   1.131 +    void ma_asr(Imm32 shift, Register src, Register dst);
   1.132 +    void ma_ror(Imm32 shift, Register src, Register dst);
   1.133 +    void ma_rol(Imm32 shift, Register src, Register dst);
   1.134 +    // Shifts (just a move with a shifting op2)
   1.135 +    void ma_lsl(Register shift, Register src, Register dst);
   1.136 +    void ma_lsr(Register shift, Register src, Register dst);
   1.137 +    void ma_asr(Register shift, Register src, Register dst);
   1.138 +    void ma_ror(Register shift, Register src, Register dst);
   1.139 +    void ma_rol(Register shift, Register src, Register dst);
   1.140 +
   1.141 +    // Move not (dest <- ~src)
   1.142 +    void ma_mvn(Imm32 imm, Register dest,
   1.143 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.144 +
   1.145 +
   1.146 +    void ma_mvn(Register src1, Register dest,
   1.147 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.148 +
   1.149 +    // Negate (dest <- -src) implemented as rsb dest, src, 0
   1.150 +    void ma_neg(Register src, Register dest,
   1.151 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.152 +
   1.153 +    // and
   1.154 +    void ma_and(Register src, Register dest,
   1.155 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.156 +
   1.157 +    void ma_and(Register src1, Register src2, Register dest,
   1.158 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.159 +
   1.160 +    void ma_and(Imm32 imm, Register dest,
   1.161 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.162 +
   1.163 +    void ma_and(Imm32 imm, Register src1, Register dest,
   1.164 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.165 +
   1.166 +
   1.167 +
   1.168 +    // bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
   1.169 +    void ma_bic(Imm32 imm, Register dest,
   1.170 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.171 +
   1.172 +    // exclusive or
   1.173 +    void ma_eor(Register src, Register dest,
   1.174 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.175 +
   1.176 +    void ma_eor(Register src1, Register src2, Register dest,
   1.177 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.178 +
   1.179 +    void ma_eor(Imm32 imm, Register dest,
   1.180 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.181 +
   1.182 +    void ma_eor(Imm32 imm, Register src1, Register dest,
   1.183 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.184 +
   1.185 +
   1.186 +    // or
   1.187 +    void ma_orr(Register src, Register dest,
   1.188 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.189 +
   1.190 +    void ma_orr(Register src1, Register src2, Register dest,
   1.191 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.192 +
   1.193 +    void ma_orr(Imm32 imm, Register dest,
   1.194 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.195 +
   1.196 +    void ma_orr(Imm32 imm, Register src1, Register dest,
   1.197 +                SetCond_ sc = NoSetCond, Condition c = Always);
   1.198 +
   1.199 +
   1.200 +    // arithmetic based ops
   1.201 +    // add with carry
   1.202 +    void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.203 +    void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.204 +    void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.205 +
   1.206 +    // add
   1.207 +    void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.208 +    void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.209 +    void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.210 +    void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.211 +    void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.212 +
   1.213 +    // subtract with carry
   1.214 +    void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.215 +    void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.216 +    void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.217 +
   1.218 +    // subtract
   1.219 +    void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.220 +    void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.221 +    void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.222 +    void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.223 +    void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.224 +
   1.225 +    // reverse subtract
   1.226 +    void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.227 +    void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.228 +    void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.229 +    void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.230 +
   1.231 +    // reverse subtract with carry
   1.232 +    void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.233 +    void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.234 +    void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
   1.235 +
   1.236 +    // compares/tests
   1.237 +    // compare negative (sets condition codes as src1 + src2 would)
   1.238 +    void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
   1.239 +    void ma_cmn(Register src1, Register src2, Condition c = Always);
   1.240 +    void ma_cmn(Register src1, Operand op, Condition c = Always);
   1.241 +
    1.242 +    // compare (src1 - src2)
   1.243 +    void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
   1.244 +    void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
   1.245 +    void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
   1.246 +    void ma_cmp(Register src1, Operand op, Condition c = Always);
   1.247 +    void ma_cmp(Register src1, Register src2, Condition c = Always);
   1.248 +
   1.249 +
    1.250 +    // test for equality (src1 ^ src2)
   1.251 +    void ma_teq(Register src1, Imm32 imm, Condition c = Always);
   1.252 +    void ma_teq(Register src1, Register src2, Condition c = Always);
   1.253 +    void ma_teq(Register src1, Operand op, Condition c = Always);
   1.254 +
   1.255 +
   1.256 +    // test (src1 & src2)
   1.257 +    void ma_tst(Register src1, Imm32 imm, Condition c = Always);
   1.258 +    void ma_tst(Register src1, Register src2, Condition c = Always);
   1.259 +    void ma_tst(Register src1, Operand op, Condition c = Always);
   1.260 +
   1.261 +    // multiplies.  For now, there are only two that we care about.
   1.262 +    void ma_mul(Register src1, Register src2, Register dest);
   1.263 +    void ma_mul(Register src1, Imm32 imm, Register dest);
   1.264 +    Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
   1.265 +    Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);
   1.266 +
    1.267 +    // fast mod; uses scratch registers, and thus needs to live in the assembler.
    1.268 +    // Implicitly assumes that we can overwrite dest at the beginning of the sequence.
   1.269 +    void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
   1.270 +                     int32_t shift);
   1.271 +
   1.272 +    // mod, depends on integer divide instructions being supported
   1.273 +    void ma_smod(Register num, Register div, Register dest);
   1.274 +    void ma_umod(Register num, Register div, Register dest);
   1.275 +
   1.276 +    // division, depends on integer divide instructions being supported
   1.277 +    void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
   1.278 +    void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);
   1.279 +
   1.280 +    // memory
   1.281 +    // shortcut for when we know we're transferring 32 bits of data
   1.282 +    void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
   1.283 +                Index mode = Offset, Condition cc = Always);
   1.284 +
   1.285 +    void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
   1.286 +                Index mode = Offset, Condition cc = Always);
   1.287 +
   1.288 +
   1.289 +    void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
   1.290 +    void ma_str(Register rt, const Operand &addr, Index mode = Offset, Condition cc = Always);
   1.291 +    void ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc);
   1.292 +
   1.293 +    void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.294 +    void ma_ldr(const Operand &addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.295 +
   1.296 +    void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.297 +    void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.298 +    void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.299 +    void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
   1.300 +    void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset, Condition cc = Always);
   1.301 +    void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
   1.302 +    void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
   1.303 +    void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
    1.304 +    // specialized for moving N bits of data, where N == 8, 16, 32, or 64
   1.305 +    BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
   1.306 +                          Register rn, Register rm, Register rt,
   1.307 +                          Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
   1.308 +
   1.309 +    BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
   1.310 +                          Register rn, Imm32 offset, Register rt,
   1.311 +                          Index mode = Offset, Condition cc = Always);
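          +    // A usage sketch (names as declared above): load a sign-extended 16-bit
          +    // value from [base + offset] into dest:
          +    //
          +    //   ma_dataTransferN(IsLoad, 16, /* IsSigned = */ true,
          +    //                    base, Imm32(offset), dest);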
   1.312 +    void ma_pop(Register r);
   1.313 +    void ma_push(Register r);
   1.314 +
   1.315 +    void ma_vpop(VFPRegister r);
   1.316 +    void ma_vpush(VFPRegister r);
   1.317 +
   1.318 +    // branches when done from within arm-specific code
   1.319 +    BufferOffset ma_b(Label *dest, Condition c = Always, bool isPatchable = false);
   1.320 +    void ma_bx(Register dest, Condition c = Always);
   1.321 +
   1.322 +    void ma_b(void *target, Relocation::Kind reloc, Condition c = Always);
   1.323 +
    1.324 +    // This is almost NEVER necessary: we will basically never be calling a label,
    1.325 +    // except possibly in the crazy bailout-table case.
   1.326 +    void ma_bl(Label *dest, Condition c = Always);
   1.327 +
   1.328 +    void ma_blx(Register dest, Condition c = Always);
   1.329 +
    1.330 +    // VFP/ALU
   1.331 +    void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.332 +    void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.333 +
   1.334 +    void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.335 +    void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.336 +
   1.337 +    void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.338 +    void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.339 +    void ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.340 +    void ma_vabs(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.341 +    void ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.342 +
   1.343 +    void ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.344 +    void ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.345 +
   1.346 +    void ma_vimm(double value, FloatRegister dest, Condition cc = Always);
   1.347 +    void ma_vimm_f32(float value, FloatRegister dest, Condition cc = Always);
   1.348 +
   1.349 +    void ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc = Always);
   1.350 +    void ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc = Always);
   1.351 +    void ma_vcmpz(FloatRegister src1, Condition cc = Always);
   1.352 +    void ma_vcmpz_f32(FloatRegister src1, Condition cc = Always);
   1.353 +
   1.354 +    void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.355 +    void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.356 +
   1.357 +    void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.358 +    void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
   1.359 +
   1.360 +    void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.361 +
   1.362 +    // source is F64, dest is I32
   1.363 +    void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.364 +    void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.365 +
   1.366 +    // source is I32, dest is F64
   1.367 +    void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.368 +    void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.369 +
   1.370 +    // source is F32, dest is I32
   1.371 +    void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.372 +    void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.373 +
   1.374 +    // source is I32, dest is F32
   1.375 +    void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.376 +    void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
   1.377 +
   1.378 +    void ma_vxfer(FloatRegister src, Register dest, Condition cc = Always);
   1.379 +    void ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc = Always);
   1.380 +
   1.381 +    void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
   1.382 +    void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);
   1.383 +
   1.384 +    void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
   1.385 +
   1.386 +    BufferOffset ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister dest, Condition cc = Always);
   1.387 +
   1.388 +
   1.389 +    BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
   1.390 +    BufferOffset ma_vldr(const Operand &addr, VFPRegister dest, Condition cc = Always);
   1.391 +    BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
   1.392 +
   1.393 +    BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
   1.394 +    BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);
   1.395 +
   1.396 +    BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
    1.397 +    // calls an Ion function, assuming that the stack is untouched (8 byte aligned)
    1.398 +    void ma_callIon(const Register reg);
    1.399 +    // calls an Ion function, assuming that sp has already been decremented
    1.400 +    void ma_callIonNoPush(const Register reg);
    1.401 +    // calls an Ion function, assuming that the stack is currently not 8 byte aligned
    1.402 +    void ma_callIonHalfPush(const Register reg);
   1.403 +
   1.404 +    void ma_call(ImmPtr dest);
   1.405 +
   1.406 +    // calls reg, storing the return address into sp[0]
   1.407 +    void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);
   1.408 +
    1.409 +    // Float registers can only be loaded/stored in contiguous runs
    1.410 +    // when using vstm/vldm.
    1.411 +    // This function breaks the set into contiguous runs and loads/stores
    1.412 +    // them at [rm]. rm will be modified and left in a state logically
    1.413 +    // suitable for the next load/store.
    1.414 +    // Returns the offset from [rm] for the logical next load/store.
   1.415 +    int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
   1.416 +                                   Register rm, DTMMode mode)
   1.417 +    {
   1.418 +        if (mode == IA) {
   1.419 +            return transferMultipleByRunsImpl
   1.420 +                <FloatRegisterForwardIterator>(set, ls, rm, mode, 1);
   1.421 +        }
   1.422 +        if (mode == DB) {
   1.423 +            return transferMultipleByRunsImpl
   1.424 +                <FloatRegisterBackwardIterator>(set, ls, rm, mode, -1);
   1.425 +        }
   1.426 +        MOZ_ASSUME_UNREACHABLE("Invalid data transfer addressing mode");
   1.427 +    }
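          +    // A usage sketch: spill a set of live float registers below sp and later
          +    // reload them, with rm (here sp) updated across each call:
          +    //
          +    //   transferMultipleByRuns(liveFloats, IsStore, sp, DB);  // spill, decrement-before
          +    //   ...
          +    //   transferMultipleByRuns(liveFloats, IsLoad, sp, IA);   // reload, increment-after
          +    //
          +    // liveFloats is a FloatRegisterSet supplied by the caller.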
   1.428 +
   1.429 +private:
   1.430 +    // Implementation for transferMultipleByRuns so we can use different
   1.431 +    // iterators for forward/backward traversals.
   1.432 +    // The sign argument should be 1 if we traverse forwards, -1 if we
   1.433 +    // traverse backwards.
   1.434 +    template<typename RegisterIterator> int32_t
   1.435 +    transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
   1.436 +                               Register rm, DTMMode mode, int32_t sign)
   1.437 +    {
   1.438 +        JS_ASSERT(sign == 1 || sign == -1);
   1.439 +
   1.440 +        int32_t delta = sign * sizeof(double);
   1.441 +        int32_t offset = 0;
   1.442 +        RegisterIterator iter(set);
   1.443 +        while (iter.more()) {
   1.444 +            startFloatTransferM(ls, rm, mode, WriteBack);
   1.445 +            int32_t reg = (*iter).code_;
   1.446 +            do {
   1.447 +                offset += delta;
   1.448 +                transferFloatReg(*iter);
   1.449 +            } while ((++iter).more() && (*iter).code_ == (reg += sign));
   1.450 +            finishFloatTransfer();
   1.451 +        }
   1.452 +
   1.453 +        JS_ASSERT(offset == static_cast<int32_t>(set.size() * sizeof(double)) * sign);
   1.454 +        return offset;
   1.455 +    }
   1.456 +};
   1.457 +
   1.458 +class MacroAssemblerARMCompat : public MacroAssemblerARM
   1.459 +{
   1.460 +    // Number of bytes the stack is adjusted inside a call to C. Calls to C may
   1.461 +    // not be nested.
   1.462 +    bool inCall_;
   1.463 +    uint32_t args_;
   1.464 +    // The actual number of arguments that were passed, used to assert that
   1.465 +    // the initial number of arguments declared was correct.
   1.466 +    uint32_t passedArgs_;
   1.467 +    uint32_t passedArgTypes_;
   1.468 +
    1.469 +    // ARM treats arguments as a vector in registers/memory that looks like:
    1.470 +    // { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
    1.471 +    // usedIntSlots_ keeps track of how many of these have been used.
    1.472 +    // It bears a passing resemblance to passedArgs_, but a single argument
    1.473 +    // can effectively use between one and three slots depending on its size and
    1.474 +    // alignment requirements (see the worked example below).
   1.475 +    uint32_t usedIntSlots_;
   1.476 +#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
   1.477 +    uint32_t usedFloatSlots_;
   1.478 +    bool usedFloat32_;
   1.479 +    uint32_t padding_;
   1.480 +#endif
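          +    // A worked example of the slot accounting above (softfp, purely
          +    // illustrative): for a signature (int32_t, double, int32_t),
          +    //   arg0 -> r0            1 slot
          +    //   arg1 -> r2:r3         2 slots (r1 is skipped for 8-byte alignment)
          +    //   arg2 -> [sp]          1 slot
          +    // so passedArgs_ ends up as 3 while usedIntSlots_ ends up as 5.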
   1.481 +    bool dynamicAlignment_;
   1.482 +
   1.483 +    bool enoughMemory_;
   1.484 +
   1.485 +    // Used to work around the move resolver's lack of support for
   1.486 +    // moving into register pairs, which the softfp ABI needs.
   1.487 +    mozilla::Array<MoveOperand, 2> floatArgsInGPR;
   1.488 +    mozilla::Array<bool, 2> floatArgsInGPRValid;
   1.489 +
   1.490 +    // Compute space needed for the function call and set the properties of the
   1.491 +    // callee.  It returns the space which has to be allocated for calling the
   1.492 +    // function.
   1.493 +    //
   1.494 +    // arg            Number of arguments of the function.
   1.495 +    void setupABICall(uint32_t arg);
   1.496 +
   1.497 +  protected:
   1.498 +    MoveResolver moveResolver_;
   1.499 +
   1.500 +    // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
   1.501 +    // needed to compute offsets to stack slots while temporary space has been
   1.502 +    // reserved for unexpected spills or C++ function calls. It is maintained
   1.503 +    // by functions which track stack alignment, which for clear distinction
   1.504 +    // use StudlyCaps (for example, Push, Pop).
   1.505 +    uint32_t framePushed_;
   1.506 +    void adjustFrame(int value) {
   1.507 +        setFramePushed(framePushed_ + value);
   1.508 +    }
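          +    // A sketch of the convention: the StudlyCaps helpers pair the raw push
          +    // with the bookkeeping, roughly
          +    //
          +    //   void Push(const Register &reg) {
          +    //       ma_push(reg);
          +    //       adjustFrame(sizeof(intptr_t));
          +    //   }
          +    //
          +    // so framePushed_ stays an accurate byte count of the extra stack usage.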
   1.509 +  public:
   1.510 +    MacroAssemblerARMCompat()
   1.511 +      : inCall_(false),
   1.512 +        enoughMemory_(true),
   1.513 +        framePushed_(0)
   1.514 +    { }
   1.515 +    bool oom() const {
   1.516 +        return Assembler::oom() || !enoughMemory_;
   1.517 +    }
   1.518 +
   1.519 +  public:
   1.520 +    using MacroAssemblerARM::call;
   1.521 +
    1.522 +    // Jumps + other functions that should be called from
    1.523 +    // non-ARM-specific code...
    1.524 +    // basically, an x86 front end on top of the ARM code.
    1.525 +    void j(Condition code, Label *dest)
   1.526 +    {
   1.527 +        as_b(dest, code);
   1.528 +    }
   1.529 +    void j(Label *dest)
   1.530 +    {
   1.531 +        as_b(dest, Always);
   1.532 +    }
   1.533 +
   1.534 +    void mov(Register src, Register dest) {
   1.535 +        ma_mov(src, dest);
   1.536 +    }
   1.537 +    void mov(ImmWord imm, Register dest) {
   1.538 +        ma_mov(Imm32(imm.value), dest);
   1.539 +    }
   1.540 +    void mov(ImmPtr imm, Register dest) {
   1.541 +        mov(ImmWord(uintptr_t(imm.value)), dest);
   1.542 +    }
   1.543 +    void mov(Register src, Address dest) {
   1.544 +        MOZ_ASSUME_UNREACHABLE("NYI-IC");
   1.545 +    }
   1.546 +    void mov(Address src, Register dest) {
   1.547 +        MOZ_ASSUME_UNREACHABLE("NYI-IC");
   1.548 +    }
   1.549 +
   1.550 +    void call(const Register reg) {
   1.551 +        as_blx(reg);
   1.552 +    }
   1.553 +    void call(Label *label) {
   1.554 +        // for now, assume that it'll be nearby?
   1.555 +        as_bl(label, Always);
   1.556 +    }
   1.557 +    void call(ImmWord imm) {
   1.558 +        call(ImmPtr((void*)imm.value));
   1.559 +    }
   1.560 +    void call(ImmPtr imm) {
   1.561 +        BufferOffset bo = m_buffer.nextOffset();
   1.562 +        addPendingJump(bo, imm, Relocation::HARDCODED);
   1.563 +        ma_call(imm);
   1.564 +    }
   1.565 +    void call(AsmJSImmPtr imm) {
   1.566 +        movePtr(imm, CallReg);
   1.567 +        call(CallReg);
   1.568 +    }
   1.569 +    void call(JitCode *c) {
   1.570 +        BufferOffset bo = m_buffer.nextOffset();
   1.571 +        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
   1.572 +        RelocStyle rs;
   1.573 +        if (hasMOVWT())
   1.574 +            rs = L_MOVWT;
   1.575 +        else
   1.576 +            rs = L_LDR;
   1.577 +
   1.578 +        ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
   1.579 +        ma_callIonHalfPush(ScratchRegister);
   1.580 +    }
   1.581 +
   1.582 +    void appendCallSite(const CallSiteDesc &desc) {
   1.583 +        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_));
   1.584 +    }
   1.585 +
   1.586 +    void call(const CallSiteDesc &desc, const Register reg) {
   1.587 +        call(reg);
   1.588 +        appendCallSite(desc);
   1.589 +    }
   1.590 +    void call(const CallSiteDesc &desc, Label *label) {
   1.591 +        call(label);
   1.592 +        appendCallSite(desc);
   1.593 +    }
   1.594 +    void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
   1.595 +        call(imm);
   1.596 +        appendCallSite(desc);
   1.597 +    }
   1.598 +    void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
   1.599 +        movePtr(imm, CallReg);
   1.600 +        ma_callAndStoreRet(CallReg, stackArgBytes);
   1.601 +        appendCallSite(CallSiteDesc::Exit());
   1.602 +    }
   1.603 +    void callIonFromAsmJS(const Register reg) {
   1.604 +        ma_callIonNoPush(reg);
   1.605 +        appendCallSite(CallSiteDesc::Exit());
   1.606 +
   1.607 +        // The Ion ABI has the callee pop the return address off the stack.
   1.608 +        // The asm.js caller assumes that the call leaves sp unchanged, so bump
   1.609 +        // the stack.
   1.610 +        subPtr(Imm32(sizeof(void*)), sp);
   1.611 +    }
   1.612 +
   1.613 +    void branch(JitCode *c) {
   1.614 +        BufferOffset bo = m_buffer.nextOffset();
   1.615 +        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
   1.616 +        RelocStyle rs;
   1.617 +        if (hasMOVWT())
   1.618 +            rs = L_MOVWT;
   1.619 +        else
   1.620 +            rs = L_LDR;
   1.621 +
   1.622 +        ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
   1.623 +        ma_bx(ScratchRegister);
   1.624 +    }
   1.625 +    void branch(const Register reg) {
   1.626 +        ma_bx(reg);
   1.627 +    }
   1.628 +    void nop() {
   1.629 +        ma_nop();
   1.630 +    }
   1.631 +    void ret() {
   1.632 +        ma_pop(pc);
   1.633 +        m_buffer.markGuard();
   1.634 +    }
   1.635 +    void retn(Imm32 n) {
   1.636 +        // pc <- [sp]; sp += n
   1.637 +        ma_dtr(IsLoad, sp, n, pc, PostIndex);
   1.638 +        m_buffer.markGuard();
   1.639 +    }
   1.640 +    void push(Imm32 imm) {
   1.641 +        ma_mov(imm, ScratchRegister);
   1.642 +        ma_push(ScratchRegister);
   1.643 +    }
   1.644 +    void push(ImmWord imm) {
   1.645 +        push(Imm32(imm.value));
   1.646 +    }
   1.647 +    void push(ImmGCPtr imm) {
   1.648 +        ma_mov(imm, ScratchRegister);
   1.649 +        ma_push(ScratchRegister);
   1.650 +    }
   1.651 +    void push(const Address &address) {
   1.652 +        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
   1.653 +        ma_push(ScratchRegister);
   1.654 +    }
   1.655 +    void push(const Register &reg) {
   1.656 +        ma_push(reg);
   1.657 +    }
   1.658 +    void push(const FloatRegister &reg) {
   1.659 +        ma_vpush(VFPRegister(reg));
   1.660 +    }
   1.661 +    void pushWithPadding(const Register &reg, const Imm32 extraSpace) {
   1.662 +        Imm32 totSpace = Imm32(extraSpace.value + 4);
   1.663 +        ma_dtr(IsStore, sp, totSpace, reg, PreIndex);
   1.664 +    }
   1.665 +    void pushWithPadding(const Imm32 &imm, const Imm32 extraSpace) {
   1.666 +        Imm32 totSpace = Imm32(extraSpace.value + 4);
   1.667 +        // ma_dtr may need the scratch register to adjust the stack, so use the
   1.668 +        // second scratch register.
   1.669 +        ma_mov(imm, secondScratchReg_);
   1.670 +        ma_dtr(IsStore, sp, totSpace, secondScratchReg_, PreIndex);
   1.671 +    }
   1.672 +
   1.673 +    void pop(const Register &reg) {
   1.674 +        ma_pop(reg);
   1.675 +    }
   1.676 +    void pop(const FloatRegister &reg) {
   1.677 +        ma_vpop(VFPRegister(reg));
   1.678 +    }
   1.679 +
   1.680 +    void popN(const Register &reg, Imm32 extraSpace) {
   1.681 +        Imm32 totSpace = Imm32(extraSpace.value + 4);
   1.682 +        ma_dtr(IsLoad, sp, totSpace, reg, PostIndex);
   1.683 +    }
   1.684 +
   1.685 +    CodeOffsetLabel toggledJump(Label *label);
   1.686 +
   1.687 +    // Emit a BLX or NOP instruction. ToggleCall can be used to patch
   1.688 +    // this instruction.
   1.689 +    CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
   1.690 +
   1.691 +    static size_t ToggledCallSize() {
   1.692 +        if (hasMOVWT())
    1.693 +            // Size of a movw, movt, nop/blx sequence.
    1.694 +            return 12;
    1.695 +        // Size of an ldr, nop/blx sequence.
   1.696 +        return 8;
   1.697 +    }
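          +    // The two patchable sequences, sketched out (sizes as returned above):
          +    //
          +    //   movw scratch, #low16      ; with movw/movt: 3 * 4 = 12 bytes
          +    //   movt scratch, #high16
          +    //   blx  scratch              ; toggled between blx and nop
          +    //
          +    //   ldr  scratch, [pc, #imm]  ; without movw/movt: 2 * 4 = 8 bytes
          +    //   blx  scratch              ; toggled between blx and nop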
   1.698 +
   1.699 +    CodeOffsetLabel pushWithPatch(ImmWord imm) {
   1.700 +        CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
   1.701 +        ma_push(ScratchRegister);
   1.702 +        return label;
   1.703 +    }
   1.704 +
   1.705 +    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
   1.706 +        CodeOffsetLabel label = currentOffset();
   1.707 +        ma_movPatchable(Imm32(imm.value), dest, Always, hasMOVWT() ? L_MOVWT : L_LDR);
   1.708 +        return label;
   1.709 +    }
   1.710 +    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
   1.711 +        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
   1.712 +    }
   1.713 +
   1.714 +    void jump(Label *label) {
   1.715 +        as_b(label);
   1.716 +    }
   1.717 +    void jump(Register reg) {
   1.718 +        ma_bx(reg);
   1.719 +    }
   1.720 +    void jump(const Address &address) {
   1.721 +        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
   1.722 +        ma_bx(ScratchRegister);
   1.723 +    }
   1.724 +
   1.725 +    void neg32(Register reg) {
   1.726 +        ma_neg(reg, reg, SetCond);
   1.727 +    }
   1.728 +    void negl(Register reg) {
   1.729 +        ma_neg(reg, reg, SetCond);
   1.730 +    }
   1.731 +    void test32(Register lhs, Register rhs) {
   1.732 +        ma_tst(lhs, rhs);
   1.733 +    }
   1.734 +    void test32(Register lhs, Imm32 imm) {
   1.735 +        ma_tst(lhs, imm);
   1.736 +    }
   1.737 +    void test32(const Address &address, Imm32 imm) {
   1.738 +        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
   1.739 +        ma_tst(ScratchRegister, imm);
   1.740 +    }
   1.741 +    void testPtr(Register lhs, Register rhs) {
   1.742 +        test32(lhs, rhs);
   1.743 +    }
   1.744 +
   1.745 +    // Returns the register containing the type tag.
   1.746 +    Register splitTagForTest(const ValueOperand &value) {
   1.747 +        return value.typeReg();
   1.748 +    }
   1.749 +
   1.750 +    // higher level tag testing code
   1.751 +    Condition testInt32(Condition cond, const ValueOperand &value);
   1.752 +    Condition testBoolean(Condition cond, const ValueOperand &value);
   1.753 +    Condition testDouble(Condition cond, const ValueOperand &value);
   1.754 +    Condition testNull(Condition cond, const ValueOperand &value);
   1.755 +    Condition testUndefined(Condition cond, const ValueOperand &value);
   1.756 +    Condition testString(Condition cond, const ValueOperand &value);
   1.757 +    Condition testObject(Condition cond, const ValueOperand &value);
   1.758 +    Condition testNumber(Condition cond, const ValueOperand &value);
   1.759 +    Condition testMagic(Condition cond, const ValueOperand &value);
   1.760 +
   1.761 +    Condition testPrimitive(Condition cond, const ValueOperand &value);
   1.762 +
   1.763 +    // register-based tests
   1.764 +    Condition testInt32(Condition cond, const Register &tag);
   1.765 +    Condition testBoolean(Condition cond, const Register &tag);
   1.766 +    Condition testNull(Condition cond, const Register &tag);
   1.767 +    Condition testUndefined(Condition cond, const Register &tag);
   1.768 +    Condition testString(Condition cond, const Register &tag);
   1.769 +    Condition testObject(Condition cond, const Register &tag);
   1.770 +    Condition testDouble(Condition cond, const Register &tag);
   1.771 +    Condition testNumber(Condition cond, const Register &tag);
   1.772 +    Condition testMagic(Condition cond, const Register &tag);
   1.773 +    Condition testPrimitive(Condition cond, const Register &tag);
   1.774 +
   1.775 +    Condition testGCThing(Condition cond, const Address &address);
   1.776 +    Condition testMagic(Condition cond, const Address &address);
   1.777 +    Condition testInt32(Condition cond, const Address &address);
   1.778 +    Condition testDouble(Condition cond, const Address &address);
   1.779 +    Condition testBoolean(Condition cond, const Address &address);
   1.780 +    Condition testNull(Condition cond, const Address &address);
   1.781 +    Condition testUndefined(Condition cond, const Address &address);
   1.782 +    Condition testString(Condition cond, const Address &address);
   1.783 +    Condition testObject(Condition cond, const Address &address);
   1.784 +    Condition testNumber(Condition cond, const Address &address);
   1.785 +
   1.786 +    Condition testUndefined(Condition cond, const BaseIndex &src);
   1.787 +    Condition testNull(Condition cond, const BaseIndex &src);
   1.788 +    Condition testBoolean(Condition cond, const BaseIndex &src);
   1.789 +    Condition testString(Condition cond, const BaseIndex &src);
   1.790 +    Condition testInt32(Condition cond, const BaseIndex &src);
   1.791 +    Condition testObject(Condition cond, const BaseIndex &src);
   1.792 +    Condition testDouble(Condition cond, const BaseIndex &src);
   1.793 +    Condition testMagic(Condition cond, const BaseIndex &src);
   1.794 +    Condition testGCThing(Condition cond, const BaseIndex &src);
   1.795 +
   1.796 +    template <typename T>
   1.797 +    void branchTestGCThing(Condition cond, const T &t, Label *label) {
   1.798 +        Condition c = testGCThing(cond, t);
   1.799 +        ma_b(label, c);
   1.800 +    }
   1.801 +    template <typename T>
   1.802 +    void branchTestPrimitive(Condition cond, const T &t, Label *label) {
   1.803 +        Condition c = testPrimitive(cond, t);
   1.804 +        ma_b(label, c);
   1.805 +    }
   1.806 +
   1.807 +    void branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label);
   1.808 +    void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
   1.809 +                         Label *label);
   1.810 +
   1.811 +    // unboxing code
   1.812 +    void unboxInt32(const ValueOperand &operand, const Register &dest);
   1.813 +    void unboxInt32(const Address &src, const Register &dest);
   1.814 +    void unboxBoolean(const ValueOperand &operand, const Register &dest);
   1.815 +    void unboxBoolean(const Address &src, const Register &dest);
   1.816 +    void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
   1.817 +    void unboxDouble(const Address &src, const FloatRegister &dest);
   1.818 +    void unboxString(const ValueOperand &operand, const Register &dest);
   1.819 +    void unboxString(const Address &src, const Register &dest);
   1.820 +    void unboxObject(const ValueOperand &src, const Register &dest);
   1.821 +    void unboxValue(const ValueOperand &src, AnyRegister dest);
   1.822 +    void unboxPrivate(const ValueOperand &src, Register dest);
   1.823 +
   1.824 +    void notBoolean(const ValueOperand &val) {
   1.825 +        ma_eor(Imm32(1), val.payloadReg());
   1.826 +    }
   1.827 +
   1.828 +    // boxing code
   1.829 +    void boxDouble(const FloatRegister &src, const ValueOperand &dest);
   1.830 +    void boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest);
   1.831 +
   1.832 +    // Extended unboxing API. If the payload is already in a register, returns
   1.833 +    // that register. Otherwise, provides a move to the given scratch register,
   1.834 +    // and returns that.
   1.835 +    Register extractObject(const Address &address, Register scratch);
   1.836 +    Register extractObject(const ValueOperand &value, Register scratch) {
   1.837 +        return value.payloadReg();
   1.838 +    }
   1.839 +    Register extractInt32(const ValueOperand &value, Register scratch) {
   1.840 +        return value.payloadReg();
   1.841 +    }
   1.842 +    Register extractBoolean(const ValueOperand &value, Register scratch) {
   1.843 +        return value.payloadReg();
   1.844 +    }
   1.845 +    Register extractTag(const Address &address, Register scratch);
   1.846 +    Register extractTag(const BaseIndex &address, Register scratch);
   1.847 +    Register extractTag(const ValueOperand &value, Register scratch) {
   1.848 +        return value.typeReg();
   1.849 +    }
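          +    // A usage sketch: the returned register, not the scratch argument, is
          +    // what callers must use afterwards, since it may be either one:
          +    //
          +    //   Register obj = extractObject(value, ScratchRegister);
          +    //   // obj aliases value.payloadReg() here; for the Address overload it
          +    //   // would be the scratch register instead.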
   1.850 +
   1.851 +    void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
   1.852 +    void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
   1.853 +    void loadInt32OrDouble(const Operand &src, const FloatRegister &dest);
   1.854 +    void loadInt32OrDouble(Register base, Register index,
   1.855 +                           const FloatRegister &dest, int32_t shift = defaultShift);
   1.856 +    void loadConstantDouble(double dp, const FloatRegister &dest);
   1.857 +    // treat the value as a boolean, and set condition codes accordingly
   1.858 +    Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
   1.859 +    Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
   1.860 +    Condition testDoubleTruthy(bool truthy, const FloatRegister &reg);
   1.861 +    Condition testStringTruthy(bool truthy, const ValueOperand &value);
   1.862 +
   1.863 +    void boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
   1.864 +    void int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
   1.865 +    void loadConstantFloat32(float f, const FloatRegister &dest);
   1.866 +
   1.867 +    template<typename T>
   1.868 +    void branchTestInt32(Condition cond, const T & t, Label *label) {
   1.869 +        Condition c = testInt32(cond, t);
   1.870 +        ma_b(label, c);
   1.871 +    }
   1.872 +    template<typename T>
   1.873 +    void branchTestBoolean(Condition cond, const T & t, Label *label) {
   1.874 +        Condition c = testBoolean(cond, t);
   1.875 +        ma_b(label, c);
   1.876 +    }
   1.877 +    void branch32(Condition cond, Register lhs, Register rhs, Label *label) {
   1.878 +        ma_cmp(lhs, rhs);
   1.879 +        ma_b(label, cond);
   1.880 +    }
   1.881 +    void branch32(Condition cond, Register lhs, Imm32 imm, Label *label) {
   1.882 +        ma_cmp(lhs, imm);
   1.883 +        ma_b(label, cond);
   1.884 +    }
   1.885 +    void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
   1.886 +        if (lhs.getTag() == Operand::OP2) {
   1.887 +            branch32(cond, lhs.toReg(), rhs, label);
   1.888 +        } else {
   1.889 +            ma_ldr(lhs, ScratchRegister);
   1.890 +            branch32(cond, ScratchRegister, rhs, label);
   1.891 +        }
   1.892 +    }
   1.893 +    void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) {
   1.894 +        if (lhs.getTag() == Operand::OP2) {
   1.895 +            branch32(cond, lhs.toReg(), rhs, label);
   1.896 +        } else {
   1.897 +            // branch32 will use ScratchRegister.
   1.898 +            ma_ldr(lhs, secondScratchReg_);
   1.899 +            branch32(cond, secondScratchReg_, rhs, label);
   1.900 +        }
   1.901 +    }
   1.902 +    void branch32(Condition cond, const Address &lhs, Register rhs, Label *label) {
   1.903 +        load32(lhs, ScratchRegister);
   1.904 +        branch32(cond, ScratchRegister, rhs, label);
   1.905 +    }
   1.906 +    void branch32(Condition cond, const Address &lhs, Imm32 rhs, Label *label) {
   1.907 +        // branch32 will use ScratchRegister.
   1.908 +        load32(lhs, secondScratchReg_);
   1.909 +        branch32(cond, secondScratchReg_, rhs, label);
   1.910 +    }
   1.911 +    void branchPtr(Condition cond, const Address &lhs, Register rhs, Label *label) {
   1.912 +        branch32(cond, lhs, rhs, label);
   1.913 +    }
   1.914 +
   1.915 +    void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
   1.916 +        branchPtr(cond, lhs, ptr, label);
   1.917 +    }
   1.918 +
   1.919 +    void branchPrivatePtr(Condition cond, const Address &lhs, Register ptr, Label *label) {
   1.920 +        branchPtr(cond, lhs, ptr, label);
   1.921 +    }
   1.922 +
   1.923 +    void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label *label) {
   1.924 +        branchPtr(cond, lhs, ptr, label);
   1.925 +    }
   1.926 +
   1.927 +    template<typename T>
   1.928 +    void branchTestDouble(Condition cond, const T & t, Label *label) {
   1.929 +        Condition c = testDouble(cond, t);
   1.930 +        ma_b(label, c);
   1.931 +    }
   1.932 +    template<typename T>
   1.933 +    void branchTestNull(Condition cond, const T & t, Label *label) {
   1.934 +        Condition c = testNull(cond, t);
   1.935 +        ma_b(label, c);
   1.936 +    }
   1.937 +    template<typename T>
   1.938 +    void branchTestObject(Condition cond, const T & t, Label *label) {
   1.939 +        Condition c = testObject(cond, t);
   1.940 +        ma_b(label, c);
   1.941 +    }
   1.942 +    template<typename T>
   1.943 +    void branchTestString(Condition cond, const T & t, Label *label) {
   1.944 +        Condition c = testString(cond, t);
   1.945 +        ma_b(label, c);
   1.946 +    }
   1.947 +    template<typename T>
   1.948 +    void branchTestUndefined(Condition cond, const T & t, Label *label) {
   1.949 +        Condition c = testUndefined(cond, t);
   1.950 +        ma_b(label, c);
   1.951 +    }
   1.952 +    template <typename T>
   1.953 +    void branchTestNumber(Condition cond, const T &t, Label *label) {
   1.954 +        cond = testNumber(cond, t);
   1.955 +        ma_b(label, cond);
   1.956 +    }
   1.957 +    template <typename T>
   1.958 +    void branchTestMagic(Condition cond, const T &t, Label *label) {
   1.959 +        cond = testMagic(cond, t);
   1.960 +        ma_b(label, cond);
   1.961 +    }
   1.962 +    void branchTestMagicValue(Condition cond, const ValueOperand &val, JSWhyMagic why,
   1.963 +                              Label *label) {
   1.964 +        JS_ASSERT(cond == Equal || cond == NotEqual);
   1.965 +        // Test for magic
   1.966 +        Label notmagic;
   1.967 +        Condition testCond = testMagic(cond, val);
   1.968 +        ma_b(&notmagic, InvertCondition(testCond));
   1.969 +        // Test magic value
   1.970 +        branch32(cond, val.payloadReg(), Imm32(static_cast<int32_t>(why)), label);
   1.971 +        bind(&notmagic);
   1.972 +    }
   1.973 +    void branchTestInt32Truthy(bool truthy, const ValueOperand &operand, Label *label) {
   1.974 +        Condition c = testInt32Truthy(truthy, operand);
   1.975 +        ma_b(label, c);
   1.976 +    }
   1.977 +    void branchTestBooleanTruthy(bool truthy, const ValueOperand &operand, Label *label) {
   1.978 +        Condition c = testBooleanTruthy(truthy, operand);
   1.979 +        ma_b(label, c);
   1.980 +    }
   1.981 +    void branchTestDoubleTruthy(bool truthy, const FloatRegister &reg, Label *label) {
   1.982 +        Condition c = testDoubleTruthy(truthy, reg);
   1.983 +        ma_b(label, c);
   1.984 +    }
   1.985 +    void branchTestStringTruthy(bool truthy, const ValueOperand &value, Label *label) {
   1.986 +        Condition c = testStringTruthy(truthy, value);
   1.987 +        ma_b(label, c);
   1.988 +    }
   1.989 +    void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
   1.990 +        // x86 likes test foo, foo rather than cmp foo, #0.
   1.991 +        // Convert the former into the latter.
   1.992 +        if (lhs == rhs && (cond == Zero || cond == NonZero))
   1.993 +            ma_cmp(lhs, Imm32(0));
   1.994 +        else
   1.995 +            ma_tst(lhs, rhs);
   1.996 +        ma_b(label, cond);
   1.997 +    }
   1.998 +    void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
   1.999 +        ma_tst(lhs, imm);
  1.1000 +        ma_b(label, cond);
  1.1001 +    }
  1.1002 +    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
  1.1003 +        // branchTest32 will use ScratchRegister.
  1.1004 +        load32(address, secondScratchReg_);
  1.1005 +        branchTest32(cond, secondScratchReg_, imm, label);
  1.1006 +    }
  1.1007 +    void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
  1.1008 +        branchTest32(cond, lhs, rhs, label);
  1.1009 +    }
  1.1010 +    void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
  1.1011 +        branchTest32(cond, lhs, rhs, label);
  1.1012 +    }
  1.1013 +    void branchTestPtr(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
  1.1014 +        branchTest32(cond, lhs, imm, label);
  1.1015 +    }
  1.1016 +    void branchPtr(Condition cond, Register lhs, Register rhs, Label *label) {
  1.1017 +        branch32(cond, lhs, rhs, label);
  1.1018 +    }
  1.1019 +    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
  1.1020 +        movePtr(ptr, ScratchRegister);
  1.1021 +        branchPtr(cond, lhs, ScratchRegister, label);
  1.1022 +    }
  1.1023 +    void branchPtr(Condition cond, Register lhs, ImmWord imm, Label *label) {
  1.1024 +        branch32(cond, lhs, Imm32(imm.value), label);
  1.1025 +    }
  1.1026 +    void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label *label) {
  1.1027 +        branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
  1.1028 +    }
  1.1029 +    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label *label) {
  1.1030 +        movePtr(imm, ScratchRegister);
  1.1031 +        branchPtr(cond, lhs, ScratchRegister, label);
  1.1032 +    }
  1.1033 +    void branchPtr(Condition cond, Register lhs, Imm32 imm, Label *label) {
  1.1034 +        branch32(cond, lhs, imm, label);
  1.1035 +    }
  1.1036 +    void decBranchPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
  1.1037 +        subPtr(imm, lhs);
  1.1038 +        branch32(cond, lhs, Imm32(0), label);
  1.1039 +    }
  1.1040 +    void moveValue(const Value &val, Register type, Register data);
  1.1041 +
  1.1042 +    CodeOffsetJump jumpWithPatch(RepatchLabel *label, Condition cond = Always);
  1.1043 +    template <typename T>
  1.1044 +    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel *label) {
  1.1045 +        ma_cmp(reg, ptr);
  1.1046 +        return jumpWithPatch(label, cond);
  1.1047 +    }
  1.1048 +    template <typename T>
  1.1049 +    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel *label) {
  1.1050 +        ma_ldr(addr, secondScratchReg_);
  1.1051 +        ma_cmp(secondScratchReg_, ptr);
  1.1052 +        return jumpWithPatch(label, cond);
  1.1053 +    }
  1.1054 +    void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label *label) {
  1.1055 +        ma_ldr(addr, secondScratchReg_);
  1.1056 +        ma_cmp(secondScratchReg_, ptr);
  1.1057 +        ma_b(label, cond);
  1.1058 +    }
  1.1059 +    void branchPtr(Condition cond, Address addr, ImmWord ptr, Label *label) {
  1.1060 +        ma_ldr(addr, secondScratchReg_);
  1.1061 +        ma_cmp(secondScratchReg_, ptr);
  1.1062 +        ma_b(label, cond);
  1.1063 +    }
  1.1064 +    void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
  1.1065 +        branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
  1.1066 +    }
  1.1067 +    void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
  1.1068 +        loadPtr(addr, ScratchRegister);
  1.1069 +        ma_cmp(ScratchRegister, ptr);
  1.1070 +        ma_b(label, cond);
  1.1071 +    }
  1.1072 +    void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr, Label *label) {
  1.1073 +        loadPtr(addr, ScratchRegister);
  1.1074 +        ma_cmp(ScratchRegister, ptr);
  1.1075 +        ma_b(label, cond);
  1.1076 +    }
  1.1077 +    void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
  1.1078 +        loadPtr(lhs, secondScratchReg_); // ma_cmp will use the scratch register.
  1.1079 +        ma_cmp(secondScratchReg_, rhs);
  1.1080 +        ma_b(label, cond);
  1.1081 +    }
  1.1082 +    void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
  1.1083 +        loadPtr(lhs, secondScratchReg_); // ma_cmp will use the scratch register.
  1.1084 +        ma_cmp(secondScratchReg_, rhs);
  1.1085 +        ma_b(label, cond);
  1.1086 +    }
  1.1087 +
  1.1088 +    void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
  1.1089 +        if (dest.isFloat())
  1.1090 +            loadInt32OrDouble(Operand(address), dest.fpu());
  1.1091 +        else
  1.1092 +            ma_ldr(address, dest.gpr());
  1.1093 +    }
  1.1094 +
  1.1095 +    void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
  1.1096 +        if (dest.isFloat())
  1.1097 +            loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
  1.1098 +        else
  1.1099 +            load32(address, dest.gpr());
  1.1100 +    }
  1.1101 +
  1.1102 +    void moveValue(const Value &val, const ValueOperand &dest);
  1.1103 +
  1.1104 +    void moveValue(const ValueOperand &src, const ValueOperand &dest) {
  1.1105 +        Register s0 = src.typeReg(), d0 = dest.typeReg(),
  1.1106 +                 s1 = src.payloadReg(), d1 = dest.payloadReg();
  1.1107 +
  1.1108 +        // Either one or both of the source registers could be the same as a
  1.1109 +        // destination register.
  1.1110 +        if (s1 == d0) {
  1.1111 +            if (s0 == d1) {
  1.1112 +                // If both are, this is just a swap of two registers.
  1.1113 +                JS_ASSERT(d1 != ScratchRegister);
  1.1114 +                JS_ASSERT(d0 != ScratchRegister);
  1.1115 +                ma_mov(d1, ScratchRegister);
  1.1116 +                ma_mov(d0, d1);
  1.1117 +                ma_mov(ScratchRegister, d0);
  1.1118 +                return;
  1.1119 +            }
  1.1120 +            // If only one is, copy that source first.
  1.1121 +            mozilla::Swap(s0, s1);
  1.1122 +            mozilla::Swap(d0, d1);
  1.1123 +        }
  1.1124 +
  1.1125 +        if (s0 != d0)
  1.1126 +            ma_mov(s0, d0);
  1.1127 +        if (s1 != d1)
  1.1128 +            ma_mov(s1, d1);
  1.1129 +    }
  1.1130 +
  1.1131 +    void storeValue(ValueOperand val, Operand dst);
  1.1132 +    void storeValue(ValueOperand val, const BaseIndex &dest);
  1.1133 +    void storeValue(JSValueType type, Register reg, BaseIndex dest) {
  1.1134 +        // Harder cases not handled yet.
  1.1135 +        JS_ASSERT(dest.offset == 0);
  1.1136 +        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
  1.1137 +        storeValue(type, reg, Address(ScratchRegister, 0));
  1.1138 +    }
  1.1139 +    void storeValue(ValueOperand val, const Address &dest) {
  1.1140 +        storeValue(val, Operand(dest));
  1.1141 +    }
  1.1142 +    void storeValue(JSValueType type, Register reg, Address dest) {
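         +        // On 32-bit ARM a Value occupies two words: the payload is stored at
         +        // dest and the type tag at dest.offset + 4.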
  1.1143 +        ma_str(reg, dest);
  1.1144 +        ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
  1.1145 +        ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
  1.1146 +    }
  1.1147 +    void storeValue(const Value &val, Address dest) {
  1.1148 +        jsval_layout jv = JSVAL_TO_IMPL(val);
  1.1149 +        ma_mov(Imm32(jv.s.tag), secondScratchReg_);
  1.1150 +        ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
  1.1151 +        if (val.isMarkable())
  1.1152 +            ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), secondScratchReg_);
  1.1153 +        else
  1.1154 +            ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
  1.1155 +        ma_str(secondScratchReg_, dest);
  1.1156 +    }
  1.1157 +    void storeValue(const Value &val, BaseIndex dest) {
  1.1158 +        // Harder cases not handled yet.
  1.1159 +        JS_ASSERT(dest.offset == 0);
  1.1160 +        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
  1.1161 +        storeValue(val, Address(ScratchRegister, 0));
  1.1162 +    }
  1.1163 +
  1.1164 +    void loadValue(Address src, ValueOperand val);
  1.1165 +    void loadValue(Operand dest, ValueOperand val) {
  1.1166 +        loadValue(dest.toAddress(), val);
  1.1167 +    }
  1.1168 +    void loadValue(const BaseIndex &addr, ValueOperand val);
  1.1169 +    void tagValue(JSValueType type, Register payload, ValueOperand dest);
  1.1170 +
  1.1171 +    void pushValue(ValueOperand val);
  1.1172 +    void popValue(ValueOperand val);
  1.1173 +    void pushValue(const Value &val) {
  1.1174 +        jsval_layout jv = JSVAL_TO_IMPL(val);
  1.1175 +        push(Imm32(jv.s.tag));
  1.1176 +        if (val.isMarkable())
  1.1177 +            push(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
  1.1178 +        else
  1.1179 +            push(Imm32(jv.s.payload.i32));
  1.1180 +    }
  1.1181 +    void pushValue(JSValueType type, Register reg) {
  1.1182 +        push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
  1.1183 +        ma_push(reg);
  1.1184 +    }
  1.1185 +    void pushValue(const Address &addr);
  1.1186 +    void Push(const ValueOperand &val) {
  1.1187 +        pushValue(val);
  1.1188 +        framePushed_ += sizeof(Value);
  1.1189 +    }
  1.1190 +    void Pop(const ValueOperand &val) {
  1.1191 +        popValue(val);
  1.1192 +        framePushed_ -= sizeof(Value);
  1.1193 +    }
  1.1194 +    void storePayload(const Value &val, Operand dest);
  1.1195 +    void storePayload(Register src, Operand dest);
  1.1196 +    void storePayload(const Value &val, Register base, Register index, int32_t shift = defaultShift);
  1.1197 +    void storePayload(Register src, Register base, Register index, int32_t shift = defaultShift);
  1.1198 +    void storeTypeTag(ImmTag tag, Operand dest);
  1.1199 +    void storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift = defaultShift);
  1.1200 +
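         +    // A frame descriptor packs the frame size (shifted left by
         +    // FRAMESIZE_SHIFT) together with the FrameType tag in the low bits.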
  1.1201 +    void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
  1.1202 +        ma_lsl(Imm32(FRAMESIZE_SHIFT), frameSizeReg, frameSizeReg);
  1.1203 +        ma_orr(Imm32(type), frameSizeReg);
  1.1204 +    }
  1.1205 +
  1.1206 +    void linkExitFrame();
  1.1207 +    void linkParallelExitFrame(const Register &pt);
  1.1208 +    void handleFailureWithHandler(void *handler);
  1.1209 +    void handleFailureWithHandlerTail();
  1.1210 +
  1.1211 +    /////////////////////////////////////////////////////////////////
  1.1212 +    // Common interface.
  1.1213 +    /////////////////////////////////////////////////////////////////
  1.1214 +  public:
  1.1215 +    // The following functions are exposed for use in platform-shared code.
  1.1216 +    void Push(const Register &reg) {
  1.1217 +        ma_push(reg);
  1.1218 +        adjustFrame(sizeof(intptr_t));
  1.1219 +    }
  1.1220 +    void Push(const Imm32 imm) {
  1.1221 +        push(imm);
  1.1222 +        adjustFrame(sizeof(intptr_t));
  1.1223 +    }
  1.1224 +    void Push(const ImmWord imm) {
  1.1225 +        push(imm);
  1.1226 +        adjustFrame(sizeof(intptr_t));
  1.1227 +    }
  1.1228 +    void Push(const ImmPtr imm) {
  1.1229 +        Push(ImmWord(uintptr_t(imm.value)));
  1.1230 +    }
  1.1231 +    void Push(const ImmGCPtr ptr) {
  1.1232 +        push(ptr);
  1.1233 +        adjustFrame(sizeof(intptr_t));
  1.1234 +    }
  1.1235 +    void Push(const FloatRegister &t) {
  1.1236 +        VFPRegister r = VFPRegister(t);
  1.1237 +        ma_vpush(VFPRegister(t));
  1.1238 +        adjustFrame(r.size());
  1.1239 +    }
  1.1240 +
  1.1241 +    CodeOffsetLabel PushWithPatch(const ImmWord &word) {
  1.1242 +        framePushed_ += sizeof(word.value);
  1.1243 +        return pushWithPatch(word);
  1.1244 +    }
  1.1245 +    CodeOffsetLabel PushWithPatch(const ImmPtr &imm) {
  1.1246 +        return PushWithPatch(ImmWord(uintptr_t(imm.value)));
  1.1247 +    }
  1.1248 +
  1.1249 +    void PushWithPadding(const Register &reg, const Imm32 extraSpace) {
  1.1250 +        pushWithPadding(reg, extraSpace);
  1.1251 +        adjustFrame(sizeof(intptr_t) + extraSpace.value);
  1.1252 +    }
  1.1253 +    void PushWithPadding(const Imm32 imm, const Imm32 extraSpace) {
  1.1254 +        pushWithPadding(imm, extraSpace);
  1.1255 +        adjustFrame(sizeof(intptr_t) + extraSpace.value);
  1.1256 +    }
  1.1257 +
  1.1258 +    void Pop(const Register &reg) {
  1.1259 +        ma_pop(reg);
  1.1260 +        adjustFrame(-sizeof(intptr_t));
  1.1261 +    }
  1.1262 +    void implicitPop(uint32_t args) {
  1.1263 +        JS_ASSERT(args % sizeof(intptr_t) == 0);
  1.1264 +        adjustFrame(-args);
  1.1265 +    }
  1.1266 +    uint32_t framePushed() const {
  1.1267 +        return framePushed_;
  1.1268 +    }
  1.1269 +    void setFramePushed(uint32_t framePushed) {
  1.1270 +        framePushed_ = framePushed;
  1.1271 +    }
  1.1272 +
  1.1273 +    // Builds an exit frame on the stack, with a return address to an internal
  1.1274 +    // non-function. Returns offset to be passed to markSafepointAt().
  1.1275 +    bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
  1.1276 +
  1.1277 +    void callWithExitFrame(JitCode *target);
  1.1278 +    void callWithExitFrame(JitCode *target, Register dynStack);
  1.1279 +
  1.1280 +    // Makes an Ion call using the only two call methods that are sane for
  1.1281 +    // platform-independent code to use.
  1.1282 +    void callIon(const Register &callee);
  1.1283 +
  1.1284 +    void reserveStack(uint32_t amount);
  1.1285 +    void freeStack(uint32_t amount);
  1.1286 +    void freeStack(Register amount);
  1.1287 +
  1.1288 +    void add32(Register src, Register dest);
  1.1289 +    void add32(Imm32 imm, Register dest);
  1.1290 +    void add32(Imm32 imm, const Address &dest);
  1.1291 +    void sub32(Imm32 imm, Register dest);
  1.1292 +    void sub32(Register src, Register dest);
  1.1293 +    template <typename T>
  1.1294 +    void branchAdd32(Condition cond, T src, Register dest, Label *label) {
  1.1295 +        add32(src, dest);
  1.1296 +        j(cond, label);
  1.1297 +    }
  1.1298 +    template <typename T>
  1.1299 +    void branchSub32(Condition cond, T src, Register dest, Label *label) {
  1.1300 +        sub32(src, dest);
  1.1301 +        j(cond, label);
  1.1302 +    }
  1.1303 +    void xor32(Imm32 imm, Register dest);
  1.1304 +
  1.1305 +    void and32(Imm32 imm, Register dest);
  1.1306 +    void and32(Imm32 imm, const Address &dest);
  1.1307 +    void or32(Imm32 imm, const Address &dest);
  1.1308 +    void xorPtr(Imm32 imm, Register dest);
  1.1309 +    void xorPtr(Register src, Register dest);
  1.1310 +    void orPtr(Imm32 imm, Register dest);
  1.1311 +    void orPtr(Register src, Register dest);
  1.1312 +    void andPtr(Imm32 imm, Register dest);
  1.1313 +    void andPtr(Register src, Register dest);
  1.1314 +    void addPtr(Register src, Register dest);
  1.1315 +    void addPtr(const Address &src, Register dest);
  1.1316 +    void not32(Register reg);
  1.1317 +
  1.1318 +    void move32(const Imm32 &imm, const Register &dest);
  1.1319 +    void move32(const Register &src, const Register &dest);
  1.1320 +
  1.1321 +    void movePtr(const Register &src, const Register &dest);
  1.1322 +    void movePtr(const ImmWord &imm, const Register &dest);
  1.1323 +    void movePtr(const ImmPtr &imm, const Register &dest);
  1.1324 +    void movePtr(const AsmJSImmPtr &imm, const Register &dest);
  1.1325 +    void movePtr(const ImmGCPtr &imm, const Register &dest);
  1.1326 +
  1.1327 +    void load8SignExtend(const Address &address, const Register &dest);
  1.1328 +    void load8SignExtend(const BaseIndex &src, const Register &dest);
  1.1329 +
  1.1330 +    void load8ZeroExtend(const Address &address, const Register &dest);
  1.1331 +    void load8ZeroExtend(const BaseIndex &src, const Register &dest);
  1.1332 +
  1.1333 +    void load16SignExtend(const Address &address, const Register &dest);
  1.1334 +    void load16SignExtend(const BaseIndex &src, const Register &dest);
  1.1335 +
  1.1336 +    void load16ZeroExtend(const Address &address, const Register &dest);
  1.1337 +    void load16ZeroExtend(const BaseIndex &src, const Register &dest);
  1.1338 +
  1.1339 +    void load32(const Address &address, const Register &dest);
  1.1340 +    void load32(const BaseIndex &address, const Register &dest);
  1.1341 +    void load32(const AbsoluteAddress &address, const Register &dest);
  1.1342 +
  1.1343 +    void loadPtr(const Address &address, const Register &dest);
  1.1344 +    void loadPtr(const BaseIndex &src, const Register &dest);
  1.1345 +    void loadPtr(const AbsoluteAddress &address, const Register &dest);
  1.1346 +    void loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest);
  1.1347 +
  1.1348 +    void loadPrivate(const Address &address, const Register &dest);
  1.1349 +
  1.1350 +    void loadDouble(const Address &addr, const FloatRegister &dest);
  1.1351 +    void loadDouble(const BaseIndex &src, const FloatRegister &dest);
  1.1352 +
  1.1353 +    // Load a float value into a register, then expand it to a double.
  1.1354 +    void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
  1.1355 +    void loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest);
  1.1356 +
  1.1357 +    void loadFloat32(const Address &addr, const FloatRegister &dest);
  1.1358 +    void loadFloat32(const BaseIndex &src, const FloatRegister &dest);
  1.1359 +
  1.1360 +    void store8(const Register &src, const Address &address);
  1.1361 +    void store8(const Imm32 &imm, const Address &address);
  1.1362 +    void store8(const Register &src, const BaseIndex &address);
  1.1363 +    void store8(const Imm32 &imm, const BaseIndex &address);
  1.1364 +
  1.1365 +    void store16(const Register &src, const Address &address);
  1.1366 +    void store16(const Imm32 &imm, const Address &address);
  1.1367 +    void store16(const Register &src, const BaseIndex &address);
  1.1368 +    void store16(const Imm32 &imm, const BaseIndex &address);
  1.1369 +
  1.1370 +    void store32(const Register &src, const AbsoluteAddress &address);
  1.1371 +    void store32(const Register &src, const Address &address);
  1.1372 +    void store32(const Register &src, const BaseIndex &address);
  1.1373 +    void store32(const Imm32 &src, const Address &address);
  1.1374 +    void store32(const Imm32 &src, const BaseIndex &address);
  1.1375 +
  1.1376 +    void storePtr(ImmWord imm, const Address &address);
  1.1377 +    void storePtr(ImmPtr imm, const Address &address);
  1.1378 +    void storePtr(ImmGCPtr imm, const Address &address);
  1.1379 +    void storePtr(Register src, const Address &address);
  1.1380 +    void storePtr(const Register &src, const AbsoluteAddress &dest);
  1.1381 +    void storeDouble(FloatRegister src, Address addr) {
  1.1382 +        ma_vstr(src, Operand(addr));
  1.1383 +    }
  1.1384 +    void storeDouble(FloatRegister src, BaseIndex addr) {
  1.1385 +        // Harder cases not handled yet.
  1.1386 +        JS_ASSERT(addr.offset == 0);
  1.1387 +        uint32_t scale = Imm32::ShiftOf(addr.scale).value;
  1.1388 +        ma_vstr(src, addr.base, addr.index, scale);
  1.1389 +    }
  1.1390 +    void moveDouble(FloatRegister src, FloatRegister dest) {
  1.1391 +        ma_vmov(src, dest);
  1.1392 +    }
  1.1393 +
  1.1394 +    void storeFloat32(FloatRegister src, Address addr) {
  1.1395 +        ma_vstr(VFPRegister(src).singleOverlay(), Operand(addr));
  1.1396 +    }
  1.1397 +    void storeFloat32(FloatRegister src, BaseIndex addr) {
  1.1398 +        // Harder cases not handled yet.
  1.1399 +        JS_ASSERT(addr.offset == 0);
  1.1400 +        uint32_t scale = Imm32::ShiftOf(addr.scale).value;
  1.1401 +        ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale);
  1.1402 +    }
  1.1403 +
  1.1404 +    void clampIntToUint8(Register reg) {
  1.1405 +        // Look at (reg >> 8): if it is 0, then reg does not need clamping.
  1.1406 +        // If it is negative, clamp reg to 0; otherwise clamp it to 255.
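         +        // For example (illustrative): 300 >> 8 == 1 (positive), so reg becomes
         +        // 255; -5 >> 8 == -1 (negative), so reg becomes 0; 200 >> 8 == 0, so
         +        // reg is left unchanged.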
  1.1407 +        as_mov(ScratchRegister, asr(reg, 8), SetCond);
  1.1408 +        ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
  1.1409 +        ma_mov(Imm32(0), reg, NoSetCond, Signed);
  1.1410 +    }
  1.1411 +
  1.1412 +    void cmp32(const Register &lhs, const Imm32 &rhs);
  1.1413 +    void cmp32(const Register &lhs, const Register &rhs);
  1.1414 +    void cmp32(const Operand &lhs, const Imm32 &rhs);
  1.1415 +    void cmp32(const Operand &lhs, const Register &rhs);
  1.1416 +
  1.1417 +    void cmpPtr(const Register &lhs, const ImmWord &rhs);
  1.1418 +    void cmpPtr(const Register &lhs, const ImmPtr &rhs);
  1.1419 +    void cmpPtr(const Register &lhs, const Register &rhs);
  1.1420 +    void cmpPtr(const Register &lhs, const ImmGCPtr &rhs);
  1.1421 +    void cmpPtr(const Register &lhs, const Imm32 &rhs);
  1.1422 +    void cmpPtr(const Address &lhs, const Register &rhs);
  1.1423 +    void cmpPtr(const Address &lhs, const ImmWord &rhs);
  1.1424 +    void cmpPtr(const Address &lhs, const ImmPtr &rhs);
  1.1425 +
  1.1426 +    void subPtr(Imm32 imm, const Register dest);
  1.1427 +    void subPtr(const Address &addr, const Register dest);
  1.1428 +    void subPtr(const Register &src, const Register &dest);
  1.1429 +    void subPtr(const Register &src, const Address &dest);
  1.1430 +    void addPtr(Imm32 imm, const Register dest);
  1.1431 +    void addPtr(Imm32 imm, const Address &dest);
  1.1432 +    void addPtr(ImmWord imm, const Register dest) {
  1.1433 +        addPtr(Imm32(imm.value), dest);
  1.1434 +    }
  1.1435 +    void addPtr(ImmPtr imm, const Register dest) {
  1.1436 +        addPtr(ImmWord(uintptr_t(imm.value)), dest);
  1.1437 +    }
  1.1438 +
  1.1439 +    void setStackArg(const Register &reg, uint32_t arg);
  1.1440 +
  1.1441 +    void breakpoint();
  1.1442 +    // conditional breakpoint
  1.1443 +    void breakpoint(Condition cc);
  1.1444 +
  1.1445 +    void compareDouble(FloatRegister lhs, FloatRegister rhs);
  1.1446 +    void branchDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
  1.1447 +                      Label *label);
  1.1448 +
  1.1449 +    void compareFloat(FloatRegister lhs, FloatRegister rhs);
  1.1450 +    void branchFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
  1.1451 +                     Label *label);
  1.1452 +
  1.1453 +    void checkStackAlignment();
  1.1454 +
  1.1455 +    void rshiftPtr(Imm32 imm, Register dest) {
  1.1456 +        ma_lsr(imm, dest, dest);
  1.1457 +    }
  1.1458 +    void lshiftPtr(Imm32 imm, Register dest) {
  1.1459 +        ma_lsl(imm, dest, dest);
  1.1460 +    }
  1.1461 +
  1.1462 +    // If source is a double, load it into dest. If source is int32,
  1.1463 +    // convert it to double. Else, branch to failure.
  1.1464 +    void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
  1.1465 +
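         +    // Materializes a condition as a boolean: dest is set to 0 and then
         +    // conditionally overwritten with 1 when cond holds.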
  1.1466 +    void
  1.1467 +    emitSet(Assembler::Condition cond, const Register &dest)
  1.1468 +    {
  1.1469 +        ma_mov(Imm32(0), dest);
  1.1470 +        ma_mov(Imm32(1), dest, NoSetCond, cond);
  1.1471 +    }
  1.1472 +
  1.1473 +    template <typename T1, typename T2>
  1.1474 +    void cmpPtrSet(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest)
  1.1475 +    {
  1.1476 +        cmpPtr(lhs, rhs);
  1.1477 +        emitSet(cond, dest);
  1.1478 +    }
  1.1479 +    template <typename T1, typename T2>
  1.1480 +    void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest)
  1.1481 +    {
  1.1482 +        cmp32(lhs, rhs);
  1.1483 +        emitSet(cond, dest);
  1.1484 +    }
  1.1485 +
  1.1486 +    void testNullSet(Condition cond, const ValueOperand &value, Register dest) {
  1.1487 +        cond = testNull(cond, value);
  1.1488 +        emitSet(cond, dest);
  1.1489 +    }
  1.1490 +    void testUndefinedSet(Condition cond, const ValueOperand &value, Register dest) {
  1.1491 +        cond = testUndefined(cond, value);
  1.1492 +        emitSet(cond, dest);
  1.1493 +    }
  1.1494 +
  1.1495 +    // Set up a call to C/C++ code, given the number of general arguments it
  1.1496 +    // takes. Note that this only supports cdecl.
  1.1497 +    //
  1.1498 +    // In order for alignment to work correctly, the MacroAssembler must have a
  1.1499 +    // consistent view of the stack displacement. It is okay to call "push"
  1.1500 +    // manually; however, if the stack alignment were to change, the macro
  1.1501 +    // assembler should be notified before starting a call.
  1.1502 +    void setupAlignedABICall(uint32_t args);
  1.1503 +
  1.1504 +    // Sets up an ABI call for when the alignment is not known. This may need a
  1.1505 +    // scratch register.
  1.1506 +    void setupUnalignedABICall(uint32_t args, const Register &scratch);
  1.1507 +
  1.1508 +    // Arguments must be assigned in left-to-right order. This process may
  1.1509 +    // temporarily use more stack, in which case sp-relative addresses will be
  1.1510 +    // automatically adjusted. It is extremely important that sp-relative
  1.1511 +    // addresses are computed *after* setupABICall(). Furthermore, no
  1.1512 +    // operations should be emitted while setting arguments. See the usage
         +    // sketch below the passABIArg declarations.
  1.1513 +    void passABIArg(const MoveOperand &from, MoveOp::Type type);
  1.1514 +    void passABIArg(const Register &reg);
  1.1515 +    void passABIArg(const FloatRegister &reg, MoveOp::Type type);
  1.1516 +    void passABIArg(const ValueOperand &regs);
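         +    // Illustrative usage sketch (cFunc is a hypothetical C function taking
         +    // two pointer-sized arguments; scratch is any free non-argument GPR):
         +    //
         +    //   masm.setupUnalignedABICall(2, scratch);
         +    //   masm.passABIArg(CallTempReg0);
         +    //   masm.passABIArg(CallTempReg1);
         +    //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, cFunc));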
  1.1517 +
  1.1518 +  private:
  1.1519 +    void passHardFpABIArg(const MoveOperand &from, MoveOp::Type type);
  1.1520 +    void passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type);
  1.1521 +
  1.1522 +  protected:
  1.1523 +    bool buildOOLFakeExitFrame(void *fakeReturnAddr);
  1.1524 +
  1.1525 +  private:
  1.1526 +    void callWithABIPre(uint32_t *stackAdjust);
  1.1527 +    void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
  1.1528 +
  1.1529 +  public:
  1.1530 +    // Emits a call to a C/C++ function, resolving all argument moves.
  1.1531 +    void callWithABI(void *fun, MoveOp::Type result = MoveOp::GENERAL);
  1.1532 +    void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
  1.1533 +    void callWithABI(const Address &fun, MoveOp::Type result = MoveOp::GENERAL);
  1.1534 +
  1.1535 +    CodeOffsetLabel labelForPatch() {
  1.1536 +        return CodeOffsetLabel(nextOffset().getOffset());
  1.1537 +    }
  1.1538 +
  1.1539 +    void computeEffectiveAddress(const Address &address, Register dest) {
  1.1540 +        ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
  1.1541 +    }
  1.1542 +    void computeEffectiveAddress(const BaseIndex &address, Register dest) {
  1.1543 +        ma_alu(address.base, lsl(address.index, address.scale), dest, op_add, NoSetCond);
  1.1544 +        if (address.offset)
  1.1545 +            ma_add(dest, Imm32(address.offset), dest, NoSetCond);
  1.1546 +    }
  1.1547 +    void floor(FloatRegister input, Register output, Label *handleNotAnInt);
  1.1548 +    void floorf(FloatRegister input, Register output, Label *handleNotAnInt);
  1.1549 +    void round(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
  1.1550 +    void roundf(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
  1.1551 +
  1.1552 +    void clampCheck(Register r, Label *handleNotAnInt) {
  1.1553 +        // Check explicitly for r == INT_MIN || r == INT_MAX.
  1.1554 +        // This is the instruction sequence that gcc generated for this
  1.1555 +        // operation: r - 0x80000001 is 0xfffffffe or 0xffffffff exactly when
         +        // r is INT_MAX or INT_MIN, and cmn with 3 compares that result
         +        // against -3, so Above catches precisely those two values.
  1.1556 +        ma_sub(r, Imm32(0x80000001), ScratchRegister);
  1.1557 +        ma_cmn(ScratchRegister, Imm32(3));
  1.1558 +        ma_b(handleNotAnInt, Above);
  1.1559 +    }
  1.1560 +
  1.1561 +    void memIntToValue(Address Source, Address Dest) {
  1.1562 +        load32(Source, lr);
  1.1563 +        storeValue(JSVAL_TYPE_INT32, lr, Dest);
  1.1564 +    }
  1.1565 +    void memMove32(Address Source, Address Dest) {
  1.1566 +        loadPtr(Source, lr);
  1.1567 +        storePtr(lr, Dest);
  1.1568 +    }
  1.1569 +    void memMove64(Address Source, Address Dest) {
  1.1570 +        loadPtr(Source, lr);
  1.1571 +        storePtr(lr, Dest);
  1.1572 +        loadPtr(Address(Source.base, Source.offset+4), lr);
  1.1573 +        storePtr(lr, Address(Dest.base, Dest.offset+4));
  1.1574 +    }
  1.1575 +
  1.1576 +    void lea(Operand addr, Register dest) {
  1.1577 +        ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
  1.1578 +    }
  1.1579 +
  1.1580 +    void stackCheck(ImmWord limitAddr, Label *label) {
  1.1583 +        movePtr(limitAddr, ScratchRegister);
  1.1584 +        ma_ldr(Address(ScratchRegister, 0), ScratchRegister);
  1.1585 +        ma_cmp(ScratchRegister, StackPointer);
  1.1586 +        ma_b(label, Assembler::AboveOrEqual);
  1.1587 +    }
  1.1588 +    void abiret() {
  1.1589 +        as_bx(lr);
  1.1590 +    }
  1.1591 +
  1.1592 +    void ma_storeImm(Imm32 c, const Operand &dest) {
  1.1593 +        ma_mov(c, lr);
  1.1594 +        ma_str(lr, dest);
  1.1595 +    }
  1.1596 +    BufferOffset ma_BoundsCheck(Register bounded) {
  1.1597 +        return as_cmp(bounded, Imm8(0));
  1.1598 +    }
  1.1599 +
  1.1600 +    void moveFloat32(FloatRegister src, FloatRegister dest) {
  1.1601 +        as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay());
  1.1602 +    }
  1.1603 +
  1.1604 +#ifdef JSGC_GENERATIONAL
  1.1605 +    void branchPtrInNurseryRange(Register ptr, Register temp, Label *label);
  1.1606 +    void branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label);
  1.1607 +#endif
  1.1608 +};
  1.1609 +
  1.1610 +typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
  1.1611 +
  1.1612 +} // namespace jit
  1.1613 +} // namespace js
  1.1614 +
  1.1615 +#endif /* jit_arm_MacroAssembler_arm_h */
