js/src/jit/arm/CodeGenerator-arm.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/arm/CodeGenerator-arm.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,2334 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "jit/arm/CodeGenerator-arm.h"
    1.11 +
    1.12 +#include "mozilla/MathAlgorithms.h"
    1.13 +
    1.14 +#include "jscntxt.h"
    1.15 +#include "jscompartment.h"
    1.16 +#include "jsnum.h"
    1.17 +
    1.18 +#include "jit/CodeGenerator.h"
    1.19 +#include "jit/IonFrames.h"
    1.20 +#include "jit/JitCompartment.h"
    1.21 +#include "jit/MIR.h"
    1.22 +#include "jit/MIRGraph.h"
    1.23 +#include "vm/Shape.h"
    1.24 +#include "vm/TraceLogging.h"
    1.25 +
    1.26 +#include "jsscriptinlines.h"
    1.27 +
    1.28 +#include "jit/shared/CodeGenerator-shared-inl.h"
    1.29 +
    1.30 +using namespace js;
    1.31 +using namespace js::jit;
    1.32 +
    1.33 +using mozilla::FloorLog2;
    1.34 +using mozilla::NegativeInfinity;
    1.35 +using JS::GenericNaN;
    1.36 +
    1.37 +// shared
// Construct the ARM code generator. All shared state (MIR generator, LIR
// graph, assembler) is owned and initialized by CodeGeneratorShared.
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}
    1.42 +
// Emit the prologue for ordinary Ion code. asm.js functions use
// generateAsmJSPrologue instead.
bool
CodeGeneratorARM::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    masm.checkStackAlignment();
    return true;
}
    1.53 +
// Emit the prologue for asm.js code: push the return address, check for
// over-recursion, then reserve the frame.
bool
CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    masm.Push(lr);

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check after pushing lr but
    // before pushing frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameDepth_);
    masm.checkStackAlignment();
    return true;
}
    1.76 +
// Emit the function epilogue: free the frame and return by popping the
// saved return address directly into pc.
bool
CodeGeneratorARM::generateEpilogue()
{
    // Common exit point that all return paths branch to.
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    // Trace-logger stop events apply only to sequential (non-asm.js) Ion code.
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    if (gen->compilingAsmJS()) {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameDepth_);
        // The asm.js prologue pushed lr; popping it into pc returns.
        masm.Pop(pc);
        JS_ASSERT(masm.framePushed() == 0);
    } else {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameSize());
        JS_ASSERT(masm.framePushed() == 0);
        masm.ma_pop(pc);
    }
    // Flush any pending constant pool so it is emitted after the epilogue.
    masm.dumpPool();
    return true;
}
   1.106 +
   1.107 +void
   1.108 +CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
   1.109 +{
   1.110 +    if (isNextBlock(mirFalse->lir())) {
   1.111 +        jumpToBlock(mirTrue, cond);
   1.112 +    } else {
   1.113 +        jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
   1.114 +        jumpToBlock(mirTrue);
   1.115 +    }
   1.116 +}
   1.117 +
   1.118 +
// Visitor hook: dispatch this out-of-line path back to the code generator.
bool
OutOfLineBailout::accept(CodeGeneratorARM *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}
   1.124 +
   1.125 +bool
   1.126 +CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch *test)
   1.127 +{
   1.128 +    const LAllocation *opd = test->getOperand(0);
   1.129 +    MBasicBlock *ifTrue = test->ifTrue();
   1.130 +    MBasicBlock *ifFalse = test->ifFalse();
   1.131 +
   1.132 +    // Test the operand
   1.133 +    masm.ma_cmp(ToRegister(opd), Imm32(0));
   1.134 +
   1.135 +    if (isNextBlock(ifFalse->lir())) {
   1.136 +        jumpToBlock(ifTrue, Assembler::NonZero);
   1.137 +    } else if (isNextBlock(ifTrue->lir())) {
   1.138 +        jumpToBlock(ifFalse, Assembler::Zero);
   1.139 +    } else {
   1.140 +        jumpToBlock(ifFalse, Assembler::Zero);
   1.141 +        jumpToBlock(ifTrue);
   1.142 +    }
   1.143 +    return true;
   1.144 +}
   1.145 +
   1.146 +bool
   1.147 +CodeGeneratorARM::visitCompare(LCompare *comp)
   1.148 +{
   1.149 +    Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
   1.150 +    const LAllocation *left = comp->getOperand(0);
   1.151 +    const LAllocation *right = comp->getOperand(1);
   1.152 +    const LDefinition *def = comp->getDef(0);
   1.153 +
   1.154 +    if (right->isConstant())
   1.155 +        masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
   1.156 +    else
   1.157 +        masm.ma_cmp(ToRegister(left), ToOperand(right));
   1.158 +    masm.ma_mov(Imm32(0), ToRegister(def));
   1.159 +    masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond);
   1.160 +    return true;
   1.161 +}
   1.162 +
   1.163 +bool
   1.164 +CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch *comp)
   1.165 +{
   1.166 +    Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
   1.167 +    if (comp->right()->isConstant())
   1.168 +        masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right())));
   1.169 +    else
   1.170 +        masm.ma_cmp(ToRegister(comp->left()), ToOperand(comp->right()));
   1.171 +    emitBranch(cond, comp->ifTrue(), comp->ifFalse());
   1.172 +    return true;
   1.173 +
   1.174 +}
   1.175 +
// Emit out-of-line code paths, then the shared lazy-bailout tail that all
// non-table bailouts branch to.
bool
CodeGeneratorARM::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
        // lr is used as the carrier register here; the handler reads it.
        masm.ma_mov(Imm32(frameSize()), lr);

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.branch(handler);
    }

    return true;
}
   1.195 +
// Emit a conditional bailout: when |condition| holds at runtime, leave Ion
// code and resume in the interpreter using |snapshot|. Uses the shared
// bailout table when a slot is available, otherwise a lazy out-of-line path.
bool
CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // In parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.ma_b(ool->entry(), condition);
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // Fast path: branch into the pre-generated bailout table entry.
    if (assignBailoutId(snapshot)) {
        uint8_t *code = deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
        masm.ma_b(code, Relocation::HARDCODED, condition);
        return true;
    }

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool))
        return false;

    masm.ma_b(ool->entry(), condition);

    return true;
}
// Retarget an already-emitted (used but unbound) label at a bailout path
// for |snapshot|. Unlike bailoutIf, ARM never uses the bailout table here.
bool
CodeGeneratorARM::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    // If the assembler already overflowed/failed, abort compilation.
    if (masm.bailed())
        return false;
    JS_ASSERT(label->used());
    JS_ASSERT(!label->bound());

    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // In parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.retarget(label, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // On ARM we don't use a bailout table.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool)) {
        return false;
    }

    masm.retarget(label, ool->entry());

    return true;
}
   1.285 +
// Unconditional bailout: emit a branch to a fresh label and route it
// through the standard bailoutFrom machinery.
bool
CodeGeneratorARM::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.ma_b(&label);
    return bailoutFrom(&label, snapshot);
}
   1.293 +
// Out-of-line lazy bailout stub: push the snapshot offset (twice, to fill
// both BailoutStack fields and keep the stack aligned), then jump to the
// shared deopt tail bound in generateOutOfLineCode.
bool
CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), ScratchRegister);
    masm.ma_push(ScratchRegister); // BailoutStack::padding_
    masm.ma_push(ScratchRegister); // BailoutStack::snapshotOffset_
    masm.ma_b(&deoptLabel_);
    return true;
}
   1.303 +
// Double min/max with full JS semantics: NaN-propagating, and
// distinguishing -0 from +0 (min(-0, 0) is -0; max(-0, 0) is 0).
// The result is computed in place in |first| (which aliases the output).
bool
CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
    FloatRegister output = ToFloatRegister(ins->output());

    JS_ASSERT(first == output);

    // Condition under which |second| (not |first|) is the answer.
    Assembler::Condition cond = ins->mir()->isMax()
        ? Assembler::VFP_LessThanOrEqual
        : Assembler::VFP_GreaterThanOrEqual;
    Label nan, equal, returnSecond, done;

    masm.compareDouble(first, second);
    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
    masm.ma_b(&returnSecond, cond);
    masm.ma_b(&done);

    // Check for zero: equal operands may still differ in sign (-0 vs 0).
    masm.bind(&equal);
    masm.compareDouble(first, InvalidFloatReg);
    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
    } else {
        // min: compute -(-first - second), which is -0 iff either input is -0.
        masm.ma_vneg(first, first);
        masm.ma_vsub(first, second, first);
        masm.ma_vneg(first, first);
    }
    masm.ma_b(&done);

    masm.bind(&nan);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.ma_b(&done);

    masm.bind(&returnSecond);
    masm.ma_vmov(second, output);

    masm.bind(&done);
    return true;
}
   1.348 +
   1.349 +bool
   1.350 +CodeGeneratorARM::visitAbsD(LAbsD *ins)
   1.351 +{
   1.352 +    FloatRegister input = ToFloatRegister(ins->input());
   1.353 +    JS_ASSERT(input == ToFloatRegister(ins->output()));
   1.354 +    masm.ma_vabs(input, input);
   1.355 +    return true;
   1.356 +}
   1.357 +
   1.358 +bool
   1.359 +CodeGeneratorARM::visitAbsF(LAbsF *ins)
   1.360 +{
   1.361 +    FloatRegister input = ToFloatRegister(ins->input());
   1.362 +    JS_ASSERT(input == ToFloatRegister(ins->output()));
   1.363 +    masm.ma_vabs_f32(input, input);
   1.364 +    return true;
   1.365 +}
   1.366 +
   1.367 +bool
   1.368 +CodeGeneratorARM::visitSqrtD(LSqrtD *ins)
   1.369 +{
   1.370 +    FloatRegister input = ToFloatRegister(ins->input());
   1.371 +    FloatRegister output = ToFloatRegister(ins->output());
   1.372 +    masm.ma_vsqrt(input, output);
   1.373 +    return true;
   1.374 +}
   1.375 +
   1.376 +bool
   1.377 +CodeGeneratorARM::visitSqrtF(LSqrtF *ins)
   1.378 +{
   1.379 +    FloatRegister input = ToFloatRegister(ins->input());
   1.380 +    FloatRegister output = ToFloatRegister(ins->output());
   1.381 +    masm.ma_vsqrt_f32(input, output);
   1.382 +    return true;
   1.383 +}
   1.384 +
   1.385 +bool
   1.386 +CodeGeneratorARM::visitAddI(LAddI *ins)
   1.387 +{
   1.388 +    const LAllocation *lhs = ins->getOperand(0);
   1.389 +    const LAllocation *rhs = ins->getOperand(1);
   1.390 +    const LDefinition *dest = ins->getDef(0);
   1.391 +
   1.392 +    if (rhs->isConstant())
   1.393 +        masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
   1.394 +    else
   1.395 +        masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
   1.396 +
   1.397 +    if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
   1.398 +        return false;
   1.399 +
   1.400 +    return true;
   1.401 +}
   1.402 +
   1.403 +bool
   1.404 +CodeGeneratorARM::visitSubI(LSubI *ins)
   1.405 +{
   1.406 +    const LAllocation *lhs = ins->getOperand(0);
   1.407 +    const LAllocation *rhs = ins->getOperand(1);
   1.408 +    const LDefinition *dest = ins->getDef(0);
   1.409 +
   1.410 +    if (rhs->isConstant())
   1.411 +        masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
   1.412 +    else
   1.413 +        masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);
   1.414 +
   1.415 +    if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
   1.416 +        return false;
   1.417 +    return true;
   1.418 +}
   1.419 +
// Int32 multiplication. Constant right-hand sides are strength-reduced
// (negation, shifts, shift+add) where possible; otherwise a real multiply
// is emitted. Handles bailouts for overflow and for a -0 result when the
// MIR node requires them.
bool
CodeGeneratorARM::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout when this condition is met.
        Assembler::Condition c = Assembler::Overflow;
        // Bailout on -0.0: with a non-positive constant, the result is -0
        // when lhs < 0 (constant == 0) or lhs == 0 (constant < 0).
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            masm.ma_cmp(ToRegister(lhs), Imm32(0));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }
        // TODO: move these to ma_mul.
        switch (constant) {
          case -1:
            // dest = 0 - lhs; SetCond so an INT32_MIN negation overflows.
            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
            break;
          case 0:
            masm.ma_mov(Imm32(0), ToRegister(dest));
            return true; // escape overflow check;
          case 1:
            // nop
            masm.ma_mov(ToRegister(lhs), ToRegister(dest));
            return true; // escape overflow check;
          case 2:
            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
            // Overflow is handled later.
            break;
          default: {
            bool handled = false;
            if (constant > 0) {
                // Try shift and add sequences for a positive constant.
                if (!mul->canOverflow()) {
                    // If it cannot overflow, we can do lots of optimizations.
                    Register src = ToRegister(lhs);
                    uint32_t shift = FloorLog2(constant);
                    uint32_t rest = constant - (1 << shift);
                    // See if the constant has one bit set, meaning it can be encoded as a bitshift.
                    if ((1 << shift) == constant) {
                        masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                        handled = true;
                    } else {
                        // If the constant cannot be encoded as (1<<C1), see if it can be encoded as
                        // (1<<C1) | (1<<C2), which can be computed using an add and a shift.
                        uint32_t shift_rest = FloorLog2(rest);
                        if ((1u << shift_rest) == rest) {
                            masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
                            if (shift_rest != 0)
                                masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                            handled = true;
                        }
                    }
                } else if (ToRegister(lhs) != ToRegister(dest)) {
                    // To stay on the safe side, only optimize things that are a
                    // power of 2.

                    uint32_t shift = FloorLog2(constant);
                    if ((1 << shift) == constant) {
                        // dest = lhs * pow(2,shift)
                        masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
                        // At runtime, check (lhs == dest >> shift), if this does not hold,
                        // some bits were lost due to overflow, and the computation should
                        // be resumed as a double.
                        masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                        c = Assembler::NotEqual;
                        handled = true;
                    }
                }
            }

            if (!handled) {
                // General case: real multiply, with an overflow check when
                // the MIR node is fallible.
                if (mul->canOverflow())
                    c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                else
                    masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
            }
          }
        }
        // Bailout on overflow.
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;
    } else {
        Assembler::Condition c = Assembler::Overflow;

        if (mul->canOverflow())
            c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
        else
            masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));

        // Bailout on overflow.
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            Label done;
            masm.ma_cmp(ToRegister(dest), Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);

            // Result is -0 if lhs or rhs is negative.
            // ma_cmn computes lhs + rhs; N is set iff the sum is negative.
            masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}
   1.538 +
// Shared guards for signed integer division (hardware and software paths):
// handle INT32_MIN / -1 overflow, division by zero, and a -0 result, either
// by producing the truncated value (jumping to |done|) or by bailing out.
bool
CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    if (mir->canBeNegativeOverflow()) {
        // Handle INT32_MIN / -1;
        // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 = INT32_MIN
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(INT32_MIN), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (mir->canTruncateInfinities()) {
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle negative 0: 0 / rhs with rhs < 0 would be -0, which int32
    // cannot represent, so bail out.
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.ma_cmp(lhs, Imm32(0));
        masm.ma_b(&nonzero, Assembler::NotEqual);
        masm.ma_cmp(rhs, Imm32(0));
        JS_ASSERT(mir->fallible());
        if (!bailoutIf(Assembler::LessThan, snapshot))
            return false;
        masm.bind(&nonzero);
    }

    return true;
}
   1.593 +
// Signed integer division using the hardware SDIV instruction. When the
// remainder cannot be truncated away, verify via multiply-back that the
// division was exact and bail out otherwise.
bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register temp = ToRegister(ins->getTemp(0));
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    if (mir->canTruncateRemainder()) {
        masm.ma_sdiv(lhs, rhs, output);
    } else {
        // Check that lhs == (lhs / rhs) * rhs, i.e. the division was exact;
        // an inexact result must be recomputed as a double.
        masm.ma_sdiv(lhs, rhs, ScratchRegister);
        masm.ma_mul(ScratchRegister, rhs, temp);
        masm.ma_cmp(lhs, temp);
        if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
            return false;
        masm.ma_mov(ScratchRegister, output);
    }

    masm.bind(&done);

    return true;
}
   1.623 +
extern "C" {
    // ARM EABI runtime division helpers, used when hardware divide is
    // unavailable. Each returns the quotient in r0 and the remainder in r1;
    // declaring the return type as int64_t models that two-register
    // (r0/r1) return value in C++.
    extern int64_t __aeabi_idivmod(int,int);
    extern int64_t __aeabi_uidivmod(int,int);
}
   1.628 +
// Signed integer division without hardware divide: call the EABI runtime
// helper __aeabi_idivmod, which returns quotient in r0 and remainder in r1.
bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code must use a patchable absolute address for the callout.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
    // idivmod returns the quotient in r0, and the remainder in r1.
    if (!mir->canTruncateRemainder()) {
        // A non-zero remainder means the true result is a double; bail out.
        JS_ASSERT(mir->fallible());
        masm.ma_cmp(r1, Imm32(0));
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    return true;
}
   1.661 +
// Signed division by a known power of two, implemented with shifts.
// Negative numerators need a rounding adjustment so the arithmetic shift
// rounds toward zero rather than toward negative infinity.
bool
CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t shift = ins->shift();

    if (shift != 0) {
        MDiv *mir = ins->mir();
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            // lsl by (32 - shift) isolates the low |shift| bits; SetCond
            // makes them observable via the Z flag.
            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        if (!mir->canBeNegativeDividend()) {
            // Numerator is unsigned, so needs no adjusting. Do the shift.
            masm.as_mov(output, asr(lhs, shift));
            return true;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (shift > 1) {
            // Add (2^shift - 1) to negative numerators: sign-extend, then
            // add the top (shift) bits of the sign mask.
            masm.as_mov(ScratchRegister, asr(lhs, 31));
            masm.as_add(ScratchRegister, lhs, lsr(ScratchRegister, 32 - shift));
        } else
            // shift == 1: the sign bit itself is the needed +1 adjustment.
            masm.as_add(ScratchRegister, lhs, lsr(lhs, 32 - shift));

        // Do the shift.
        masm.as_mov(output, asr(ScratchRegister, shift));
    } else {
        // Dividing by 1: just copy the numerator.
        masm.ma_mov(lhs, output);
    }

    return true;
}
   1.701 +
// Shared guards for signed integer modulus: prevent X % 0 and 0 % Y with
// Y < 0, either by producing the truncated 0 result (jumping to |done|)
// or by bailing out.
bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
    // the result should be -0.0, which cannot be represented in integers.
    // X/0 is bad because it will give garbage (or abort), when it should give
    // either \infty, -\infty or NAN.

    // Prevent 0 / X (with X < 0) and X / 0
    // testing X / Y.  Compare Y with 0.
    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
    // If (Y < 0), then we compare X with 0, and bail if X == 0
    // If (Y == 0), then we simply want to bail.  Since this does not set
    // the flags necessary for LT to trigger, we don't test X, and take the
    // bailout because the EQ flag is set.
    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
    if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
        masm.ma_cmp(rhs, Imm32(0));
        // The second compare is predicated on LT, so it only runs when rhs < 0.
        masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
        if (mir->isTruncated()) {
            // NaN|0 == 0 and (0 % -X)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    return true;
}
   1.738 +
// Signed integer modulus using the hardware-divide based ma_smod. A zero
// result with a negative dividend should have been -0, so bail out in that
// case unless the MIR node truncates.
bool
CodeGeneratorARM::visitModI(LModI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();

    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    masm.ma_mov(lhs, callTemp);

    Label done;
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.ma_smod(lhs, rhs, output);

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0, so nothing to do here.
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0 (checked via the saved copy of lhs).
            masm.ma_cmp(output, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }

    masm.bind(&done);
    return true;
}
   1.775 +
bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
    // Software signed modulus (no integer-divide hardware): guard the corner
    // cases inline, then call the EABI helper __aeabi_idivmod.
    // Extract the registers from this instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();
    Label done;

    // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    // callTemp must survive the ABI call below, so it cannot be one of the
    // argument/return registers r0-r3 or the scratch register r12.
    JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
    masm.ma_mov(lhs, callTemp);

    // Prevent INT_MIN % -1: the integer division would give INT_MIN, but the
    // true JS result is -(double)INT_MIN, which is not representable.
    if (mir->canBeNegativeDividend()) {
        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (lhs == INT_MIN), sets EQ if rhs == -1
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0, so just produce 0 and skip the call.
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    // Divide-by-zero / 0 % negative guards (may produce 0 and jump to done).
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    // Per the ARM RTABI, __aeabi_idivmod returns the quotient in r0 and the
    // remainder in r1.
    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0, so the truncated result is fine as-is.
        } else {
            JS_ASSERT(mir->fallible());
            // Remainder (r1) non-zero: the dividend's sign is irrelevant.
            masm.ma_cmp(r1, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            // Remainder is zero: bail if the saved dividend was negative.
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }
    masm.bind(&done);
    return true;
}
   1.838 +
bool
CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
{
    // Modulus by a power of two: mask the absolute value, then restore the
    // dividend's sign. Relies on conditional execution keyed off the flags
    // set by the initial mov (the intervening ops don't set flags).
    Register in = ToRegister(ins->getOperand(0));
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label fin;
    // bug 739870, jbramley has a different sequence that may help with speed here
    // Copy and set flags; 0 % 2^k == 0, so skip straight to the end on zero.
    masm.ma_mov(in, out, SetCond);
    masm.ma_b(&fin, Assembler::Zero);
    // Negative input (N flag from the mov): negate to get the absolute value.
    // NoSetCond keeps the mov's flags live for the conditional ops below.
    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
    masm.ma_and(Imm32((1<<ins->shift())-1), out);
    // Negate back if the dividend was negative; SetCond so a zero result can
    // be detected by the bailout check below.
    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            // Zero here means a negative dividend with zero remainder, i.e.
            // the JS result should have been -0.0: bail out.
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0, so the truncated result is correct.
        }
    }
    masm.bind(&fin);
    return true;
}
   1.864 +
   1.865 +bool
   1.866 +CodeGeneratorARM::visitModMaskI(LModMaskI *ins)
   1.867 +{
   1.868 +    Register src = ToRegister(ins->getOperand(0));
   1.869 +    Register dest = ToRegister(ins->getDef(0));
   1.870 +    Register tmp1 = ToRegister(ins->getTemp(0));
   1.871 +    Register tmp2 = ToRegister(ins->getTemp(1));
   1.872 +    MMod *mir = ins->mir();
   1.873 +    masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
   1.874 +    if (mir->canBeNegativeDividend()) {
   1.875 +        if (!mir->isTruncated()) {
   1.876 +            JS_ASSERT(mir->fallible());
   1.877 +            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
   1.878 +                return false;
   1.879 +        } else {
   1.880 +            // -0|0 == 0
   1.881 +        }
   1.882 +    }
   1.883 +    return true;
   1.884 +}
   1.885 +bool
   1.886 +CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
   1.887 +{
   1.888 +    const LAllocation *input = ins->getOperand(0);
   1.889 +    const LDefinition *dest = ins->getDef(0);
   1.890 +    // this will not actually be true on arm.
   1.891 +    // We can not an imm8m in order to get a wider range
   1.892 +    // of numbers
   1.893 +    JS_ASSERT(!input->isConstant());
   1.894 +
   1.895 +    masm.ma_mvn(ToRegister(input), ToRegister(dest));
   1.896 +    return true;
   1.897 +}
   1.898 +
   1.899 +bool
   1.900 +CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
   1.901 +{
   1.902 +    const LAllocation *lhs = ins->getOperand(0);
   1.903 +    const LAllocation *rhs = ins->getOperand(1);
   1.904 +    const LDefinition *dest = ins->getDef(0);
   1.905 +    // all of these bitops should be either imm32's, or integer registers.
   1.906 +    switch (ins->bitop()) {
   1.907 +      case JSOP_BITOR:
   1.908 +        if (rhs->isConstant())
   1.909 +            masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
   1.910 +        else
   1.911 +            masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
   1.912 +        break;
   1.913 +      case JSOP_BITXOR:
   1.914 +        if (rhs->isConstant())
   1.915 +            masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
   1.916 +        else
   1.917 +            masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
   1.918 +        break;
   1.919 +      case JSOP_BITAND:
   1.920 +        if (rhs->isConstant())
   1.921 +            masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
   1.922 +        else
   1.923 +            masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
   1.924 +        break;
   1.925 +      default:
   1.926 +        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
   1.927 +    }
   1.928 +
   1.929 +    return true;
   1.930 +}
   1.931 +
bool
CodeGeneratorARM::visitShiftI(LShiftI *ins)
{
    // JS shift operators (<<, >>, >>>). The shift count is taken mod 32.
    // For >>> the result is unsigned; if the sign bit of the int32 result is
    // set the value does not fit, so we bail unless the consumer truncates.
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            // Shift-by-zero degenerates to a plain move.
            if (shift)
                masm.ma_lsl(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_RSH:
            if (shift)
                masm.ma_asr(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.ma_lsr(Imm32(shift), lhs, dest);
            } else {
                // x >>> 0 can overflow: a negative int32 becomes a large
                // unsigned value, so bail when the result is negative.
                masm.ma_mov(lhs, dest);
                if (ins->mir()->toUrsh()->fallible()) {
                    masm.ma_cmp(dest, Imm32(0));
                    if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                        return false;
                }
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range since arm
        // shifts by the lower byte of the register (it will attempt to shift
        // by 250 if you ask it to).
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);

        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.ma_lsl(dest, lhs, dest);
            break;
          case JSOP_RSH:
            masm.ma_asr(dest, lhs, dest);
            break;
          case JSOP_URSH:
            masm.ma_lsr(dest, lhs, dest);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow (see the constant case above).
                masm.ma_cmp(dest, Imm32(0));
                if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}
   1.999 +
  1.1000 +bool
  1.1001 +CodeGeneratorARM::visitUrshD(LUrshD *ins)
  1.1002 +{
  1.1003 +    Register lhs = ToRegister(ins->lhs());
  1.1004 +    Register temp = ToRegister(ins->temp());
  1.1005 +
  1.1006 +    const LAllocation *rhs = ins->rhs();
  1.1007 +    FloatRegister out = ToFloatRegister(ins->output());
  1.1008 +
  1.1009 +    if (rhs->isConstant()) {
  1.1010 +        int32_t shift = ToInt32(rhs) & 0x1F;
  1.1011 +        if (shift)
  1.1012 +            masm.ma_lsr(Imm32(shift), lhs, temp);
  1.1013 +        else
  1.1014 +            masm.ma_mov(lhs, temp);
  1.1015 +    } else {
  1.1016 +        masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp);
  1.1017 +        masm.ma_lsr(temp, lhs, temp);
  1.1018 +    }
  1.1019 +
  1.1020 +    masm.convertUInt32ToDouble(temp, out);
  1.1021 +    return true;
  1.1022 +}
  1.1023 +
bool
CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
{
    // Math.pow(x, 0.5), i.e. sqrt(x) with two JS-mandated special cases.
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());

    Label done;

    // Math.pow(-Infinity, 0.5) == Infinity. Compare against -Infinity; Equal
    // only holds for an ordered, equal compare (NaN sets the unordered
    // flags), and negating the -Infinity in the scratch register produces
    // the +Infinity result.
    masm.ma_vimm(NegativeInfinity<double>(), ScratchFloatReg);
    masm.compareDouble(input, ScratchFloatReg);
    masm.ma_vneg(ScratchFloatReg, output, Assembler::Equal);
    masm.ma_b(&done, Assembler::Equal);

    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
    masm.ma_vimm(0.0, ScratchFloatReg);
    masm.ma_vadd(ScratchFloatReg, input, output);
    masm.ma_vsqrt(output, output);

    masm.bind(&done);
    return true;
}
  1.1046 +
  1.1047 +MoveOperand
  1.1048 +CodeGeneratorARM::toMoveOperand(const LAllocation *a) const
  1.1049 +{
  1.1050 +    if (a->isGeneralReg())
  1.1051 +        return MoveOperand(ToRegister(a));
  1.1052 +    if (a->isFloatReg())
  1.1053 +        return MoveOperand(ToFloatRegister(a));
  1.1054 +    JS_ASSERT((ToStackOffset(a) & 3) == 0);
  1.1055 +    int32_t offset = ToStackOffset(a);
  1.1056 +
  1.1057 +    // The way the stack slots work, we assume that everything from depth == 0 downwards is writable
  1.1058 +    // however, since our frame is included in this, ensure that the frame gets skipped
  1.1059 +    if (gen->compilingAsmJS())
  1.1060 +        offset -= AlignmentMidPrologue;
  1.1061 +
  1.1062 +    return MoveOperand(StackPointer, offset);
  1.1063 +}
  1.1064 +
// Out-of-line code holding the jump table for an MTableSwitch. The table
// entries (one CodeLabel per case) are written by emitTableSwitchDispatch;
// visitOutOfLineTableSwitch binds them to their target blocks once block
// offsets are known.
class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
{
    MTableSwitch *mir_;
    // One label per switch case, in the order the table entries were emitted.
    Vector<CodeLabel, 8, IonAllocPolicy> codeLabels_;

    bool accept(CodeGeneratorARM *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    OutOfLineTableSwitch(TempAllocator &alloc, MTableSwitch *mir)
      : mir_(mir),
        codeLabels_(alloc)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    bool addCodeLabel(CodeLabel label) {
        return codeLabels_.append(label);
    }
    CodeLabel codeLabel(unsigned i) {
        return codeLabels_[i];
    }
};
  1.1091 +
  1.1092 +bool
  1.1093 +CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
  1.1094 +{
  1.1095 +    MTableSwitch *mir = ool->mir();
  1.1096 +
  1.1097 +    size_t numCases = mir->numCases();
  1.1098 +    for (size_t i = 0; i < numCases; i++) {
  1.1099 +        LBlock *caseblock = mir->getCase(numCases - 1 - i)->lir();
  1.1100 +        Label *caseheader = caseblock->label();
  1.1101 +        uint32_t caseoffset = caseheader->offset();
  1.1102 +
  1.1103 +        // The entries of the jump table need to be absolute addresses and thus
  1.1104 +        // must be patched after codegen is finished.
  1.1105 +        CodeLabel cl = ool->codeLabel(i);
  1.1106 +        cl.src()->bind(caseoffset);
  1.1107 +        if (!masm.addCodeLabel(cl))
  1.1108 +            return false;
  1.1109 +    }
  1.1110 +
  1.1111 +    return true;
  1.1112 +}
  1.1113 +
bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                          const Register &base)
{
    // The code generated by this is utter hax.
    // The end result looks something like:
    // SUBS index, input, #base
    // RSBSPL index, index, #max
    // LDRPL pc, pc, index lsl 2
    // B default

    // If the range of targets is N through M, we first subtract off the lowest
    // case (N), which both shifts the arguments into the range 0 to (M-N)
    // and sets the Minus flag if the argument was out of range on the low end.

    // Then we do a reverse subtract with the size of the jump table, which will
    // reverse the order of the range (it is size through 0, rather than 0 through
    // size). The main purpose of this is that we set the same flag as the lower
    // bound check for the upper bound check. Lastly, we do this conditionally
    // on the previous check succeeding.

    // Then we conditionally load the pc offset by the (reversed) index (times
    // the address size) into the pc, which branches to the correct case.
    // NOTE: when we go to read the pc, the value that we get back is the pc of
    // the current instruction *PLUS 8*. This means that ldr foo, [pc, +0]
    // reads $pc+8. In other words, there is an empty word after the branch into
    // the switch table before the table actually starts. Since the only other
    // unhandled case is the default case (both out of range high and out of range low)
    // I then insert a branch to default case into the extra slot, which ensures
    // we don't attempt to execute the address table.
    Label *defaultcase = mir->getDefault()->lir()->label();

    int32_t cases = mir->numCases();
    // Subtract off the low bound; flags tell us if the index was below it.
    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
    // Reversed bounds check + index reversal, conditional on the low check.
    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
    // No constant pool may be emitted between the pc-relative load and the
    // table: any extra words would break the pc+8 offset computed above.
    AutoForbidPools afp(&masm);
    masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
    masm.ma_b(defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
    for (int32_t i = 0; i < cases; i++) {
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        if (!ool->addCodeLabel(cl))
            return false;
    }
    if (!addOutOfLineCode(ool))
        return false;

    return true;
}
  1.1169 +
  1.1170 +bool
  1.1171 +CodeGeneratorARM::visitMathD(LMathD *math)
  1.1172 +{
  1.1173 +    const LAllocation *src1 = math->getOperand(0);
  1.1174 +    const LAllocation *src2 = math->getOperand(1);
  1.1175 +    const LDefinition *output = math->getDef(0);
  1.1176 +
  1.1177 +    switch (math->jsop()) {
  1.1178 +      case JSOP_ADD:
  1.1179 +        masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1180 +        break;
  1.1181 +      case JSOP_SUB:
  1.1182 +        masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1183 +        break;
  1.1184 +      case JSOP_MUL:
  1.1185 +        masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1186 +        break;
  1.1187 +      case JSOP_DIV:
  1.1188 +        masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1189 +        break;
  1.1190 +      default:
  1.1191 +        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
  1.1192 +    }
  1.1193 +    return true;
  1.1194 +}
  1.1195 +
  1.1196 +bool
  1.1197 +CodeGeneratorARM::visitMathF(LMathF *math)
  1.1198 +{
  1.1199 +    const LAllocation *src1 = math->getOperand(0);
  1.1200 +    const LAllocation *src2 = math->getOperand(1);
  1.1201 +    const LDefinition *output = math->getDef(0);
  1.1202 +
  1.1203 +    switch (math->jsop()) {
  1.1204 +      case JSOP_ADD:
  1.1205 +        masm.ma_vadd_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1206 +        break;
  1.1207 +      case JSOP_SUB:
  1.1208 +        masm.ma_vsub_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1209 +        break;
  1.1210 +      case JSOP_MUL:
  1.1211 +        masm.ma_vmul_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1212 +        break;
  1.1213 +      case JSOP_DIV:
  1.1214 +        masm.ma_vdiv_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
  1.1215 +        break;
  1.1216 +      default:
  1.1217 +        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
  1.1218 +    }
  1.1219 +    return true;
  1.1220 +}
  1.1221 +
  1.1222 +bool
  1.1223 +CodeGeneratorARM::visitFloor(LFloor *lir)
  1.1224 +{
  1.1225 +    FloatRegister input = ToFloatRegister(lir->input());
  1.1226 +    Register output = ToRegister(lir->output());
  1.1227 +    Label bail;
  1.1228 +    masm.floor(input, output, &bail);
  1.1229 +    if (!bailoutFrom(&bail, lir->snapshot()))
  1.1230 +        return false;
  1.1231 +    return true;
  1.1232 +}
  1.1233 +
  1.1234 +bool
  1.1235 +CodeGeneratorARM::visitFloorF(LFloorF *lir)
  1.1236 +{
  1.1237 +    FloatRegister input = ToFloatRegister(lir->input());
  1.1238 +    Register output = ToRegister(lir->output());
  1.1239 +    Label bail;
  1.1240 +    masm.floorf(input, output, &bail);
  1.1241 +    if (!bailoutFrom(&bail, lir->snapshot()))
  1.1242 +        return false;
  1.1243 +    return true;
  1.1244 +}
  1.1245 +
  1.1246 +bool
  1.1247 +CodeGeneratorARM::visitRound(LRound *lir)
  1.1248 +{
  1.1249 +    FloatRegister input = ToFloatRegister(lir->input());
  1.1250 +    Register output = ToRegister(lir->output());
  1.1251 +    FloatRegister tmp = ToFloatRegister(lir->temp());
  1.1252 +    Label bail;
  1.1253 +    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
  1.1254 +    // case.a
  1.1255 +    masm.round(input, output, &bail, tmp);
  1.1256 +    if (!bailoutFrom(&bail, lir->snapshot()))
  1.1257 +        return false;
  1.1258 +    return true;
  1.1259 +}
  1.1260 +
  1.1261 +bool
  1.1262 +CodeGeneratorARM::visitRoundF(LRoundF *lir)
  1.1263 +{
  1.1264 +    FloatRegister input = ToFloatRegister(lir->input());
  1.1265 +    Register output = ToRegister(lir->output());
  1.1266 +    FloatRegister tmp = ToFloatRegister(lir->temp());
  1.1267 +    Label bail;
  1.1268 +    // Output is either correct, or clamped.  All -0 cases have been translated to a clamped
  1.1269 +    // case.a
  1.1270 +    masm.roundf(input, output, &bail, tmp);
  1.1271 +    if (!bailoutFrom(&bail, lir->snapshot()))
  1.1272 +        return false;
  1.1273 +    return true;
  1.1274 +}
  1.1275 +
void
CodeGeneratorARM::emitRoundDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    // Convert src to an int32, jumping to |fail| when the conversion
    // saturated (out-of-range input or NaN). Rounding mode is whatever
    // ma_vcvt_F64_I32 uses — see the MacroAssembler for details.
    masm.ma_vcvt_F64_I32(src, ScratchFloatReg);
    masm.ma_vxfer(ScratchFloatReg, dest);
    // vcvt saturates to INT_MAX / INT_MIN; treat either extreme as failure.
    // The second cmp is conditional on NotEqual so that EQ, once set by
    // either comparison, survives to the branch.
    masm.ma_cmp(dest, Imm32(0x7fffffff));
    masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    masm.ma_b(fail, Assembler::Equal);
}
  1.1285 +
  1.1286 +bool
  1.1287 +CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32 *ins)
  1.1288 +{
  1.1289 +    return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()));
  1.1290 +}
  1.1291 +
  1.1292 +bool
  1.1293 +CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32 *ins)
  1.1294 +{
  1.1295 +    return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()));
  1.1296 +}
  1.1297 +
  1.1298 +static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
  1.1299 +
  1.1300 +FrameSizeClass
  1.1301 +FrameSizeClass::FromDepth(uint32_t frameDepth)
  1.1302 +{
  1.1303 +    for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
  1.1304 +        if (frameDepth < FrameSizes[i])
  1.1305 +            return FrameSizeClass(i);
  1.1306 +    }
  1.1307 +
  1.1308 +    return FrameSizeClass::None();
  1.1309 +}
  1.1310 +
  1.1311 +FrameSizeClass
  1.1312 +FrameSizeClass::ClassLimit()
  1.1313 +{
  1.1314 +    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
  1.1315 +}
  1.1316 +
uint32_t
FrameSizeClass::frameSize() const
{
    // Map a frame size class back to its frame size. Only meaningful for a
    // real class produced by FromDepth, never the None/limit sentinels.
    JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    JS_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}
  1.1325 +
  1.1326 +ValueOperand
  1.1327 +CodeGeneratorARM::ToValue(LInstruction *ins, size_t pos)
  1.1328 +{
  1.1329 +    Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
  1.1330 +    Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
  1.1331 +    return ValueOperand(typeReg, payloadReg);
  1.1332 +}
  1.1333 +
  1.1334 +ValueOperand
  1.1335 +CodeGeneratorARM::ToOutValue(LInstruction *ins)
  1.1336 +{
  1.1337 +    Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
  1.1338 +    Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
  1.1339 +    return ValueOperand(typeReg, payloadReg);
  1.1340 +}
  1.1341 +
  1.1342 +ValueOperand
  1.1343 +CodeGeneratorARM::ToTempValue(LInstruction *ins, size_t pos)
  1.1344 +{
  1.1345 +    Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
  1.1346 +    Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
  1.1347 +    return ValueOperand(typeReg, payloadReg);
  1.1348 +}
  1.1349 +
  1.1350 +bool
  1.1351 +CodeGeneratorARM::visitValue(LValue *value)
  1.1352 +{
  1.1353 +    const ValueOperand out = ToOutValue(value);
  1.1354 +
  1.1355 +    masm.moveValue(value->value(), out);
  1.1356 +    return true;
  1.1357 +}
  1.1358 +
  1.1359 +bool
  1.1360 +CodeGeneratorARM::visitBox(LBox *box)
  1.1361 +{
  1.1362 +    const LDefinition *type = box->getDef(TYPE_INDEX);
  1.1363 +
  1.1364 +    JS_ASSERT(!box->getOperand(0)->isConstant());
  1.1365 +
  1.1366 +    // On x86, the input operand and the output payload have the same
  1.1367 +    // virtual register. All that needs to be written is the type tag for
  1.1368 +    // the type definition.
  1.1369 +    masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
  1.1370 +    return true;
  1.1371 +}
  1.1372 +
  1.1373 +bool
  1.1374 +CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box)
  1.1375 +{
  1.1376 +    const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
  1.1377 +    const LDefinition *type = box->getDef(TYPE_INDEX);
  1.1378 +    const LAllocation *in = box->getOperand(0);
  1.1379 +
  1.1380 +    FloatRegister reg = ToFloatRegister(in);
  1.1381 +    if (box->type() == MIRType_Float32) {
  1.1382 +        masm.convertFloat32ToDouble(reg, ScratchFloatReg);
  1.1383 +        reg = ScratchFloatReg;
  1.1384 +    }
  1.1385 +
  1.1386 +    //masm.as_vxfer(ToRegister(payload), ToRegister(type),
  1.1387 +    //              VFPRegister(ToFloatRegister(in)), Assembler::FloatToCore);
  1.1388 +    masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type));
  1.1389 +    return true;
  1.1390 +}
  1.1391 +
  1.1392 +bool
  1.1393 +CodeGeneratorARM::visitUnbox(LUnbox *unbox)
  1.1394 +{
  1.1395 +    // Note that for unbox, the type and payload indexes are switched on the
  1.1396 +    // inputs.
  1.1397 +    MUnbox *mir = unbox->mir();
  1.1398 +    Register type = ToRegister(unbox->type());
  1.1399 +
  1.1400 +    if (mir->fallible()) {
  1.1401 +        masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
  1.1402 +        if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
  1.1403 +            return false;
  1.1404 +    }
  1.1405 +    return true;
  1.1406 +}
  1.1407 +
  1.1408 +bool
  1.1409 +CodeGeneratorARM::visitDouble(LDouble *ins)
  1.1410 +{
  1.1411 +
  1.1412 +    const LDefinition *out = ins->getDef(0);
  1.1413 +
  1.1414 +    masm.ma_vimm(ins->getDouble(), ToFloatRegister(out));
  1.1415 +    return true;
  1.1416 +}
  1.1417 +
  1.1418 +bool
  1.1419 +CodeGeneratorARM::visitFloat32(LFloat32 *ins)
  1.1420 +{
  1.1421 +    const LDefinition *out = ins->getDef(0);
  1.1422 +    masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
  1.1423 +    return true;
  1.1424 +}
  1.1425 +
Register
CodeGeneratorARM::splitTagForTest(const ValueOperand &value)
{
    // With nunboxed values the type tag already lives in its own register,
    // so no extraction is required.
    return value.typeReg();
}
  1.1431 +
  1.1432 +bool
  1.1433 +CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
  1.1434 +{
  1.1435 +    const LAllocation *opd = test->input();
  1.1436 +    masm.ma_vcmpz(ToFloatRegister(opd));
  1.1437 +    masm.as_vmrs(pc);
  1.1438 +
  1.1439 +    MBasicBlock *ifTrue = test->ifTrue();
  1.1440 +    MBasicBlock *ifFalse = test->ifFalse();
  1.1441 +    // If the compare set the  0 bit, then the result
  1.1442 +    // is definately false.
  1.1443 +    jumpToBlock(ifFalse, Assembler::Zero);
  1.1444 +    // it is also false if one of the operands is NAN, which is
  1.1445 +    // shown as Overflow.
  1.1446 +    jumpToBlock(ifFalse, Assembler::Overflow);
  1.1447 +    jumpToBlock(ifTrue);
  1.1448 +    return true;
  1.1449 +}
  1.1450 +
  1.1451 +bool
  1.1452 +CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
  1.1453 +{
  1.1454 +    const LAllocation *opd = test->input();
  1.1455 +    masm.ma_vcmpz_f32(ToFloatRegister(opd));
  1.1456 +    masm.as_vmrs(pc);
  1.1457 +
  1.1458 +    MBasicBlock *ifTrue = test->ifTrue();
  1.1459 +    MBasicBlock *ifFalse = test->ifFalse();
  1.1460 +    // If the compare set the  0 bit, then the result
  1.1461 +    // is definately false.
  1.1462 +    jumpToBlock(ifFalse, Assembler::Zero);
  1.1463 +    // it is also false if one of the operands is NAN, which is
  1.1464 +    // shown as Overflow.
  1.1465 +    jumpToBlock(ifFalse, Assembler::Overflow);
  1.1466 +    jumpToBlock(ifTrue);
  1.1467 +    return true;
  1.1468 +}
  1.1469 +
  1.1470 +bool
  1.1471 +CodeGeneratorARM::visitCompareD(LCompareD *comp)
  1.1472 +{
  1.1473 +    FloatRegister lhs = ToFloatRegister(comp->left());
  1.1474 +    FloatRegister rhs = ToFloatRegister(comp->right());
  1.1475 +
  1.1476 +    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  1.1477 +    masm.compareDouble(lhs, rhs);
  1.1478 +    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
  1.1479 +    return true;
  1.1480 +}
  1.1481 +
  1.1482 +bool
  1.1483 +CodeGeneratorARM::visitCompareF(LCompareF *comp)
  1.1484 +{
  1.1485 +    FloatRegister lhs = ToFloatRegister(comp->left());
  1.1486 +    FloatRegister rhs = ToFloatRegister(comp->right());
  1.1487 +
  1.1488 +    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  1.1489 +    masm.compareFloat(lhs, rhs);
  1.1490 +    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
  1.1491 +    return true;
  1.1492 +}
  1.1493 +
  1.1494 +bool
  1.1495 +CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch *comp)
  1.1496 +{
  1.1497 +    FloatRegister lhs = ToFloatRegister(comp->left());
  1.1498 +    FloatRegister rhs = ToFloatRegister(comp->right());
  1.1499 +
  1.1500 +    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
  1.1501 +    masm.compareDouble(lhs, rhs);
  1.1502 +    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
  1.1503 +    return true;
  1.1504 +}
  1.1505 +
  1.1506 +bool
  1.1507 +CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch *comp)
  1.1508 +{
  1.1509 +    FloatRegister lhs = ToFloatRegister(comp->left());
  1.1510 +    FloatRegister rhs = ToFloatRegister(comp->right());
  1.1511 +
  1.1512 +    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
  1.1513 +    masm.compareFloat(lhs, rhs);
  1.1514 +    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
  1.1515 +    return true;
  1.1516 +}
  1.1517 +
bool
CodeGeneratorARM::visitCompareB(LCompareB *lir)
{
    // Strict (in)equality of a boxed value against a boolean, materializing
    // the boolean result in a register.
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        // Lhs is a boolean: compare the payloads directly.
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }

    masm.bind(&notBoolean);
    {
        // Differing types: strict equality is false (so STRICTNE is true).
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
    return true;
}
  1.1548 +
bool
CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    // Branching version of visitCompareB: strictly compare a boxed Value
    // against a boolean and jump directly to the successor blocks.
    MCompare *mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // If lhs is not a boolean the comparison is decided without looking at
    // the payload: STRICTEQ fails, STRICTNE succeeds.
    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    // Otherwise compare the boolean payload and branch on the result.
    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}
  1.1568 +
bool
CodeGeneratorARM::visitCompareV(LCompareV *lir)
{
    // (Strict) equality of two boxed Values, producing a 0/1 result.
    // On NUNBOX32 a Value is a (type tag, payload) register pair.
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    Label notEqual, done;
    // First compare the type tags; only if they match can the payloads
    // decide the result.
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        // Different type tags: equality is false, so the result is 1
        // exactly when we are testing for inequality.
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
    return true;
}
  1.1597 +
bool
CodeGeneratorARM::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    // Branching version of visitCompareV: compare two boxed Values
    // (type tag first, then payload) and jump to the successor blocks.
    MCompare *mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    // A type-tag mismatch means "not equal", which maps to ifFalse for an
    // equality test and ifTrue for an inequality test.
    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    jumpToBlock(notEqual, Assembler::NotEqual);
    // Tags match: the payload comparison decides the branch.
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    emitBranch(cond, lir->ifTrue(), lir->ifFalse());

    return true;
}
  1.1618 +
  1.1619 +bool
  1.1620 +CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch *baab)
  1.1621 +{
  1.1622 +    if (baab->right()->isConstant())
  1.1623 +        masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
  1.1624 +    else
  1.1625 +        masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
  1.1626 +    emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
  1.1627 +    return true;
  1.1628 +}
  1.1629 +
  1.1630 +bool
  1.1631 +CodeGeneratorARM::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir)
  1.1632 +{
  1.1633 +    masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
  1.1634 +    return true;
  1.1635 +}
  1.1636 +
  1.1637 +bool
  1.1638 +CodeGeneratorARM::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
  1.1639 +{
  1.1640 +    masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
  1.1641 +    return true;
  1.1642 +}
  1.1643 +
  1.1644 +bool
  1.1645 +CodeGeneratorARM::visitNotI(LNotI *ins)
  1.1646 +{
  1.1647 +    // It is hard to optimize !x, so just do it the basic way for now.
  1.1648 +    masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
  1.1649 +    masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
  1.1650 +    return true;
  1.1651 +}
  1.1652 +
bool
CodeGeneratorARM::visitNotD(LNotD *ins)
{
    // Logical not of a double: set the output to 1 if the input is falsey,
    // i.e. 0.0, -0.0 or NaN. After a VFP compare against zero, an input of
    // zero sets the Z bit (bit 30) of the APSR and a NaN input sets the
    // V bit (bit 28), so the result is (Z | V) extracted to bit 0.
    FloatRegister opd = ToFloatRegister(ins->input());
    Register dest = ToRegister(ins->output());

    // Compare the input against 0.0, setting the FPSCR flags.
    masm.ma_vcmpz(opd);
    // TODO There are three variations here to compare performance-wise.
    // |nocond| is a compile-time switch between two of them; the branchless
    // first variant is currently always used, keeping the predicated
    // alternative below as dead code until the comparison is done.
    bool nocond = true;
    if (nocond) {
        // Branchless variant: copy the FPSCR flags into dest via APSR,
        // then fold bit 30 (Z) into bit 28 (V) and mask down to bit 0.
        masm.as_vmrs(dest);
        masm.ma_lsr(Imm32(28), dest, dest);
        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
        masm.ma_and(Imm32(1), dest);
    } else {
        // Predicated variant: transfer the FPSCR flags to the APSR
        // (vmrs to pc) and conditionally move 1 on Equal (Z) or
        // Overflow (V), otherwise leave the preloaded 0.
        masm.as_vmrs(pc);
        masm.ma_mov(Imm32(0), dest);
        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
    }
    return true;
}
  1.1681 +
bool
CodeGeneratorARM::visitNotF(LNotF *ins)
{
    // Logical not of a float32: identical to visitNotD except for the
    // single-precision compare. The output is 1 if the input is falsey
    // (0.0, -0.0 or NaN): after comparing with zero, Z (bit 30) of the
    // APSR is set for zero inputs and V (bit 28) for NaN inputs.
    FloatRegister opd = ToFloatRegister(ins->input());
    Register dest = ToRegister(ins->output());

    // Compare the input against 0.0f, setting the FPSCR flags.
    masm.ma_vcmpz_f32(opd);
    // TODO There are three variations here to compare performance-wise.
    // The branchless variant is currently always taken; the predicated
    // alternative is kept as dead code pending that comparison.
    bool nocond = true;
    if (nocond) {
        // Branchless: copy FPSCR flags into dest, fold Z (bit 30) into
        // V's position (bit 28) and mask the result down to bit 0.
        masm.as_vmrs(dest);
        masm.ma_lsr(Imm32(28), dest, dest);
        masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30
        masm.ma_and(Imm32(1), dest);
    } else {
        // Predicated: move the flags to APSR and conditionally set 1 on
        // Equal (Z) or Overflow (V), else keep the preloaded 0.
        masm.as_vmrs(pc);
        masm.ma_mov(Imm32(0), dest);
        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal);
        masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow);
    }
    return true;
}
  1.1710 +
  1.1711 +bool
  1.1712 +CodeGeneratorARM::visitLoadSlotV(LLoadSlotV *load)
  1.1713 +{
  1.1714 +    const ValueOperand out = ToOutValue(load);
  1.1715 +    Register base = ToRegister(load->input());
  1.1716 +    int32_t offset = load->mir()->slot() * sizeof(js::Value);
  1.1717 +
  1.1718 +    masm.loadValue(Address(base, offset), out);
  1.1719 +    return true;
  1.1720 +}
  1.1721 +
  1.1722 +bool
  1.1723 +CodeGeneratorARM::visitLoadSlotT(LLoadSlotT *load)
  1.1724 +{
  1.1725 +    Register base = ToRegister(load->input());
  1.1726 +    int32_t offset = load->mir()->slot() * sizeof(js::Value);
  1.1727 +
  1.1728 +    if (load->mir()->type() == MIRType_Double)
  1.1729 +        masm.loadInt32OrDouble(Operand(base, offset), ToFloatRegister(load->output()));
  1.1730 +    else
  1.1731 +        masm.ma_ldr(Operand(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
  1.1732 +    return true;
  1.1733 +}
  1.1734 +
  1.1735 +bool
  1.1736 +CodeGeneratorARM::visitStoreSlotT(LStoreSlotT *store)
  1.1737 +{
  1.1738 +
  1.1739 +    Register base = ToRegister(store->slots());
  1.1740 +    int32_t offset = store->mir()->slot() * sizeof(js::Value);
  1.1741 +
  1.1742 +    const LAllocation *value = store->value();
  1.1743 +    MIRType valueType = store->mir()->value()->type();
  1.1744 +
  1.1745 +    if (store->mir()->needsBarrier())
  1.1746 +        emitPreBarrier(Address(base, offset), store->mir()->slotType());
  1.1747 +
  1.1748 +    if (valueType == MIRType_Double) {
  1.1749 +        masm.ma_vstr(ToFloatRegister(value), Operand(base, offset));
  1.1750 +        return true;
  1.1751 +    }
  1.1752 +
  1.1753 +    // Store the type tag if needed.
  1.1754 +    if (valueType != store->mir()->slotType())
  1.1755 +        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(base, offset));
  1.1756 +
  1.1757 +    // Store the payload.
  1.1758 +    if (value->isConstant())
  1.1759 +        masm.storePayload(*value->toConstant(), Operand(base, offset));
  1.1760 +    else
  1.1761 +        masm.storePayload(ToRegister(value), Operand(base, offset));
  1.1762 +
  1.1763 +    return true;
  1.1764 +}
  1.1765 +
bool
CodeGeneratorARM::visitLoadElementT(LLoadElementT *load)
{
    // Load an unboxed element from a dense-elements vector. Each of the
    // double/non-double paths has a constant-index and a register-index
    // variant; elements are Value-sized (8 bytes on NUNBOX32).
    Register base = ToRegister(load->elements());
    if (load->mir()->type() == MIRType_Double) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            // loadDoubles() means the elements are known to hold raw
            // doubles; otherwise the element may be an int32 payload that
            // needs conversion.
            if (load->mir()->loadDoubles())
                masm.loadDouble(source, fpreg);
            else
                masm.loadInt32OrDouble(source, fpreg);
        } else {
            Register index = ToRegister(load->index());
            if (load->mir()->loadDoubles())
                masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg);
            else
                masm.loadInt32OrDouble(base, index, fpreg);
        }
    } else {
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            masm.load32(source, ToRegister(load->output()));
        } else {
            // Scale the index by 8 (LSL #3) to address Value-sized slots.
            masm.ma_ldr(DTRAddr(base, DtrRegImmShift(ToRegister(load->index()), LSL, 3)),
                        ToRegister(load->output()));
        }
    }
    // Hole checks require a boxed load and are compiled along another path.
    JS_ASSERT(!load->mir()->needsHoleCheck());
    return true;
}
  1.1797 +
void
CodeGeneratorARM::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
                                    const Register &elements, const LAllocation *index)
{
    // Store an unboxed element into a dense-elements vector. Mirrors
    // visitStoreSlotT, but with both constant-index and register-index
    // addressing variants.
    if (index->isConstant()) {
        Address dest = Address(elements, ToInt32(index) * sizeof(Value));
        // A double overwrites the whole Value; no tag store required.
        if (valueType == MIRType_Double) {
            masm.ma_vstr(ToFloatRegister(value), Operand(dest));
            return;
        }

        // Rewrite the type tag only when the stored type differs from the
        // element's known type.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);

        // Store the payload word.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), dest);
        else
            masm.storePayload(ToRegister(value), dest);
    } else {
        Register indexReg = ToRegister(index);
        if (valueType == MIRType_Double) {
            masm.ma_vstr(ToFloatRegister(value), elements, indexReg);
            return;
        }

        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg);

        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), elements, indexReg);
        else
            masm.storePayload(ToRegister(value), elements, indexReg);
    }
}
  1.1836 +
  1.1837 +bool
  1.1838 +CodeGeneratorARM::visitGuardShape(LGuardShape *guard)
  1.1839 +{
  1.1840 +    Register obj = ToRegister(guard->input());
  1.1841 +    Register tmp = ToRegister(guard->tempInt());
  1.1842 +
  1.1843 +    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp);
  1.1844 +    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));
  1.1845 +
  1.1846 +    return bailoutIf(Assembler::NotEqual, guard->snapshot());
  1.1847 +}
  1.1848 +
  1.1849 +bool
  1.1850 +CodeGeneratorARM::visitGuardObjectType(LGuardObjectType *guard)
  1.1851 +{
  1.1852 +    Register obj = ToRegister(guard->input());
  1.1853 +    Register tmp = ToRegister(guard->tempInt());
  1.1854 +
  1.1855 +    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfType())), tmp);
  1.1856 +    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->typeObject()));
  1.1857 +
  1.1858 +    Assembler::Condition cond =
  1.1859 +        guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
  1.1860 +    return bailoutIf(cond, guard->snapshot());
  1.1861 +}
  1.1862 +
  1.1863 +bool
  1.1864 +CodeGeneratorARM::visitGuardClass(LGuardClass *guard)
  1.1865 +{
  1.1866 +    Register obj = ToRegister(guard->input());
  1.1867 +    Register tmp = ToRegister(guard->tempInt());
  1.1868 +
  1.1869 +    masm.loadObjClass(obj, tmp);
  1.1870 +    masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()));
  1.1871 +    if (!bailoutIf(Assembler::NotEqual, guard->snapshot()))
  1.1872 +        return false;
  1.1873 +    return true;
  1.1874 +}
  1.1875 +
bool
CodeGeneratorARM::visitImplicitThis(LImplicitThis *lir)
{
    // Compute the implicit |this| for a call to |callee|, which is
    // |undefined| whenever the function's environment is the current
    // global; otherwise bail out to the interpreter.
    Register callee = ToRegister(lir->callee());
    const ValueOperand out = ToOutValue(lir);

    // The implicit |this| is always |undefined| if the function's environment
    // is the current global. The type register is used as a scratch here;
    // it is fully overwritten by moveValue below.
    masm.ma_ldr(DTRAddr(callee, DtrOffImm(JSFunction::offsetOfEnvironment())), out.typeReg());
    masm.ma_cmp(out.typeReg(), ImmGCPtr(&gen->info().script()->global()));

    // TODO: OOL stub path.
    if (!bailoutIf(Assembler::NotEqual, lir->snapshot()))
        return false;

    masm.moveValue(UndefinedValue(), out);
    return true;
}
  1.1894 +
bool
CodeGeneratorARM::visitInterruptCheck(LInterruptCheck *lir)
{
    // Poll the runtime's interrupt flag and call into the VM (via an
    // out-of-line path) when it is non-zero.
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;

    void *interrupt = (void*)GetIonContext()->runtime->addressOfInterrupt();
    // lr is used as a scratch register for the flag load; it is not live
    // across this LIR instruction.
    masm.load32(AbsoluteAddress(interrupt), lr);
    masm.ma_cmp(lr, Imm32(0));
    masm.ma_b(ool->entry(), Assembler::NonZero);
    masm.bind(ool->rejoin());
    return true;
}
  1.1909 +
bool
CodeGeneratorARM::generateInvalidateEpilogue()
{
    // Emit the code run when this IonScript is invalidated: push the return
    // address and the (patched-in later) IonScript pointer, then jump to the
    // shared invalidation thunk.

    // Ensure that there is enough space in the buffer for the OsiPoint
    // patching to occur. Otherwise, we could overwrite the invalidation
    // epilogue.
    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
        masm.nop();

    masm.bind(&invalidate_);

    // Push the return address of the point that we bailed out at onto the stack
    masm.Push(lr);

    // Push the Ion script onto the stack (when we determine what that pointer is).
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();

    masm.branch(thunk);

    // We should never reach this point in JIT code -- the invalidation thunk should
    // pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}
  1.1935 +
void
DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on ARM, so no register needs to
    // be reserved from the instruction's allocations (|ins| is unused).
    addState->dispatchScratch = ScratchRegister;
}
  1.1942 +
  1.1943 +template <class U>
  1.1944 +Register
  1.1945 +getBase(U *mir)
  1.1946 +{
  1.1947 +    switch (mir->base()) {
  1.1948 +      case U::Heap: return HeapReg;
  1.1949 +      case U::Global: return GlobalReg;
  1.1950 +    }
  1.1951 +    return InvalidReg;
  1.1952 +}
  1.1953 +
bool
CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    // Not yet implemented on ARM; lowering should not produce this op here.
    MOZ_ASSUME_UNREACHABLE("NYI");
}
  1.1959 +
bool
CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    // Not yet implemented on ARM; lowering should not produce this op here.
    MOZ_ASSUME_UNREACHABLE("NYI");
}
  1.1965 +
bool
CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    // Load a value from the asm.js heap. Three cases: constant pointer
    // (bounds check statically elided), register pointer with elided check,
    // and register pointer with an inline bounds check whose load is
    // predicated on Below so out-of-bounds reads yield 0/NaN.
    const MAsmJSLoadHeap *mir = ins->mir();
    bool isSigned;
    int size;
    bool isFloat = false;
    switch (mir->viewType()) {
      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size =  8; break;
      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size =  8; break;
      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
      // Signedness is irrelevant for a full 32-bit load (no extension).
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  isSigned = true;  size = 32; break;
      case ArrayBufferView::TYPE_FLOAT64: isFloat = true;   size = 64; break;
      case ArrayBufferView::TYPE_FLOAT32: isFloat = true;   size = 32; break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }

    const LAllocation *ptr = ins->ptr();

    if (ptr->isConstant()) {
        // Constant heap offset: lowering only emits this when the bounds
        // check is known to pass.
        JS_ASSERT(mir->skipBoundsCheck());
        int32_t ptrImm = ptr->toConstant()->toInt32();
        JS_ASSERT(ptrImm >= 0);
        if (isFloat) {
            // Float32 loads go through the single-precision overlay of
            // the output VFP register.
            VFPRegister vd(ToFloatRegister(ins->output()));
            if (size == 32)
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
            else
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always);
        }  else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    Register ptrReg = ToRegister(ptr);

    if (mir->skipBoundsCheck()) {
        // Dynamic pointer, but the bounds check was proven unnecessary.
        if (isFloat) {
            VFPRegister vd(ToFloatRegister(ins->output()));
            if (size == 32)
                masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
            else
                masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Always);
        } else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg,
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    // Emit the (patchable) bounds check, then predicate the load on Below.
    // Out-of-bounds loads produce NaN for FP views and 0 for integer views.
    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
    if (isFloat) {
        FloatRegister dst = ToFloatRegister(ins->output());
        VFPRegister vd(dst);
        if (size == 32) {
            masm.convertDoubleToFloat32(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
        } else {
            masm.ma_vmov(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
        }
    } else {
        Register d = ToRegister(ins->output());
        masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual);
        masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
    }
    // Record the bounds-check site so it can be patched when the heap moves.
    return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
  1.2038 +
  1.2039 +bool
  1.2040 +CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
  1.2041 +{
  1.2042 +    const MAsmJSStoreHeap *mir = ins->mir();
  1.2043 +    bool isSigned;
  1.2044 +    int size;
  1.2045 +    bool isFloat = false;
  1.2046 +    switch (mir->viewType()) {
  1.2047 +      case ArrayBufferView::TYPE_INT8:
  1.2048 +      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size = 8; break;
  1.2049 +      case ArrayBufferView::TYPE_INT16:
  1.2050 +      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
  1.2051 +      case ArrayBufferView::TYPE_INT32:
  1.2052 +      case ArrayBufferView::TYPE_UINT32:  isSigned = true;  size = 32; break;
  1.2053 +      case ArrayBufferView::TYPE_FLOAT64: isFloat  = true;  size = 64; break;
  1.2054 +      case ArrayBufferView::TYPE_FLOAT32: isFloat = true;   size = 32; break;
  1.2055 +      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
  1.2056 +    }
  1.2057 +    const LAllocation *ptr = ins->ptr();
  1.2058 +    if (ptr->isConstant()) {
  1.2059 +        JS_ASSERT(mir->skipBoundsCheck());
  1.2060 +        int32_t ptrImm = ptr->toConstant()->toInt32();
  1.2061 +        JS_ASSERT(ptrImm >= 0);
  1.2062 +        if (isFloat) {
  1.2063 +            VFPRegister vd(ToFloatRegister(ins->value()));
  1.2064 +            if (size == 32)
  1.2065 +                masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always);
  1.2066 +            else
  1.2067 +                masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always);
  1.2068 +        } else {
  1.2069 +            masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
  1.2070 +                                  ToRegister(ins->value()), Offset, Assembler::Always);
  1.2071 +        }
  1.2072 +        return true;
  1.2073 +    }
  1.2074 +
  1.2075 +    Register ptrReg = ToRegister(ptr);
  1.2076 +
  1.2077 +    if (mir->skipBoundsCheck()) {
  1.2078 +        Register ptrReg = ToRegister(ptr);
  1.2079 +        if (isFloat) {
  1.2080 +            VFPRegister vd(ToFloatRegister(ins->value()));
  1.2081 +            if (size == 32)
  1.2082 +                masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
  1.2083 +            else
  1.2084 +                masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Always);
  1.2085 +        } else {
  1.2086 +            masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
  1.2087 +                                  ToRegister(ins->value()), Offset, Assembler::Always);
  1.2088 +        }
  1.2089 +        return true;
  1.2090 +    }
  1.2091 +
  1.2092 +    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
  1.2093 +    if (isFloat) {
  1.2094 +        VFPRegister vd(ToFloatRegister(ins->value()));
  1.2095 +        if (size == 32)
  1.2096 +            masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
  1.2097 +        else
  1.2098 +            masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Below);
  1.2099 +    } else {
  1.2100 +        masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
  1.2101 +                              ToRegister(ins->value()), Offset, Assembler::Below);
  1.2102 +    }
  1.2103 +    return masm.append(AsmJSHeapAccess(bo.getOffset()));
  1.2104 +}
  1.2105 +
  1.2106 +bool
  1.2107 +CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
  1.2108 +{
  1.2109 +    const MAsmJSPassStackArg *mir = ins->mir();
  1.2110 +    Operand dst(StackPointer, mir->spOffset());
  1.2111 +    if (ins->arg()->isConstant()) {
  1.2112 +        //masm.as_bkpt();
  1.2113 +        masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
  1.2114 +    } else {
  1.2115 +        if (ins->arg()->isGeneralReg())
  1.2116 +            masm.ma_str(ToRegister(ins->arg()), dst);
  1.2117 +        else
  1.2118 +            masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
  1.2119 +    }
  1.2120 +
  1.2121 +    return true;
  1.2122 +}
  1.2123 +
bool
CodeGeneratorARM::visitUDiv(LUDiv *ins)
{
    // Unsigned 32-bit division using the hardware UDIV instruction.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    Label done;
    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Truncated semantics: x / 0 | 0 == 0, so produce 0 and skip
            // the divide instead of bailing out.
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated: dividing by zero must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_udiv(lhs, rhs, output);

    // A non-truncated result must fit in an int32; an unsigned result with
    // the sign bit set (reads as negative) requires a bailout.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
  1.2159 +
bool
CodeGeneratorARM::visitUMod(LUMod *ins)
{
    // Unsigned 32-bit modulus; same structure as visitUDiv.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Label done;

    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Truncated semantics: x % 0 | 0 == 0, so produce 0 and skip
            // the operation instead of bailing out.
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated: modulus by zero must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_umod(lhs, rhs, output);

    // A non-truncated result must fit in an int32; a result with the sign
    // bit set (reads as negative) requires a bailout.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
  1.2195 +
bool
CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod *ins)
{
    // Software fallback for unsigned div/mod on cores without hardware
    // integer division: call the ARM EABI helper __aeabi_uidivmod, which
    // takes its operands in r0/r1 and returns quotient in r0 and remainder
    // in r1. The register allocator has pinned the operands accordingly.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    JS_ASSERT(lhs == r0);
    JS_ASSERT(rhs == r1);
    JS_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod());
    JS_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0);
    JS_ASSERT_IF(ins->mirRaw()->isMod(), output == r1);

    Label afterDiv;

    // Divide-by-zero check: produce 0 and skip the call entirely.
    masm.ma_cmp(rhs, Imm32(0));
    Label notzero;
    masm.ma_b(&notzero, Assembler::NonZero);
    masm.ma_mov(Imm32(0), output);
    masm.ma_b(&afterDiv);
    masm.bind(&notzero);

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code must go through a patchable immediate; regular Ion code
    // can call the helper directly.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_uidivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));

    masm.bind(&afterDiv);
    return true;
}
  1.2229 +
  1.2230 +bool
  1.2231 +CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins)
  1.2232 +{
  1.2233 +    const MEffectiveAddress *mir = ins->mir();
  1.2234 +    Register base = ToRegister(ins->base());
  1.2235 +    Register index = ToRegister(ins->index());
  1.2236 +    Register output = ToRegister(ins->output());
  1.2237 +    masm.as_add(output, base, lsl(index, mir->scale()));
  1.2238 +    masm.ma_add(Imm32(mir->displacement()), output);
  1.2239 +    return true;
  1.2240 +}
  1.2241 +
  1.2242 +bool
  1.2243 +CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
  1.2244 +{
  1.2245 +    const MAsmJSLoadGlobalVar *mir = ins->mir();
  1.2246 +    unsigned addr = mir->globalDataOffset();
  1.2247 +    if (mir->type() == MIRType_Int32) {
  1.2248 +        masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
  1.2249 +    } else if (mir->type() == MIRType_Float32) {
  1.2250 +        VFPRegister vd(ToFloatRegister(ins->output()));
  1.2251 +        masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay());
  1.2252 +    } else {
  1.2253 +        masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
  1.2254 +    }
  1.2255 +    return true;
  1.2256 +}
  1.2257 +
  1.2258 +bool
  1.2259 +CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
  1.2260 +{
  1.2261 +    const MAsmJSStoreGlobalVar *mir = ins->mir();
  1.2262 +
  1.2263 +    MIRType type = mir->value()->type();
  1.2264 +    JS_ASSERT(IsNumberType(type));
  1.2265 +    unsigned addr = mir->globalDataOffset();
  1.2266 +    if (mir->value()->type() == MIRType_Int32) {
  1.2267 +        masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
  1.2268 +    } else if (mir->value()->type() == MIRType_Float32) {
  1.2269 +        VFPRegister vd(ToFloatRegister(ins->value()));
  1.2270 +        masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr));
  1.2271 +    } else {
  1.2272 +        masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr));
  1.2273 +    }
  1.2274 +    return true;
  1.2275 +}
  1.2276 +
bool
CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
    // Load an entry from an asm.js function-pointer table living in the
    // global data area: out = *(GlobalReg + globalDataOffset + index * 4).
    const MAsmJSLoadFuncPtr *mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register tmp = ToRegister(ins->temp());
    Register out = ToRegister(ins->output());
    unsigned addr = mir->globalDataOffset();
    // Compute the full offset (table base offset + scaled index) in tmp,
    // since the table offset may not fit in a load's immediate field.
    masm.ma_mov(Imm32(addr), tmp);
    masm.as_add(tmp, tmp, lsl(index, 2)); // tmp += index * 4 (pointer-sized entries)
    // Load with register-offset addressing: out = [GlobalReg + tmp].
    masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);

    return true;
}
  1.2292 +
  1.2293 +bool
  1.2294 +CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
  1.2295 +{
  1.2296 +    const MAsmJSLoadFFIFunc *mir = ins->mir();
  1.2297 +
  1.2298 +    masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output()));
  1.2299 +
  1.2300 +    return true;
  1.2301 +}
  1.2302 +
  1.2303 +bool
  1.2304 +CodeGeneratorARM::visitNegI(LNegI *ins)
  1.2305 +{
  1.2306 +    Register input = ToRegister(ins->input());
  1.2307 +    masm.ma_neg(input, ToRegister(ins->output()));
  1.2308 +    return true;
  1.2309 +}
  1.2310 +
  1.2311 +bool
  1.2312 +CodeGeneratorARM::visitNegD(LNegD *ins)
  1.2313 +{
  1.2314 +    FloatRegister input = ToFloatRegister(ins->input());
  1.2315 +    masm.ma_vneg(input, ToFloatRegister(ins->output()));
  1.2316 +    return true;
  1.2317 +}
  1.2318 +
  1.2319 +bool
  1.2320 +CodeGeneratorARM::visitNegF(LNegF *ins)
  1.2321 +{
  1.2322 +    FloatRegister input = ToFloatRegister(ins->input());
  1.2323 +    masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
  1.2324 +    return true;
  1.2325 +}
  1.2326 +
// ForkJoin slice acquisition is not yet implemented on ARM; this visitor
// must never be reached in compiled code on this backend.
bool
CodeGeneratorARM::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
  1.2332 +
// The ForkJoinGetSlice trampoline stub is not yet implemented for ARM;
// callers must not request it on this backend.
JitCode *
JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}

mercurial