/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/CodeGenerator-arm.h"

#include "mozilla/MathAlgorithms.h"

#include "jscntxt.h"
#include "jscompartment.h"
#include "jsnum.h"

#include "jit/CodeGenerator.h"
#include "jit/IonFrames.h"
#include "jit/JitCompartment.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "vm/Shape.h"
#include "vm/TraceLogging.h"

#include "jsscriptinlines.h"

#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::FloorLog2;
using mozilla::NegativeInfinity;
using JS::GenericNaN;

// shared
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}

bool
CodeGeneratorARM::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    masm.checkStackAlignment();
    return true;
}

bool
CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    masm.Push(lr);

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check after pushing lr but
    // before pushing frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameDepth_);
    masm.checkStackAlignment();
    return true;
}

bool
CodeGeneratorARM::generateEpilogue()
{
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    if (gen->compilingAsmJS()) {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameDepth_);
        masm.Pop(pc);
        JS_ASSERT(masm.framePushed() == 0);
        //masm.as_bkpt();
    } else {
        // Pop the stack we allocated at the start of the function.
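        // (This mirrors the reserveStack(frameSize()) in generatePrologue,
        // so framePushed should be back to zero before the assert below.)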
        masm.freeStack(frameSize());
        JS_ASSERT(masm.framePushed() == 0);
        masm.ma_pop(pc);
    }
    masm.dumpPool();
    return true;
}

void
CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse)
{
    if (isNextBlock(mirFalse->lir())) {
        jumpToBlock(mirTrue, cond);
    } else {
        jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
        jumpToBlock(mirTrue);
    }
}


bool
OutOfLineBailout::accept(CodeGeneratorARM *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}

bool
CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch *test)
{
    const LAllocation *opd = test->getOperand(0);
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();

    // Test the operand
    masm.ma_cmp(ToRegister(opd), Imm32(0));

    if (isNextBlock(ifFalse->lir())) {
        jumpToBlock(ifTrue, Assembler::NonZero);
    } else if (isNextBlock(ifTrue->lir())) {
        jumpToBlock(ifFalse, Assembler::Zero);
    } else {
        jumpToBlock(ifFalse, Assembler::Zero);
        jumpToBlock(ifTrue);
    }
    return true;
}

bool
CodeGeneratorARM::visitCompare(LCompare *comp)
{
    Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
    const LAllocation *left = comp->getOperand(0);
    const LAllocation *right = comp->getOperand(1);
    const LDefinition *def = comp->getDef(0);

    if (right->isConstant())
        masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
    else
        masm.ma_cmp(ToRegister(left), ToOperand(right));
    masm.ma_mov(Imm32(0), ToRegister(def));
    masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond);
    return true;
}

bool
CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch *comp)
{
    Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop());
    if (comp->right()->isConstant())
        masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right())));
    else
        masm.ma_cmp(ToRegister(comp->left()), ToOperand(comp->right()));
    emitBranch(cond, comp->ifTrue(), comp->ifFalse());
    return true;
}

bool
CodeGeneratorARM::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
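        // (Despite the wording above, the size is not actually pushed here:
        // it travels in lr, which is dead at this point, and the generic
        // bailout handler is presumably expected to read it from there.)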
        masm.ma_mov(Imm32(frameSize()), lr);

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.branch(handler);
    }

    return true;
}

bool
CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.ma_b(ool->entry(), condition);
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    if (assignBailoutId(snapshot)) {
        uint8_t *code = deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
        masm.ma_b(code, Relocation::HARDCODED, condition);
        return true;
    }

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool))
        return false;

    masm.ma_b(ool->entry(), condition);

    return true;
}

bool
CodeGeneratorARM::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    if (masm.bailed())
        return false;
    JS_ASSERT(label->used());
    JS_ASSERT(!label->bound());

    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.retarget(label, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // On ARM we don't use a bailout table.
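    // Instead, each bailout point gets an out-of-line stub that pushes its
    // snapshot offset and branches to deoptLabel_ (see visitOutOfLineBailout).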
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool)) {
        return false;
    }

    masm.retarget(label, ool->entry());

    return true;
}

bool
CodeGeneratorARM::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.ma_b(&label);
    return bailoutFrom(&label, snapshot);
}

bool
CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), ScratchRegister);
    masm.ma_push(ScratchRegister); // BailoutStack::padding_
    masm.ma_push(ScratchRegister); // BailoutStack::snapshotOffset_
    masm.ma_b(&deoptLabel_);
    return true;
}

bool
CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
    FloatRegister output = ToFloatRegister(ins->output());

    JS_ASSERT(first == output);

    Assembler::Condition cond = ins->mir()->isMax()
                                ? Assembler::VFP_LessThanOrEqual
                                : Assembler::VFP_GreaterThanOrEqual;
    Label nan, equal, returnSecond, done;

    masm.compareDouble(first, second);
    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
    masm.ma_b(&returnSecond, cond);
    masm.ma_b(&done);

    // Check for zero.
    masm.bind(&equal);
    masm.compareDouble(first, InvalidFloatReg);
    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
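        // (The min case below computes -((-first) - second): the sum of the
        // negated operands is -0 only when both inputs are +0, so negating
        // the result yields +0 for min(+0, +0) and -0 otherwise.)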
    } else {
        masm.ma_vneg(first, first);
        masm.ma_vsub(first, second, first);
        masm.ma_vneg(first, first);
    }
    masm.ma_b(&done);

    masm.bind(&nan);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.ma_b(&done);

    masm.bind(&returnSecond);
    masm.ma_vmov(second, output);

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorARM::visitAbsD(LAbsD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    masm.ma_vabs(input, input);
    return true;
}

bool
CodeGeneratorARM::visitAbsF(LAbsF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    masm.ma_vabs_f32(input, input);
    return true;
}

bool
CodeGeneratorARM::visitSqrtD(LSqrtD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());
    masm.ma_vsqrt(input, output);
    return true;
}

bool
CodeGeneratorARM::visitSqrtF(LSqrtF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());
    masm.ma_vsqrt_f32(input, output);
    return true;
}

bool
CodeGeneratorARM::visitAddI(LAddI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);

    if (rhs->isConstant())
        masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
    else
        masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);

    if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
        return false;

    return true;
}

bool
CodeGeneratorARM::visitSubI(LSubI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);

    if (rhs->isConstant())
        masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond);
    else
        masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond);

    if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
        return false;
    return true;
}

bool
CodeGeneratorARM::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout when this condition is met.
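        // ('c' is the flags condition that, if it holds after the multiply
        // emitted below, forces a bailout; it defaults to Overflow and is
        // refined by some of the special cases that follow.)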
        Assembler::Condition c = Assembler::Overflow;
        // Bailout on -0.0
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            masm.ma_cmp(ToRegister(lhs), Imm32(0));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }
        // TODO: move these to ma_mul.
        switch (constant) {
          case -1:
            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
            break;
          case 0:
            masm.ma_mov(Imm32(0), ToRegister(dest));
            return true; // escape overflow check;
          case 1:
            // nop
            masm.ma_mov(ToRegister(lhs), ToRegister(dest));
            return true; // escape overflow check;
          case 2:
            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
            // Overflow is handled later.
            break;
          default: {
            bool handled = false;
            if (constant > 0) {
                // Try shift and add sequences for a positive constant.
                if (!mul->canOverflow()) {
                    // If it cannot overflow, we can do lots of optimizations.
                    Register src = ToRegister(lhs);
                    uint32_t shift = FloorLog2(constant);
                    uint32_t rest = constant - (1 << shift);
                    // See if the constant has one bit set, meaning it can be encoded as a bitshift.
                    if ((1 << shift) == constant) {
                        masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                        handled = true;
                    } else {
                        // If the constant cannot be encoded as (1 << C1), see if it can
                        // be encoded as (1 << C1) | (1 << C2), which can be computed
                        // using an add and a shift.
                        uint32_t shift_rest = FloorLog2(rest);
                        if ((1u << shift_rest) == rest) {
                            masm.as_add(ToRegister(dest), src, lsl(src, shift - shift_rest));
                            if (shift_rest != 0)
                                masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                            handled = true;
                        }
                    }
                } else if (ToRegister(lhs) != ToRegister(dest)) {
                    // To stay on the safe side, only optimize things that are a
                    // power of 2.
                    uint32_t shift = FloorLog2(constant);
                    if ((1 << shift) == constant) {
                        // dest = lhs * pow(2, shift)
                        masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
                        // At runtime, check (lhs == dest >> shift); if this does not hold,
                        // some bits were lost due to overflow, and the computation should
                        // be resumed as a double.
                        masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                        c = Assembler::NotEqual;
                        handled = true;
                    }
                }
            }

            if (!handled) {
                if (mul->canOverflow())
                    c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                else
                    masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
            }
          }
        }
        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;
    } else {
        Assembler::Condition c = Assembler::Overflow;

        //masm.imull(ToOperand(rhs), ToRegister(lhs));
        if (mul->canOverflow())
            c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
        else
            masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            Label done;
            masm.ma_cmp(ToRegister(dest), Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);

            // Result is -0 if lhs or rhs is negative.
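            // (cmn sets flags for lhs + rhs: since the product is zero, at
            // least one operand is zero, so the sum is negative exactly when
            // the other operand is negative.)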
            masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}

bool
CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    if (mir->canBeNegativeOverflow()) {
        // Handle INT32_MIN / -1;
        // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 = INT32_MIN
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(INT32_MIN), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (mir->canTruncateInfinities()) {
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle negative 0.
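    // (0 divided by a negative rhs would be -0, which is not representable
    // as an int32, so bail out unless the result is allowed to be truncated.)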
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.ma_cmp(lhs, Imm32(0));
        masm.ma_b(&nonzero, Assembler::NotEqual);
        masm.ma_cmp(rhs, Imm32(0));
        JS_ASSERT(mir->fallible());
        if (!bailoutIf(Assembler::LessThan, snapshot))
            return false;
        masm.bind(&nonzero);
    }

    return true;
}

bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register temp = ToRegister(ins->getTemp(0));
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    if (mir->canTruncateRemainder()) {
        masm.ma_sdiv(lhs, rhs, output);
    } else {
        masm.ma_sdiv(lhs, rhs, ScratchRegister);
        masm.ma_mul(ScratchRegister, rhs, temp);
        masm.ma_cmp(lhs, temp);
        if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
            return false;
        masm.ma_mov(ScratchRegister, output);
    }

    masm.bind(&done);

    return true;
}

extern "C" {
    extern int64_t __aeabi_idivmod(int,int);
    extern int64_t __aeabi_uidivmod(int,int);
}

bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
    // idivmod returns the quotient in r0, and the remainder in r1.
    if (!mir->canTruncateRemainder()) {
        JS_ASSERT(mir->fallible());
        masm.ma_cmp(r1, Imm32(0));
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    return true;
}

bool
CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t shift = ins->shift();

    if (shift != 0) {
        MDiv *mir = ins->mir();
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        if (!mir->canBeNegativeDividend()) {
            // Numerator is unsigned, so needs no adjusting. Do the shift.
            masm.as_mov(output, asr(lhs, shift));
            return true;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (shift > 1) {
            masm.as_mov(ScratchRegister, asr(lhs, 31));
            masm.as_add(ScratchRegister, lhs, lsr(ScratchRegister, 32 - shift));
        } else
            masm.as_add(ScratchRegister, lhs, lsr(lhs, 32 - shift));

        // Do the shift.
        masm.as_mov(output, asr(ScratchRegister, shift));
    } else {
        masm.ma_mov(lhs, output);
    }

    return true;
}

bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
    // the result should be -0.0, which cannot be represented in integers.
    // X/0 is bad because it will give garbage (or abort), when it should give
    // either \infty, -\infty or NAN.

    // Prevent 0 / X (with X < 0) and X / 0
    // testing X / Y. Compare Y with 0.
    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
    // If (Y < 0), then we compare X with 0, and bail if X == 0
    // If (Y == 0), then we simply want to bail. Since this does not set
    // the flags necessary for LT to trigger, we don't test X, and take the
    // bailout because the EQ flag is set.
    // If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
    if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
        masm.ma_cmp(rhs, Imm32(0));
        masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
        if (mir->isTruncated()) {
            // NaN|0 == 0 and (0 % -X)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    return true;
}

bool
CodeGeneratorARM::visitModI(LModI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();

    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    masm.ma_mov(lhs, callTemp);

    Label done;
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.ma_smod(lhs, rhs, output);

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0
            masm.ma_cmp(output, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();
    Label done;

    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
    masm.ma_mov(lhs, callTemp);

    // Prevent INT_MIN % -1;
    // The integer division will give INT_MIN, but we want -(double)INT_MIN.
    if (mir->canBeNegativeDividend()) {
        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0
            masm.ma_cmp(r1, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }
    masm.bind(&done);
    return true;
}

bool
CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
{
    Register in = ToRegister(ins->getOperand(0));
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label fin;
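    // The sequence below computes |in| & ((1 << shift) - 1) and then restores
    // the dividend's sign, matching JS semantics; e.g. for in == -5 and
    // shift == 2: |-5| & 3 == 1, negated back to -1, and -5 % 4 == -1 in JS.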
    // bug 739870, jbramley has a different sequence that may help with speed here
    masm.ma_mov(in, out, SetCond);
    masm.ma_b(&fin, Assembler::Zero);
    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
    masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0
        }
    }
    masm.bind(&fin);
    return true;
}

bool
CodeGeneratorARM::visitModMaskI(LModMaskI *ins)
{
    Register src = ToRegister(ins->getOperand(0));
    Register dest = ToRegister(ins->getDef(0));
    Register tmp1 = ToRegister(ins->getTemp(0));
    Register tmp2 = ToRegister(ins->getTemp(1));
    MMod *mir = ins->mir();
    masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0
        }
    }
    return true;
}

bool
CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
{
    const LAllocation *input = ins->getOperand(0);
    const LDefinition *dest = ins->getDef(0);
    // This will not actually be true on ARM: with mvn we can "not" an imm8m
    // operand directly, which would give a wider range of numbers.
    JS_ASSERT(!input->isConstant());

    masm.ma_mvn(ToRegister(input), ToRegister(dest));
    return true;
}

bool
CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    // All of these bitops should be either imm32's, or integer registers.
    switch (ins->bitop()) {
      case JSOP_BITOR:
        if (rhs->isConstant())
            masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      case JSOP_BITXOR:
        if (rhs->isConstant())
            masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      case JSOP_BITAND:
        if (rhs->isConstant())
            masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
    }

    return true;
}

bool
CodeGeneratorARM::visitShiftI(LShiftI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            if (shift)
                masm.ma_lsl(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_RSH:
            if (shift)
                masm.ma_asr(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.ma_lsr(Imm32(shift), lhs, dest);
            } else {
                // x >>> 0 can overflow.
                masm.ma_mov(lhs, dest);
                if (ins->mir()->toUrsh()->fallible()) {
                    masm.ma_cmp(dest, Imm32(0));
                    if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                        return false;
                }
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range since arm
        // shifts by the lower byte of the register (it will attempt to shift
        // by 250 if you ask it to).
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);

        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.ma_lsl(dest, lhs, dest);
            break;
          case JSOP_RSH:
            masm.ma_asr(dest, lhs, dest);
            break;
          case JSOP_URSH:
            masm.ma_lsr(dest, lhs, dest);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow.
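                // (A result with the sign bit set does not fit in an int32;
                // >>> yields a uint32, so bail out and redo as a double.)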
                masm.ma_cmp(dest, Imm32(0));
                if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}

bool
CodeGeneratorARM::visitUrshD(LUrshD *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register temp = ToRegister(ins->temp());

    const LAllocation *rhs = ins->rhs();
    FloatRegister out = ToFloatRegister(ins->output());

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        if (shift)
            masm.ma_lsr(Imm32(shift), lhs, temp);
        else
            masm.ma_mov(lhs, temp);
    } else {
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp);
        masm.ma_lsr(temp, lhs, temp);
    }

    masm.convertUInt32ToDouble(temp, out);
    return true;
}

bool
CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());

    Label done;

    // Math.pow(-Infinity, 0.5) == Infinity.
    masm.ma_vimm(NegativeInfinity<double>(), ScratchFloatReg);
    masm.compareDouble(input, ScratchFloatReg);
    masm.ma_vneg(ScratchFloatReg, output, Assembler::Equal);
    masm.ma_b(&done, Assembler::Equal);

    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
    masm.ma_vimm(0.0, ScratchFloatReg);
    masm.ma_vadd(ScratchFloatReg, input, output);
    masm.ma_vsqrt(output, output);

    masm.bind(&done);
    return true;
}

MoveOperand
CodeGeneratorARM::toMoveOperand(const LAllocation *a) const
{
    if (a->isGeneralReg())
        return MoveOperand(ToRegister(a));
    if (a->isFloatReg())
        return MoveOperand(ToFloatRegister(a));
    JS_ASSERT((ToStackOffset(a) & 3) == 0);
    int32_t offset = ToStackOffset(a);

    // The way the stack slots work, we assume that everything from depth == 0
    // downwards is writable. However, since our frame is included in this,
    // ensure that the frame gets skipped.
    if (gen->compilingAsmJS())
        offset -= AlignmentMidPrologue;

    return MoveOperand(StackPointer, offset);
}

class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
{
    MTableSwitch *mir_;
    Vector<CodeLabel, 8, IonAllocPolicy> codeLabels_;

    bool accept(CodeGeneratorARM *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    OutOfLineTableSwitch(TempAllocator &alloc, MTableSwitch *mir)
      : mir_(mir),
        codeLabels_(alloc)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    bool addCodeLabel(CodeLabel label) {
        return codeLabels_.append(label);
    }
    CodeLabel codeLabel(unsigned i) {
        return codeLabels_[i];
    }
};

bool
CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
{
    MTableSwitch *mir = ool->mir();

    size_t numCases = mir->numCases();
    for (size_t i = 0; i < numCases; i++) {
        LBlock *caseblock = mir->getCase(numCases - 1 - i)->lir();
        Label *caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished.
        CodeLabel cl = ool->codeLabel(i);
        cl.src()->bind(caseoffset);
        if (!masm.addCodeLabel(cl))
            return false;
    }

    return true;
}

bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                          const Register &base)
{
    // The code generated by this is utter hax.
    // The end result looks something like:
    // SUBS index, input, #base
    // RSBSPL index, index, #max
    // LDRPL pc, pc, index lsl 2
    // B default

    // If the range of targets is N through M, we first subtract off the lowest
    // case (N), which both shifts the arguments into the range 0 to (M-N)
    // and sets the Minus flag if the argument was out of range on the low end.

    // Then we do a reverse subtract with the size of the jump table, which will
    // reverse the order of the range (it is size through 0, rather than 0 through
    // size). The main purpose of this is that we set the same flag as the lower
    // bound check for the upper bound check. Lastly, we do this conditionally
    // on the previous check succeeding.

    // Then we conditionally load the pc offset by the (reversed) index (times
    // the address size) into the pc, which branches to the correct case.
    // NOTE: when we go to read the pc, the value that we get back is the pc of
    // the current instruction *PLUS 8*. This means that ldr foo, [pc, +0]
    // reads $pc+8. In other words, there is an empty word after the branch into
    // the switch table before the table actually starts. Since the only other
    // unhandled case is the default case (both out of range high and out of
    // range low), I then insert a branch to the default case into the extra
    // slot, which ensures we don't attempt to execute the address table.
    Label *defaultcase = mir->getDefault()->lir()->label();

    int32_t cases = mir->numCases();
    // Lower value with low value
    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
    AutoForbidPools afp(&masm);
    masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
    masm.ma_b(defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
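    // A worked example of the bounds check above: with mir->low() == 10 and
    // three cases, an input of 9 sets Minus at the SUBS; 13 passes the SUBS
    // but sets Minus at the RSBS (2 - 3 < 0); 10..12 leave Minus clear, so
    // only in-range inputs reach the conditional load into pc. The RSBS also
    // reverses the index, which is presumably why visitOutOfLineTableSwitch
    // binds the case labels in reverse order.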
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
    for (int32_t i = 0; i < cases; i++) {
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        if (!ool->addCodeLabel(cl))
            return false;
    }
    if (!addOutOfLineCode(ool))
        return false;

    return true;
}

bool
CodeGeneratorARM::visitMathD(LMathD *math)
{
    const LAllocation *src1 = math->getOperand(0);
    const LAllocation *src2 = math->getOperand(1);
    const LDefinition *output = math->getDef(0);

    switch (math->jsop()) {
      case JSOP_ADD:
        masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_SUB:
        masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_MUL:
        masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_DIV:
        masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
    }
    return true;
}

bool
CodeGeneratorARM::visitMathF(LMathF *math)
{
    const LAllocation *src1 = math->getOperand(0);
    const LAllocation *src2 = math->getOperand(1);
    const LDefinition *output = math->getDef(0);

    switch (math->jsop()) {
      case JSOP_ADD:
        masm.ma_vadd_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_SUB:
        masm.ma_vsub_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_MUL:
        masm.ma_vmul_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      case JSOP_DIV:
        masm.ma_vdiv_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
    }
    return true;
}

bool
CodeGeneratorARM::visitFloor(LFloor *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    Label bail;
    masm.floor(input, output, &bail);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
    return true;
}

bool
CodeGeneratorARM::visitFloorF(LFloorF *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    Label bail;
    masm.floorf(input, output, &bail);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
    return true;
}

bool
CodeGeneratorARM::visitRound(LRound *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    FloatRegister tmp = ToFloatRegister(lir->temp());
    Label bail;
    // Output is either correct, or clamped.
    // All -0 cases have been translated to a clamped case.
    masm.round(input, output, &bail, tmp);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
    return true;
}

bool
CodeGeneratorARM::visitRoundF(LRoundF *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register output = ToRegister(lir->output());
    FloatRegister tmp = ToFloatRegister(lir->temp());
    Label bail;
    // Output is either correct, or clamped. All -0 cases have been translated
    // to a clamped case.
    masm.roundf(input, output, &bail, tmp);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;
    return true;
}

void
CodeGeneratorARM::emitRoundDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    masm.ma_vcvt_F64_I32(src, ScratchFloatReg);
    masm.ma_vxfer(ScratchFloatReg, dest);
    masm.ma_cmp(dest, Imm32(0x7fffffff));
    masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    masm.ma_b(fail, Assembler::Equal);
}

bool
CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
    return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()));
}

bool
CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32 *ins)
{
    return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()));
}

static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
    for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
        if (frameDepth < FrameSizes[i])
            return FrameSizeClass(i);
    }

    return FrameSizeClass::None();
}

FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
}

uint32_t
FrameSizeClass::frameSize() const
{
    JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    JS_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}

ValueOperand
CodeGeneratorARM::ToValue(LInstruction *ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorARM::ToOutValue(LInstruction *ins)
{
    Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorARM::ToTempValue(LInstruction *ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}
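// On ARM, Values use the nunbox32 layout: a 32-bit type tag and a 32-bit
// payload held in a pair of registers, picked out by TYPE_INDEX and
// PAYLOAD_INDEX in the helpers above.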
bool
CodeGeneratorARM::visitValue(LValue *value)
{
    const ValueOperand out = ToOutValue(value);

    masm.moveValue(value->value(), out);
    return true;
}

bool
CodeGeneratorARM::visitBox(LBox *box)
{
    const LDefinition *type = box->getDef(TYPE_INDEX);

    JS_ASSERT(!box->getOperand(0)->isConstant());

    // As on x86, the input operand and the output payload have the same
    // virtual register. All that needs to be written is the type tag for
    // the type definition.
    masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
    return true;
}

bool
CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box)
{
    const LDefinition *payload = box->getDef(PAYLOAD_INDEX);
    const LDefinition *type = box->getDef(TYPE_INDEX);
    const LAllocation *in = box->getOperand(0);

    FloatRegister reg = ToFloatRegister(in);
    if (box->type() == MIRType_Float32) {
        masm.convertFloat32ToDouble(reg, ScratchFloatReg);
        reg = ScratchFloatReg;
    }

    //masm.as_vxfer(ToRegister(payload), ToRegister(type),
    //              VFPRegister(ToFloatRegister(in)), Assembler::FloatToCore);
    masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type));
    return true;
}

bool
CodeGeneratorARM::visitUnbox(LUnbox *unbox)
{
    // Note that for unbox, the type and payload indexes are switched on the
    // inputs.
    MUnbox *mir = unbox->mir();
    Register type = ToRegister(unbox->type());

    if (mir->fallible()) {
        masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
        if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
            return false;
    }
    return true;
}

bool
CodeGeneratorARM::visitDouble(LDouble *ins)
{
    const LDefinition *out = ins->getDef(0);

    masm.ma_vimm(ins->getDouble(), ToFloatRegister(out));
    return true;
}

bool
CodeGeneratorARM::visitFloat32(LFloat32 *ins)
{
    const LDefinition *out = ins->getDef(0);
    masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
    return true;
}

Register
CodeGeneratorARM::splitTagForTest(const ValueOperand &value)
{
    return value.typeReg();
}

bool
CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
{
    const LAllocation *opd = test->input();
    masm.ma_vcmpz(ToFloatRegister(opd));
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the 0 bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if one of the operands is NaN, which is shown as
    // Overflow.
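    // (as_vmrs(pc) copies the FPSCR flags into the APSR, so the result of the
    // VFP compare can drive ordinary conditional branches; an unordered
    // compare sets the V bit, hence the Overflow condition.)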
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}

bool
CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
{
    const LAllocation *opd = test->input();
    masm.ma_vcmpz_f32(ToFloatRegister(opd));
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the 0 bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if one of the operands is NaN, which is shown as
    // Overflow.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}

bool
CodeGeneratorARM::visitCompareD(LCompareD *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
    masm.compareDouble(lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
    return true;
}

bool
CodeGeneratorARM::visitCompareF(LCompareF *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
    masm.compareFloat(lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()));
    return true;
}

bool
CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
    masm.compareDouble(lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
    return true;
}

bool
CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
    masm.compareFloat(lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse());
    return true;
}

bool
CodeGeneratorARM::visitCompareB(LCompareB *lir)
{
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(),
                       Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }

    masm.bind(&notBoolean);
    {
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}

bool
CodeGeneratorARM::visitCompareV(LCompareV *lir)
{
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    Label notEqual, done;
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorARM::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    MBasicBlock *notEqual = (cond == Assembler::Equal) ?
lir->ifFalse() : lir->ifTrue(); michael@0: michael@0: masm.cmp32(lhs.typeReg(), rhs.typeReg()); michael@0: jumpToBlock(notEqual, Assembler::NotEqual); michael@0: masm.cmp32(lhs.payloadReg(), rhs.payloadReg()); michael@0: emitBranch(cond, lir->ifTrue(), lir->ifFalse()); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch *baab) michael@0: { michael@0: if (baab->right()->isConstant()) michael@0: masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right()))); michael@0: else michael@0: masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right())); michael@0: emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse()); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir) michael@0: { michael@0: masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir) michael@0: { michael@0: masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNotI(LNotI *ins) michael@0: { michael@0: // It is hard to optimize !x, so just do it the basic way for now. michael@0: masm.ma_cmp(ToRegister(ins->input()), Imm32(0)); michael@0: masm.emitSet(Assembler::Equal, ToRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNotD(LNotD *ins) michael@0: { michael@0: // Since this operation is a logical NOT, we want to set a bit if michael@0: // the double is falsy, which means 0.0, -0.0 or NaN. michael@0: // When comparing with 0, an input of 0 will set the Z bit (bit 30) michael@0: // and NaN will set the V bit (bit 28) of the APSR. michael@0: FloatRegister opd = ToFloatRegister(ins->input()); michael@0: Register dest = ToRegister(ins->output()); michael@0: michael@0: // Do the compare michael@0: masm.ma_vcmpz(opd); michael@0: // TODO: There are three variations here to compare performance-wise. michael@0: bool nocond = true; michael@0: if (nocond) { michael@0: // Load the value into the dest register michael@0: masm.as_vmrs(dest); michael@0: masm.ma_lsr(Imm32(28), dest, dest); michael@0: masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30 michael@0: masm.ma_and(Imm32(1), dest); michael@0: } else { michael@0: masm.as_vmrs(pc); michael@0: masm.ma_mov(Imm32(0), dest); michael@0: masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal); michael@0: masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNotF(LNotF *ins) michael@0: { michael@0: // Since this operation is a logical NOT, we want to set a bit if michael@0: // the float32 is falsy, which means 0.0, -0.0 or NaN. michael@0: // When comparing with 0, an input of 0 will set the Z bit (bit 30) michael@0: // and NaN will set the V bit (bit 28) of the APSR. michael@0: FloatRegister opd = ToFloatRegister(ins->input()); michael@0: Register dest = ToRegister(ins->output()); michael@0: michael@0: // Do the compare michael@0: masm.ma_vcmpz_f32(opd); michael@0: // TODO: There are three variations here to compare performance-wise.
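michael@0: // Why the "nocond" variant below (and the identical sequence in visitNotD michael@0: // above) works, assuming the usual APSR layout (N = bit 31, Z = bit 30, michael@0: // C = bit 29, V = bit 28): as_vmrs(dest) copies the flags into dest, the michael@0: // shift right by 28 leaves Z in bit 2 and V in bit 0, OR-ing in a further michael@0: // two-bit shift folds Z onto bit 0, and masking with 1 yields 1 exactly michael@0: // when Z or V was set, i.e. when the input was +-0.0 or NaN. Worked michael@0: // example: 0.0 sets Z and C, so (0b0110 | 0b0001) & 1 == 1; NaN sets C michael@0: // and V, so (0b0011 | 0b0000) & 1 == 1; 3.0 sets only C, so michael@0: // (0b0010 | 0b0000) & 1 == 0.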
michael@0: bool nocond = true; michael@0: if (nocond) { michael@0: // Load the value into the dest register michael@0: masm.as_vmrs(dest); michael@0: masm.ma_lsr(Imm32(28), dest, dest); michael@0: masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30 michael@0: masm.ma_and(Imm32(1), dest); michael@0: } else { michael@0: masm.as_vmrs(pc); michael@0: masm.ma_mov(Imm32(0), dest); michael@0: masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal); michael@0: masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitLoadSlotV(LLoadSlotV *load) michael@0: { michael@0: const ValueOperand out = ToOutValue(load); michael@0: Register base = ToRegister(load->input()); michael@0: int32_t offset = load->mir()->slot() * sizeof(js::Value); michael@0: michael@0: masm.loadValue(Address(base, offset), out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitLoadSlotT(LLoadSlotT *load) michael@0: { michael@0: Register base = ToRegister(load->input()); michael@0: int32_t offset = load->mir()->slot() * sizeof(js::Value); michael@0: michael@0: if (load->mir()->type() == MIRType_Double) michael@0: masm.loadInt32OrDouble(Operand(base, offset), ToFloatRegister(load->output())); michael@0: else michael@0: masm.ma_ldr(Operand(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitStoreSlotT(LStoreSlotT *store) michael@0: { michael@0: michael@0: Register base = ToRegister(store->slots()); michael@0: int32_t offset = store->mir()->slot() * sizeof(js::Value); michael@0: michael@0: const LAllocation *value = store->value(); michael@0: MIRType valueType = store->mir()->value()->type(); michael@0: michael@0: if (store->mir()->needsBarrier()) michael@0: emitPreBarrier(Address(base, offset), store->mir()->slotType()); michael@0: michael@0: if (valueType == MIRType_Double) { michael@0: masm.ma_vstr(ToFloatRegister(value), Operand(base, offset)); michael@0: return true; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != store->mir()->slotType()) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(base, offset)); michael@0: michael@0: // Store the payload. 
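michael@0: // A note on the layout this relies on (NUNBOX32, as used on ARM): a michael@0: // js::Value is two 32-bit words, the payload word at offset 0 and the michael@0: // type tag at offset 4, so storePayload() below writes only the payload michael@0: // word and leaves the (conditionally written) tag intact.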
michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), Operand(base, offset)); michael@0: else michael@0: masm.storePayload(ToRegister(value), Operand(base, offset)); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitLoadElementT(LLoadElementT *load) michael@0: { michael@0: Register base = ToRegister(load->elements()); michael@0: if (load->mir()->type() == MIRType_Double) { michael@0: FloatRegister fpreg = ToFloatRegister(load->output()); michael@0: if (load->index()->isConstant()) { michael@0: Address source(base, ToInt32(load->index()) * sizeof(Value)); michael@0: if (load->mir()->loadDoubles()) michael@0: masm.loadDouble(source, fpreg); michael@0: else michael@0: masm.loadInt32OrDouble(source, fpreg); michael@0: } else { michael@0: Register index = ToRegister(load->index()); michael@0: if (load->mir()->loadDoubles()) michael@0: masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg); michael@0: else michael@0: masm.loadInt32OrDouble(base, index, fpreg); michael@0: } michael@0: } else { michael@0: if (load->index()->isConstant()) { michael@0: Address source(base, ToInt32(load->index()) * sizeof(Value)); michael@0: masm.load32(source, ToRegister(load->output())); michael@0: } else { michael@0: masm.ma_ldr(DTRAddr(base, DtrRegImmShift(ToRegister(load->index()), LSL, 3)), michael@0: ToRegister(load->output())); michael@0: } michael@0: } michael@0: JS_ASSERT(!load->mir()->needsHoleCheck()); michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: CodeGeneratorARM::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType, michael@0: const Register &elements, const LAllocation *index) michael@0: { michael@0: if (index->isConstant()) { michael@0: Address dest = Address(elements, ToInt32(index) * sizeof(Value)); michael@0: if (valueType == MIRType_Double) { michael@0: masm.ma_vstr(ToFloatRegister(value), Operand(dest)); michael@0: return; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != elementType) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest); michael@0: michael@0: // Store the payload. michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), dest); michael@0: else michael@0: masm.storePayload(ToRegister(value), dest); michael@0: } else { michael@0: Register indexReg = ToRegister(index); michael@0: if (valueType == MIRType_Double) { michael@0: masm.ma_vstr(ToFloatRegister(value), elements, indexReg); michael@0: return; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != elementType) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg); michael@0: michael@0: // Store the payload. 
michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), elements, indexReg); michael@0: else michael@0: masm.storePayload(ToRegister(value), elements, indexReg); michael@0: } michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitGuardShape(LGuardShape *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp); michael@0: masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape())); michael@0: michael@0: return bailoutIf(Assembler::NotEqual, guard->snapshot()); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitGuardObjectType(LGuardObjectType *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfType())), tmp); michael@0: masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->typeObject())); michael@0: michael@0: Assembler::Condition cond = michael@0: guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual; michael@0: return bailoutIf(cond, guard->snapshot()); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitGuardClass(LGuardClass *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.loadObjClass(obj, tmp); michael@0: masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass())); michael@0: if (!bailoutIf(Assembler::NotEqual, guard->snapshot())) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitImplicitThis(LImplicitThis *lir) michael@0: { michael@0: Register callee = ToRegister(lir->callee()); michael@0: const ValueOperand out = ToOutValue(lir); michael@0: michael@0: // The implicit |this| is always |undefined| if the function's environment michael@0: // is the current global. michael@0: masm.ma_ldr(DTRAddr(callee, DtrOffImm(JSFunction::offsetOfEnvironment())), out.typeReg()); michael@0: masm.ma_cmp(out.typeReg(), ImmGCPtr(&gen->info().script()->global())); michael@0: michael@0: // TODO: OOL stub path. michael@0: if (!bailoutIf(Assembler::NotEqual, lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.moveValue(UndefinedValue(), out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitInterruptCheck(LInterruptCheck *lir) michael@0: { michael@0: OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing()); michael@0: if (!ool) michael@0: return false; michael@0: michael@0: void *interrupt = (void*)GetIonContext()->runtime->addressOfInterrupt(); michael@0: masm.load32(AbsoluteAddress(interrupt), lr); michael@0: masm.ma_cmp(lr, Imm32(0)); michael@0: masm.ma_b(ool->entry(), Assembler::NonZero); michael@0: masm.bind(ool->rejoin()); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::generateInvalidateEpilogue() michael@0: { michael@0: // Ensure that there is enough space in the buffer for the OsiPoint michael@0: // patching to occur. Otherwise, we could overwrite the invalidation michael@0: // epilogue. 
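michael@0: // With Assembler::nopSize() == 4 and sizeof(void *) == 4 on ARM, the loop michael@0: // below typically emits a single nop; the point is to reserve one michael@0: // pointer-sized immediate's worth of space so that patching the OsiPoint michael@0: // cannot clobber the first instruction of the epilogue.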
michael@0: for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize()) michael@0: masm.nop(); michael@0: michael@0: masm.bind(&invalidate_); michael@0: michael@0: // Push the return address of the point that we bailed out at onto the stack. michael@0: masm.Push(lr); michael@0: michael@0: // Push the Ion script onto the stack (when we determine what that pointer is). michael@0: invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1))); michael@0: JitCode *thunk = gen->jitRuntime()->getInvalidationThunk(); michael@0: michael@0: masm.branch(thunk); michael@0: michael@0: // We should never reach this point in JIT code -- the invalidation thunk should michael@0: // pop the invalidated JS frame and return directly to its caller. michael@0: masm.assumeUnreachable("Should have returned directly to its caller instead of here."); michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState) michael@0: { michael@0: // Can always use the scratch register on ARM. michael@0: addState->dispatchScratch = ScratchRegister; michael@0: } michael@0: michael@0: template <typename U> michael@0: Register michael@0: getBase(U *mir) michael@0: { michael@0: switch (mir->base()) { michael@0: case U::Heap: return HeapReg; michael@0: case U::Global: return GlobalReg; michael@0: } michael@0: return InvalidReg; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins) michael@0: { michael@0: const MAsmJSLoadHeap *mir = ins->mir(); michael@0: bool isSigned; michael@0: int size; michael@0: bool isFloat = false; michael@0: switch (mir->viewType()) { michael@0: case ArrayBufferView::TYPE_INT8: isSigned = true; size = 8; break; michael@0: case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break; michael@0: case ArrayBufferView::TYPE_INT16: isSigned = true; size = 16; break; michael@0: case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break; michael@0: case ArrayBufferView::TYPE_INT32: michael@0: case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break; michael@0: case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break; michael@0: case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break; michael@0: default: MOZ_ASSUME_UNREACHABLE("unexpected array type"); michael@0: } michael@0: michael@0: const LAllocation *ptr = ins->ptr(); michael@0: michael@0: if (ptr->isConstant()) { michael@0: JS_ASSERT(mir->skipBoundsCheck()); michael@0: int32_t ptrImm = ptr->toConstant()->toInt32(); michael@0: JS_ASSERT(ptrImm >= 0); michael@0: if (isFloat) { michael@0: VFPRegister vd(ToFloatRegister(ins->output())); michael@0: if (size == 32) michael@0: masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always); michael@0: else michael@0: masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always); michael@0: } else { michael@0: masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm), michael@0: ToRegister(ins->output()), Offset, Assembler::Always); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: Register
ptrReg = ToRegister(ptr); michael@0: michael@0: if (mir->skipBoundsCheck()) { michael@0: if (isFloat) { michael@0: VFPRegister vd(ToFloatRegister(ins->output())); michael@0: if (size == 32) michael@0: masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always); michael@0: else michael@0: masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Always); michael@0: } else { michael@0: masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, michael@0: ToRegister(ins->output()), Offset, Assembler::Always); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: BufferOffset bo = masm.ma_BoundsCheck(ptrReg); michael@0: if (isFloat) { michael@0: FloatRegister dst = ToFloatRegister(ins->output()); michael@0: VFPRegister vd(dst); michael@0: if (size == 32) { michael@0: masm.convertDoubleToFloat32(NANReg, dst, Assembler::AboveOrEqual); michael@0: masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below); michael@0: } else { michael@0: masm.ma_vmov(NANReg, dst, Assembler::AboveOrEqual); michael@0: masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below); michael@0: } michael@0: } else { michael@0: Register d = ToRegister(ins->output()); michael@0: masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual); michael@0: masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below); michael@0: } michael@0: return masm.append(AsmJSHeapAccess(bo.getOffset())); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins) michael@0: { michael@0: const MAsmJSStoreHeap *mir = ins->mir(); michael@0: bool isSigned; michael@0: int size; michael@0: bool isFloat = false; michael@0: switch (mir->viewType()) { michael@0: case ArrayBufferView::TYPE_INT8: michael@0: case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break; michael@0: case ArrayBufferView::TYPE_INT16: michael@0: case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break; michael@0: case ArrayBufferView::TYPE_INT32: michael@0: case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break; michael@0: case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break; michael@0: case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break; michael@0: default: MOZ_ASSUME_UNREACHABLE("unexpected array type"); michael@0: } michael@0: const LAllocation *ptr = ins->ptr(); michael@0: if (ptr->isConstant()) { michael@0: JS_ASSERT(mir->skipBoundsCheck()); michael@0: int32_t ptrImm = ptr->toConstant()->toInt32(); michael@0: JS_ASSERT(ptrImm >= 0); michael@0: if (isFloat) { michael@0: VFPRegister vd(ToFloatRegister(ins->value())); michael@0: if (size == 32) michael@0: masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always); michael@0: else michael@0: masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always); michael@0: } else { michael@0: masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm), michael@0: ToRegister(ins->value()), Offset, Assembler::Always); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: Register ptrReg = ToRegister(ptr); michael@0: michael@0: if (mir->skipBoundsCheck()) { michael@0: if (isFloat) { michael@0: VFPRegister vd(ToFloatRegister(ins->value())); michael@0: if (size == 32) michael@0: masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always); michael@0: else michael@0: masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Always); michael@0: } else { michael@0:
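michael@0: // Non-float store on the bounds-check-free path; ma_dataTransferN is michael@0: // expected to select the width-appropriate store (8-, 16- or 32-bit) michael@0: // for the access size computed above.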
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, michael@0: ToRegister(ins->value()), Offset, Assembler::Always); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: BufferOffset bo = masm.ma_BoundsCheck(ptrReg); michael@0: if (isFloat) { michael@0: VFPRegister vd(ToFloatRegister(ins->value())); michael@0: if (size == 32) michael@0: masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below); michael@0: else michael@0: masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Below); michael@0: } else { michael@0: masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, michael@0: ToRegister(ins->value()), Offset, Assembler::Below); michael@0: } michael@0: return masm.append(AsmJSHeapAccess(bo.getOffset())); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins) michael@0: { michael@0: const MAsmJSPassStackArg *mir = ins->mir(); michael@0: Operand dst(StackPointer, mir->spOffset()); michael@0: if (ins->arg()->isConstant()) { michael@0: //masm.as_bkpt(); michael@0: masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst); michael@0: } else { michael@0: if (ins->arg()->isGeneralReg()) michael@0: masm.ma_str(ToRegister(ins->arg()), dst); michael@0: else michael@0: masm.ma_vstr(ToFloatRegister(ins->arg()), dst); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitUDiv(LUDiv *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register rhs = ToRegister(ins->rhs()); michael@0: Register output = ToRegister(ins->output()); michael@0: michael@0: Label done; michael@0: if (ins->mir()->canBeDivideByZero()) { michael@0: masm.ma_cmp(rhs, Imm32(0)); michael@0: if (ins->mir()->isTruncated()) { michael@0: // Infinity|0 == 0 michael@0: Label skip; michael@0: masm.ma_b(&skip, Assembler::NotEqual); michael@0: masm.ma_mov(Imm32(0), output); michael@0: masm.ma_b(&done); michael@0: masm.bind(&skip); michael@0: } else { michael@0: JS_ASSERT(ins->mir()->fallible()); michael@0: if (!bailoutIf(Assembler::Equal, ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: masm.ma_udiv(lhs, rhs, output); michael@0: michael@0: if (!ins->mir()->isTruncated()) { michael@0: masm.ma_cmp(output, Imm32(0)); michael@0: if (!bailoutIf(Assembler::LessThan, ins->snapshot())) michael@0: return false; michael@0: } michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitUMod(LUMod *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register rhs = ToRegister(ins->rhs()); michael@0: Register output = ToRegister(ins->output()); michael@0: Label done; michael@0: michael@0: if (ins->mir()->canBeDivideByZero()) { michael@0: masm.ma_cmp(rhs, Imm32(0)); michael@0: if (ins->mir()->isTruncated()) { michael@0: // Infinity|0 == 0 michael@0: Label skip; michael@0: masm.ma_b(&skip, Assembler::NotEqual); michael@0: masm.ma_mov(Imm32(0), output); michael@0: masm.ma_b(&done); michael@0: masm.bind(&skip); michael@0: } else { michael@0: JS_ASSERT(ins->mir()->fallible()); michael@0: if (!bailoutIf(Assembler::Equal, ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: masm.ma_umod(lhs, rhs, output); michael@0: michael@0: if (!ins->mir()->isTruncated()) { michael@0: masm.ma_cmp(output, Imm32(0)); michael@0: if (!bailoutIf(Assembler::LessThan, ins->snapshot())) michael@0: return false; 
michael@0: } michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register rhs = ToRegister(ins->rhs()); michael@0: Register output = ToRegister(ins->output()); michael@0: michael@0: JS_ASSERT(lhs == r0); michael@0: JS_ASSERT(rhs == r1); michael@0: JS_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod()); michael@0: JS_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0); michael@0: JS_ASSERT_IF(ins->mirRaw()->isMod(), output == r1); michael@0: michael@0: // __aeabi_uidivmod returns the quotient in r0 and the remainder in r1, michael@0: // which is why the output register is constrained as asserted above. michael@0: michael@0: Label afterDiv; michael@0: michael@0: // A divisor of 0 must produce 0 for both the quotient and the remainder; michael@0: // check for it and skip the runtime call in that case. michael@0: masm.ma_cmp(rhs, Imm32(0)); michael@0: Label notzero; michael@0: masm.ma_b(&notzero, Assembler::NonZero); michael@0: masm.ma_mov(Imm32(0), output); michael@0: masm.ma_b(&afterDiv); michael@0: masm.bind(&notzero); michael@0: michael@0: masm.setupAlignedABICall(2); michael@0: masm.passABIArg(lhs); michael@0: masm.passABIArg(rhs); michael@0: if (gen->compilingAsmJS()) michael@0: masm.callWithABI(AsmJSImm_aeabi_uidivmod); michael@0: else michael@0: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod)); michael@0: michael@0: masm.bind(&afterDiv); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins) michael@0: { michael@0: const MEffectiveAddress *mir = ins->mir(); michael@0: Register base = ToRegister(ins->base()); michael@0: Register index = ToRegister(ins->index()); michael@0: Register output = ToRegister(ins->output()); michael@0: masm.as_add(output, base, lsl(index, mir->scale())); michael@0: masm.ma_add(Imm32(mir->displacement()), output); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins) michael@0: { michael@0: const MAsmJSLoadGlobalVar *mir = ins->mir(); michael@0: unsigned addr = mir->globalDataOffset(); michael@0: if (mir->type() == MIRType_Int32) { michael@0: masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output())); michael@0: } else if (mir->type() == MIRType_Float32) { michael@0: VFPRegister vd(ToFloatRegister(ins->output())); michael@0: masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay()); michael@0: } else { michael@0: masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output())); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins) michael@0: { michael@0: const MAsmJSStoreGlobalVar *mir = ins->mir(); michael@0: michael@0: MIRType type = mir->value()->type(); michael@0: JS_ASSERT(IsNumberType(type)); michael@0: unsigned addr = mir->globalDataOffset(); michael@0: if (mir->value()->type() == MIRType_Int32) { michael@0: masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value())); michael@0: } else if (mir->value()->type() == MIRType_Float32) { michael@0: VFPRegister vd(ToFloatRegister(ins->value())); michael@0: masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr)); michael@0: } else { michael@0: masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr)); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins) michael@0: { michael@0: const MAsmJSLoadFuncPtr *mir = ins->mir(); michael@0: michael@0: Register index = ToRegister(ins->index()); michael@0: Register tmp =
ToRegister(ins->temp()); michael@0: Register out = ToRegister(ins->output()); michael@0: unsigned addr = mir->globalDataOffset(); michael@0: masm.ma_mov(Imm32(addr), tmp); michael@0: masm.as_add(tmp, tmp, lsl(index, 2)); michael@0: masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins) michael@0: { michael@0: const MAsmJSLoadFFIFunc *mir = ins->mir(); michael@0: michael@0: masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output())); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNegI(LNegI *ins) michael@0: { michael@0: Register input = ToRegister(ins->input()); michael@0: masm.ma_neg(input, ToRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNegD(LNegD *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: masm.ma_vneg(input, ToFloatRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitNegF(LNegF *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: masm.ma_vneg_f32(input, ToFloatRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorARM::visitForkJoinGetSlice(LForkJoinGetSlice *ins) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: } michael@0: michael@0: JitCode * michael@0: JitRuntime::generateForkJoinGetSliceStub(JSContext *cx) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: }