michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #include "jit/mips/CodeGenerator-mips.h" michael@0: michael@0: #include "mozilla/MathAlgorithms.h" michael@0: michael@0: #include "jscntxt.h" michael@0: #include "jscompartment.h" michael@0: #include "jsnum.h" michael@0: michael@0: #include "jit/CodeGenerator.h" michael@0: #include "jit/IonFrames.h" michael@0: #include "jit/JitCompartment.h" michael@0: #include "jit/MIR.h" michael@0: #include "jit/MIRGraph.h" michael@0: #include "vm/Shape.h" michael@0: michael@0: #include "jsscriptinlines.h" michael@0: michael@0: #include "jit/shared/CodeGenerator-shared-inl.h" michael@0: michael@0: using namespace js; michael@0: using namespace js::jit; michael@0: michael@0: using mozilla::FloorLog2; michael@0: using mozilla::NegativeInfinity; michael@0: using JS::GenericNaN; michael@0: michael@0: // shared michael@0: CodeGeneratorMIPS::CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm) michael@0: : CodeGeneratorShared(gen, graph, masm) michael@0: { michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::generatePrologue() michael@0: { michael@0: if (gen->compilingAsmJS()) { michael@0: masm.Push(ra); michael@0: // Note that this automatically sets MacroAssembler::framePushed(). michael@0: masm.reserveStack(frameDepth_); michael@0: } else { michael@0: // Note that this automatically sets MacroAssembler::framePushed(). michael@0: masm.reserveStack(frameSize()); michael@0: masm.checkStackAlignment(); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::generateEpilogue() michael@0: { michael@0: masm.bind(&returnLabel_); michael@0: #if JS_TRACE_LOGGING michael@0: masm.tracelogStop(); michael@0: #endif michael@0: if (gen->compilingAsmJS()) { michael@0: // Pop the stack we allocated at the start of the function. michael@0: masm.freeStack(frameDepth_); michael@0: masm.Pop(ra); michael@0: masm.abiret(); michael@0: MOZ_ASSERT(masm.framePushed() == 0); michael@0: } else { michael@0: // Pop the stack we allocated at the start of the function. michael@0: masm.freeStack(frameSize()); michael@0: MOZ_ASSERT(masm.framePushed() == 0); michael@0: masm.ret(); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: CodeGeneratorMIPS::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, michael@0: MBasicBlock *mir, Assembler::DoubleCondition cond) michael@0: { michael@0: Label *label = mir->lir()->label(); michael@0: if (Label *oolEntry = labelForBackedgeWithImplicitCheck(mir)) { michael@0: // Note: the backedge is initially a jump to the next instruction. michael@0: // It will be patched to the target block's label during link(). 
michael@0: RepatchLabel rejoin; michael@0: michael@0: CodeOffsetJump backedge; michael@0: Label skip; michael@0: if (fmt == Assembler::DoubleFloat) michael@0: masm.ma_bc1d(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump); michael@0: else michael@0: masm.ma_bc1s(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump); michael@0: michael@0: backedge = masm.jumpWithPatch(&rejoin); michael@0: masm.bind(&rejoin); michael@0: masm.bind(&skip); michael@0: michael@0: if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry))) michael@0: MOZ_CRASH(); michael@0: } else { michael@0: if (fmt == Assembler::DoubleFloat) michael@0: masm.branchDouble(cond, lhs, rhs, mir->lir()->label()); michael@0: else michael@0: masm.branchFloat(cond, lhs, rhs, mir->lir()->label()); michael@0: } michael@0: } michael@0: michael@0: bool michael@0: OutOfLineBailout::accept(CodeGeneratorMIPS *codegen) michael@0: { michael@0: return codegen->visitOutOfLineBailout(this); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitTestIAndBranch(LTestIAndBranch *test) michael@0: { michael@0: const LAllocation *opd = test->getOperand(0); michael@0: MBasicBlock *ifTrue = test->ifTrue(); michael@0: MBasicBlock *ifFalse = test->ifFalse(); michael@0: michael@0: emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompare(LCompare *comp) michael@0: { michael@0: Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop()); michael@0: const LAllocation *left = comp->getOperand(0); michael@0: const LAllocation *right = comp->getOperand(1); michael@0: const LDefinition *def = comp->getDef(0); michael@0: michael@0: if (right->isConstant()) michael@0: masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)), ToRegister(def)); michael@0: else if (right->isGeneralReg()) michael@0: masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def)); michael@0: else michael@0: masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def)); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareAndBranch(LCompareAndBranch *comp) michael@0: { michael@0: Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop()); michael@0: if (comp->right()->isConstant()) { michael@0: emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond, michael@0: comp->ifTrue(), comp->ifFalse()); michael@0: } else if (comp->right()->isGeneralReg()) { michael@0: emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond, michael@0: comp->ifTrue(), comp->ifFalse()); michael@0: } else { michael@0: emitBranch(ToRegister(comp->left()), ToAddress(comp->right()), cond, michael@0: comp->ifTrue(), comp->ifFalse()); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::generateOutOfLineCode() michael@0: { michael@0: if (!CodeGeneratorShared::generateOutOfLineCode()) michael@0: return false; michael@0: michael@0: if (deoptLabel_.used()) { michael@0: // All non-table-based bailouts will go here. michael@0: masm.bind(&deoptLabel_); michael@0: michael@0: // Push the frame size, so the handler can recover the IonScript. 
michael@0: // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk michael@0: // We have to use 'ra' because generateBailoutTable will implicitly do michael@0: // the same. michael@0: masm.move32(Imm32(frameSize()), ra); michael@0: michael@0: JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler(); michael@0: michael@0: masm.branch(handler); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::bailoutFrom(Label *label, LSnapshot *snapshot) michael@0: { michael@0: if (masm.bailed()) michael@0: return false; michael@0: MOZ_ASSERT(label->used()); michael@0: MOZ_ASSERT(!label->bound()); michael@0: michael@0: CompileInfo &info = snapshot->mir()->block()->info(); michael@0: switch (info.executionMode()) { michael@0: case ParallelExecution: { michael@0: // in parallel mode, make no attempt to recover, just signal an error. michael@0: OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported, michael@0: snapshot->mir()->block(), michael@0: snapshot->mir()->pc()); michael@0: masm.retarget(label, ool->entry()); michael@0: return true; michael@0: } michael@0: case SequentialExecution: michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("No such execution mode"); michael@0: } michael@0: michael@0: if (!encode(snapshot)) michael@0: return false; michael@0: michael@0: // Though the assembler doesn't track all frame pushes, at least make sure michael@0: // the known value makes sense. We can't use bailout tables if the stack michael@0: // isn't properly aligned to the static frame size. michael@0: MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(), michael@0: frameClass_.frameSize() == masm.framePushed()); michael@0: michael@0: // We don't use table bailouts because retargeting is easier this way. michael@0: OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed()); michael@0: if (!addOutOfLineCode(ool)) { michael@0: return false; michael@0: } michael@0: michael@0: masm.retarget(label, ool->entry()); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::bailout(LSnapshot *snapshot) michael@0: { michael@0: Label label; michael@0: masm.jump(&label); michael@0: return bailoutFrom(&label, snapshot); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitOutOfLineBailout(OutOfLineBailout *ool) michael@0: { michael@0: // Push snapshotOffset and make sure stack is aligned. michael@0: masm.subPtr(Imm32(2 * sizeof(void *)), StackPointer); michael@0: masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()), Address(StackPointer, 0)); michael@0: michael@0: masm.jump(&deoptLabel_); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitMinMaxD(LMinMaxD *ins) michael@0: { michael@0: FloatRegister first = ToFloatRegister(ins->first()); michael@0: FloatRegister second = ToFloatRegister(ins->second()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: michael@0: MOZ_ASSERT(first == output); michael@0: michael@0: Assembler::DoubleCondition cond = ins->mir()->isMax() michael@0: ? Assembler::DoubleLessThanOrEqual michael@0: : Assembler::DoubleGreaterThanOrEqual; michael@0: Label nan, equal, returnSecond, done; michael@0: michael@0: // First or second is NaN, result is NaN. michael@0: masm.ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump); michael@0: // Make sure we handle -0 and 0 right. 
michael@0: masm.ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump); michael@0: masm.ma_bc1d(first, second, &returnSecond, cond, ShortJump); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: // Check for zero. michael@0: masm.bind(&equal); michael@0: masm.loadConstantDouble(0.0, ScratchFloatReg); michael@0: // First wasn't 0 or -0, so just return it. michael@0: masm.ma_bc1d(first, ScratchFloatReg, &done, Assembler::DoubleNotEqualOrUnordered, ShortJump); michael@0: michael@0: // So now both operands are either -0 or 0. michael@0: if (ins->mir()->isMax()) { michael@0: // -0 + -0 = -0 and -0 + 0 = 0. michael@0: masm.addDouble(second, first); michael@0: } else { michael@0: masm.negateDouble(first); michael@0: masm.subDouble(second, first); michael@0: masm.negateDouble(first); michael@0: } michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&nan); michael@0: masm.loadConstantDouble(GenericNaN(), output); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&returnSecond); michael@0: masm.moveDouble(second, output); michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitAbsD(LAbsD *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: MOZ_ASSERT(input == ToFloatRegister(ins->output())); michael@0: masm.as_absd(input, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitAbsF(LAbsF *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: MOZ_ASSERT(input == ToFloatRegister(ins->output())); michael@0: masm.as_abss(input, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitSqrtD(LSqrtD *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: masm.as_sqrtd(output, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitSqrtF(LSqrtF *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: masm.as_sqrts(output, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitAddI(LAddI *ins) michael@0: { michael@0: const LAllocation *lhs = ins->getOperand(0); michael@0: const LAllocation *rhs = ins->getOperand(1); michael@0: const LDefinition *dest = ins->getDef(0); michael@0: michael@0: MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg()); michael@0: michael@0: // If there is no snapshot, we don't need to check for overflow michael@0: if (!ins->snapshot()) { michael@0: if (rhs->isConstant()) michael@0: masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs))); michael@0: else michael@0: masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); michael@0: return true; michael@0: } michael@0: michael@0: Label overflow; michael@0: if (rhs->isConstant()) michael@0: masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow); michael@0: else michael@0: masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow); michael@0: michael@0: if (!bailoutFrom(&overflow, ins->snapshot())) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: 
CodeGeneratorMIPS::visitSubI(LSubI *ins) michael@0: { michael@0: const LAllocation *lhs = ins->getOperand(0); michael@0: const LAllocation *rhs = ins->getOperand(1); michael@0: const LDefinition *dest = ins->getDef(0); michael@0: michael@0: MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg()); michael@0: michael@0: // If there is no snapshot, we don't need to check for overflow michael@0: if (!ins->snapshot()) { michael@0: if (rhs->isConstant()) michael@0: masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs))); michael@0: else michael@0: masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); michael@0: return true; michael@0: } michael@0: michael@0: Label overflow; michael@0: if (rhs->isConstant()) michael@0: masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow); michael@0: else michael@0: masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow); michael@0: michael@0: if (!bailoutFrom(&overflow, ins->snapshot())) michael@0: return false; michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitMulI(LMulI *ins) michael@0: { michael@0: const LAllocation *lhs = ins->lhs(); michael@0: const LAllocation *rhs = ins->rhs(); michael@0: Register dest = ToRegister(ins->output()); michael@0: MMul *mul = ins->mir(); michael@0: michael@0: MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow()); michael@0: michael@0: if (rhs->isConstant()) { michael@0: int32_t constant = ToInt32(rhs); michael@0: Register src = ToRegister(lhs); michael@0: michael@0: // Bailout on -0.0 michael@0: if (mul->canBeNegativeZero() && constant <= 0) { michael@0: Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal; michael@0: if (!bailoutCmp32(cond, src, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: michael@0: switch (constant) { michael@0: case -1: michael@0: if (mul->canOverflow()) { michael@0: if (!bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot())) michael@0: return false; michael@0: } michael@0: masm.ma_negu(dest, src); michael@0: break; michael@0: case 0: michael@0: masm.move32(Imm32(0), dest); michael@0: break; michael@0: case 1: michael@0: masm.move32(src, dest); michael@0: break; michael@0: case 2: michael@0: if (mul->canOverflow()) { michael@0: Label mulTwoOverflow; michael@0: masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow); michael@0: michael@0: if (!bailoutFrom(&mulTwoOverflow, ins->snapshot())) michael@0: return false; michael@0: } else { michael@0: masm.as_addu(dest, src, src); michael@0: } michael@0: break; michael@0: default: michael@0: uint32_t shift = FloorLog2(constant); michael@0: michael@0: if (!mul->canOverflow() && (constant > 0)) { michael@0: // If it cannot overflow, we can do lots of optimizations. michael@0: uint32_t rest = constant - (1 << shift); michael@0: michael@0: // See if the constant has one bit set, meaning it can be michael@0: // encoded as a bitshift. michael@0: if ((1 << shift) == constant) { michael@0: masm.ma_sll(dest, src, Imm32(shift)); michael@0: return true; michael@0: } michael@0: michael@0: // If the constant cannot be encoded as (1<canOverflow() && (constant > 0) && (src != dest)) { michael@0: // To stay on the safe side, only optimize things that are a michael@0: // power of 2. 
michael@0: michael@0: if ((1 << shift) == constant) { michael@0: // dest = lhs * pow(2, shift) michael@0: masm.ma_sll(dest, src, Imm32(shift)); michael@0: // At runtime, check (lhs == dest >> shift), if this does michael@0: // not hold, some bits were lost due to overflow, and the michael@0: // computation should be resumed as a double. michael@0: masm.ma_sra(ScratchRegister, dest, Imm32(shift)); michael@0: if (!bailoutCmp32(Assembler::NotEqual, src, ScratchRegister, ins->snapshot())) michael@0: return false; michael@0: return true; michael@0: } michael@0: } michael@0: michael@0: if (mul->canOverflow()) { michael@0: Label mulConstOverflow; michael@0: masm.ma_mul_branch_overflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)), michael@0: &mulConstOverflow); michael@0: michael@0: if (!bailoutFrom(&mulConstOverflow, ins->snapshot())) michael@0: return false; michael@0: } else { michael@0: masm.ma_mult(src, Imm32(ToInt32(rhs))); michael@0: masm.as_mflo(dest); michael@0: } michael@0: break; michael@0: } michael@0: } else { michael@0: Label multRegOverflow; michael@0: michael@0: if (mul->canOverflow()) { michael@0: masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs), &multRegOverflow); michael@0: if (!bailoutFrom(&multRegOverflow, ins->snapshot())) michael@0: return false; michael@0: } else { michael@0: masm.as_mult(ToRegister(lhs), ToRegister(rhs)); michael@0: masm.as_mflo(dest); michael@0: } michael@0: michael@0: if (mul->canBeNegativeZero()) { michael@0: Label done; michael@0: masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump); michael@0: michael@0: // Result is -0 if lhs or rhs is negative. michael@0: // In that case result must be double value so bailout michael@0: Register scratch = SecondScratchReg; michael@0: masm.ma_or(scratch, ToRegister(lhs), ToRegister(rhs)); michael@0: if (!bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot())) michael@0: return false; michael@0: michael@0: masm.bind(&done); michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitDivI(LDivI *ins) michael@0: { michael@0: // Extract the registers from this instruction michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register rhs = ToRegister(ins->rhs()); michael@0: Register dest = ToRegister(ins->output()); michael@0: Register temp = ToRegister(ins->getTemp(0)); michael@0: MDiv *mir = ins->mir(); michael@0: michael@0: Label done; michael@0: michael@0: // Handle divide by zero. michael@0: if (mir->canBeDivideByZero()) { michael@0: if (mir->canTruncateInfinities()) { michael@0: // Truncated division by zero is zero (Infinity|0 == 0) michael@0: Label notzero; michael@0: masm.ma_b(rhs, rhs, ¬zero, Assembler::NonZero, ShortJump); michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: masm.bind(¬zero); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: // Handle an integer overflow exception from -2147483648 / -1. 
michael@0: if (mir->canBeNegativeOverflow()) { michael@0: Label notMinInt; michael@0: masm.move32(Imm32(INT32_MIN), temp); michael@0: masm.ma_b(lhs, temp, ¬MinInt, Assembler::NotEqual, ShortJump); michael@0: michael@0: masm.move32(Imm32(-1), temp); michael@0: if (mir->canTruncateOverflow()) { michael@0: // (-INT32_MIN)|0 == INT32_MIN michael@0: Label skip; michael@0: masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump); michael@0: masm.move32(Imm32(INT32_MIN), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: masm.bind(&skip); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot())) michael@0: return false; michael@0: } michael@0: masm.bind(¬MinInt); michael@0: } michael@0: michael@0: // Handle negative 0. (0/-Y) michael@0: if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) { michael@0: Label nonzero; michael@0: masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump); michael@0: if (!bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: masm.bind(&nonzero); michael@0: } michael@0: // Note: above safety checks could not be verified as Ion seems to be michael@0: // smarter and requires double arithmetic in such cases. michael@0: michael@0: // All regular. Lets call div. michael@0: if (mir->canTruncateRemainder()) { michael@0: masm.as_div(lhs, rhs); michael@0: masm.as_mflo(dest); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: michael@0: Label remainderNonZero; michael@0: masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero); michael@0: if (!bailoutFrom(&remainderNonZero, ins->snapshot())) michael@0: return false; michael@0: } michael@0: michael@0: masm.bind(&done); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitDivPowTwoI(LDivPowTwoI *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->numerator()); michael@0: Register dest = ToRegister(ins->output()); michael@0: Register tmp = ToRegister(ins->getTemp(0)); michael@0: int32_t shift = ins->shift(); michael@0: michael@0: if (shift != 0) { michael@0: MDiv *mir = ins->mir(); michael@0: if (!mir->isTruncated()) { michael@0: // If the remainder is going to be != 0, bailout since this must michael@0: // be a double. michael@0: masm.ma_sll(tmp, lhs, Imm32(32 - shift)); michael@0: if (!bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot())) michael@0: return false; michael@0: } michael@0: michael@0: if (!mir->canBeNegativeDividend()) { michael@0: // Numerator is unsigned, so needs no adjusting. Do the shift. michael@0: masm.ma_sra(dest, lhs, Imm32(shift)); michael@0: return true; michael@0: } michael@0: michael@0: // Adjust the value so that shifting produces a correctly rounded result michael@0: // when the numerator is negative. See 10-1 "Signed Division by a Known michael@0: // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight. michael@0: if (shift > 1) { michael@0: masm.ma_sra(tmp, lhs, Imm32(31)); michael@0: masm.ma_srl(tmp, tmp, Imm32(32 - shift)); michael@0: masm.add32(lhs, tmp); michael@0: } else { michael@0: masm.ma_srl(tmp, lhs, Imm32(32 - shift)); michael@0: masm.add32(lhs, tmp); michael@0: } michael@0: michael@0: // Do the shift. 
michael@0: masm.ma_sra(dest, tmp, Imm32(shift)); michael@0: } else { michael@0: masm.move32(lhs, dest); michael@0: } michael@0: michael@0: return true; michael@0: michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitModI(LModI *ins) michael@0: { michael@0: // Extract the registers from this instruction michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register rhs = ToRegister(ins->rhs()); michael@0: Register dest = ToRegister(ins->output()); michael@0: Register callTemp = ToRegister(ins->callTemp()); michael@0: MMod *mir = ins->mir(); michael@0: Label done, prevent; michael@0: michael@0: masm.move32(lhs, callTemp); michael@0: michael@0: // Prevent INT_MIN % -1; michael@0: // The integer division will give INT_MIN, but we want -(double)INT_MIN. michael@0: if (mir->canBeNegativeDividend()) { michael@0: masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump); michael@0: if (mir->isTruncated()) { michael@0: // (INT_MIN % -1)|0 == 0 michael@0: Label skip; michael@0: masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump); michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: masm.bind(&skip); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot())) michael@0: return false; michael@0: } michael@0: masm.bind(&prevent); michael@0: } michael@0: michael@0: // 0/X (with X < 0) is bad because both of these values *should* be michael@0: // doubles, and the result should be -0.0, which cannot be represented in michael@0: // integers. X/0 is bad because it will give garbage (or abort), when it michael@0: // should give either \infty, -\infty or NAN. michael@0: michael@0: // Prevent 0 / X (with X < 0) and X / 0 michael@0: // testing X / Y. Compare Y with 0. michael@0: // There are three cases: (Y < 0), (Y == 0) and (Y > 0) michael@0: // If (Y < 0), then we compare X with 0, and bail if X == 0 michael@0: // If (Y == 0), then we simply want to bail. michael@0: // if (Y > 0), we don't bail. 
michael@0: michael@0: if (mir->canBeDivideByZero()) { michael@0: if (mir->isTruncated()) { michael@0: Label skip; michael@0: masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump); michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: masm.bind(&skip); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: if (mir->canBeNegativeDividend()) { michael@0: Label notNegative; michael@0: masm.ma_b(rhs, Imm32(0), ¬Negative, Assembler::GreaterThan, ShortJump); michael@0: if (mir->isTruncated()) { michael@0: // NaN|0 == 0 and (0 % -X)|0 == 0 michael@0: Label skip; michael@0: masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump); michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: masm.bind(&skip); michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: masm.bind(¬Negative); michael@0: } michael@0: michael@0: masm.as_div(lhs, rhs); michael@0: masm.as_mfhi(dest); michael@0: michael@0: // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0 michael@0: if (mir->canBeNegativeDividend()) { michael@0: if (mir->isTruncated()) { michael@0: // -0.0|0 == 0 michael@0: } else { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: // See if X < 0 michael@0: masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump); michael@0: if (!bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitModPowTwoI(LModPowTwoI *ins) michael@0: { michael@0: Register in = ToRegister(ins->getOperand(0)); michael@0: Register out = ToRegister(ins->getDef(0)); michael@0: MMod *mir = ins->mir(); michael@0: Label negative, done; michael@0: michael@0: masm.move32(in, out); michael@0: masm.ma_b(in, in, &done, Assembler::Zero, ShortJump); michael@0: // Switch based on sign of the lhs. 
michael@0: // Positive numbers are just a bitmask michael@0: masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump); michael@0: { michael@0: masm.and32(Imm32((1 << ins->shift()) - 1), out); michael@0: masm.ma_b(&done, ShortJump); michael@0: } michael@0: michael@0: // Negative numbers need a negate, bitmask, negate michael@0: { michael@0: masm.bind(&negative); michael@0: masm.neg32(out); michael@0: masm.and32(Imm32((1 << ins->shift()) - 1), out); michael@0: masm.neg32(out); michael@0: } michael@0: if (mir->canBeNegativeDividend()) { michael@0: if (!mir->isTruncated()) { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: if (!bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot())) michael@0: return false; michael@0: } else { michael@0: // -0|0 == 0 michael@0: } michael@0: } michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitModMaskI(LModMaskI *ins) michael@0: { michael@0: Register src = ToRegister(ins->getOperand(0)); michael@0: Register dest = ToRegister(ins->getDef(0)); michael@0: Register tmp = ToRegister(ins->getTemp(0)); michael@0: MMod *mir = ins->mir(); michael@0: michael@0: if (!mir->isTruncated() && mir->canBeNegativeDividend()) { michael@0: MOZ_ASSERT(mir->fallible()); michael@0: michael@0: Label bail; michael@0: masm.ma_mod_mask(src, dest, tmp, ins->shift(), &bail); michael@0: if (!bailoutFrom(&bail, ins->snapshot())) michael@0: return false; michael@0: } else { michael@0: masm.ma_mod_mask(src, dest, tmp, ins->shift(), nullptr); michael@0: } michael@0: return true; michael@0: } michael@0: bool michael@0: CodeGeneratorMIPS::visitBitNotI(LBitNotI *ins) michael@0: { michael@0: const LAllocation *input = ins->getOperand(0); michael@0: const LDefinition *dest = ins->getDef(0); michael@0: MOZ_ASSERT(!input->isConstant()); michael@0: michael@0: masm.ma_not(ToRegister(dest), ToRegister(input)); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitBitOpI(LBitOpI *ins) michael@0: { michael@0: const LAllocation *lhs = ins->getOperand(0); michael@0: const LAllocation *rhs = ins->getOperand(1); michael@0: const LDefinition *dest = ins->getDef(0); michael@0: // all of these bitops should be either imm32's, or integer registers. 
michael@0: switch (ins->bitop()) { michael@0: case JSOP_BITOR: michael@0: if (rhs->isConstant()) michael@0: masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs))); michael@0: else michael@0: masm.ma_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); michael@0: break; michael@0: case JSOP_BITXOR: michael@0: if (rhs->isConstant()) michael@0: masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs))); michael@0: else michael@0: masm.ma_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); michael@0: break; michael@0: case JSOP_BITAND: michael@0: if (rhs->isConstant()) michael@0: masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs))); michael@0: else michael@0: masm.ma_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs)); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected binary opcode"); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitShiftI(LShiftI *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: const LAllocation *rhs = ins->rhs(); michael@0: Register dest = ToRegister(ins->output()); michael@0: michael@0: if (rhs->isConstant()) { michael@0: int32_t shift = ToInt32(rhs) & 0x1F; michael@0: switch (ins->bitop()) { michael@0: case JSOP_LSH: michael@0: if (shift) michael@0: masm.ma_sll(dest, lhs, Imm32(shift)); michael@0: else michael@0: masm.move32(lhs, dest); michael@0: break; michael@0: case JSOP_RSH: michael@0: if (shift) michael@0: masm.ma_sra(dest, lhs, Imm32(shift)); michael@0: else michael@0: masm.move32(lhs, dest); michael@0: break; michael@0: case JSOP_URSH: michael@0: if (shift) { michael@0: masm.ma_srl(dest, lhs, Imm32(shift)); michael@0: } else { michael@0: // x >>> 0 can overflow. michael@0: masm.move32(lhs, dest); michael@0: if (ins->mir()->toUrsh()->fallible()) { michael@0: if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: } michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected shift op"); michael@0: } michael@0: } else { michael@0: // The shift amounts should be AND'ed into the 0-31 range michael@0: masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F)); michael@0: michael@0: switch (ins->bitop()) { michael@0: case JSOP_LSH: michael@0: masm.ma_sll(dest, lhs, dest); michael@0: break; michael@0: case JSOP_RSH: michael@0: masm.ma_sra(dest, lhs, dest); michael@0: break; michael@0: case JSOP_URSH: michael@0: masm.ma_srl(dest, lhs, dest); michael@0: if (ins->mir()->toUrsh()->fallible()) { michael@0: // x >>> 0 can overflow. 
michael@0: if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot())) michael@0: return false; michael@0: } michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("Unexpected shift op"); michael@0: } michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitUrshD(LUrshD *ins) michael@0: { michael@0: Register lhs = ToRegister(ins->lhs()); michael@0: Register temp = ToRegister(ins->temp()); michael@0: michael@0: const LAllocation *rhs = ins->rhs(); michael@0: FloatRegister out = ToFloatRegister(ins->output()); michael@0: michael@0: if (rhs->isConstant()) { michael@0: masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs))); michael@0: } else { michael@0: masm.ma_srl(temp, lhs, ToRegister(rhs)); michael@0: } michael@0: michael@0: masm.convertUInt32ToDouble(temp, out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitPowHalfD(LPowHalfD *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: michael@0: Label done, skip; michael@0: michael@0: // Masm.pow(-Infinity, 0.5) == Infinity. michael@0: masm.loadConstantDouble(NegativeInfinity(), ScratchFloatReg); michael@0: masm.ma_bc1d(input, ScratchFloatReg, &skip, Assembler::DoubleNotEqualOrUnordered, ShortJump); michael@0: masm.as_negd(output, ScratchFloatReg); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&skip); michael@0: // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). michael@0: // Adding 0 converts any -0 to 0. michael@0: masm.loadConstantDouble(0.0, ScratchFloatReg); michael@0: masm.as_addd(output, input, ScratchFloatReg); michael@0: masm.as_sqrtd(output, output); michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: MoveOperand michael@0: CodeGeneratorMIPS::toMoveOperand(const LAllocation *a) const michael@0: { michael@0: if (a->isGeneralReg()) michael@0: return MoveOperand(ToRegister(a)); michael@0: if (a->isFloatReg()) { michael@0: return MoveOperand(ToFloatRegister(a)); michael@0: } michael@0: MOZ_ASSERT((ToStackOffset(a) & 3) == 0); michael@0: int32_t offset = ToStackOffset(a); michael@0: michael@0: // The way the stack slots work, we assume that everything from michael@0: // depth == 0 downwards is writable. However, since our frame is included michael@0: // in this, ensure that the frame gets skipped. 
michael@0: if (gen->compilingAsmJS()) michael@0: offset -= AlignmentMidPrologue; michael@0: michael@0: return MoveOperand(StackPointer, offset); michael@0: } michael@0: michael@0: class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase michael@0: { michael@0: MTableSwitch *mir_; michael@0: CodeLabel jumpLabel_; michael@0: michael@0: bool accept(CodeGeneratorMIPS *codegen) { michael@0: return codegen->visitOutOfLineTableSwitch(this); michael@0: } michael@0: michael@0: public: michael@0: OutOfLineTableSwitch(MTableSwitch *mir) michael@0: : mir_(mir) michael@0: {} michael@0: michael@0: MTableSwitch *mir() const { michael@0: return mir_; michael@0: } michael@0: michael@0: CodeLabel *jumpLabel() { michael@0: return &jumpLabel_; michael@0: } michael@0: }; michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool) michael@0: { michael@0: MTableSwitch *mir = ool->mir(); michael@0: michael@0: masm.align(sizeof(void*)); michael@0: masm.bind(ool->jumpLabel()->src()); michael@0: if (!masm.addCodeLabel(*ool->jumpLabel())) michael@0: return false; michael@0: michael@0: for (size_t i = 0; i < mir->numCases(); i++) { michael@0: LBlock *caseblock = mir->getCase(i)->lir(); michael@0: Label *caseheader = caseblock->label(); michael@0: uint32_t caseoffset = caseheader->offset(); michael@0: michael@0: // The entries of the jump table need to be absolute addresses and thus michael@0: // must be patched after codegen is finished. michael@0: CodeLabel cl; michael@0: masm.ma_li(ScratchRegister, cl.dest()); michael@0: masm.branch(ScratchRegister); michael@0: cl.src()->bind(caseoffset); michael@0: if (!masm.addCodeLabel(cl)) michael@0: return false; michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index, michael@0: const Register &address) michael@0: { michael@0: Label *defaultcase = mir->getDefault()->lir()->label(); michael@0: michael@0: // Lower value with low value michael@0: if (mir->low() != 0) michael@0: masm.subPtr(Imm32(mir->low()), index); michael@0: michael@0: // Jump to default case if input is out of range michael@0: int32_t cases = mir->numCases(); michael@0: masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase); michael@0: michael@0: // To fill in the CodeLabels for the case entries, we need to first michael@0: // generate the case entries (we don't yet know their offsets in the michael@0: // instruction stream). michael@0: OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(mir); michael@0: if (!addOutOfLineCode(ool)) michael@0: return false; michael@0: michael@0: // Compute the position where a pointer to the right case stands. 
michael@0: masm.ma_li(address, ool->jumpLabel()->dest()); michael@0: masm.lshiftPtr(Imm32(4), index); michael@0: masm.addPtr(index, address); michael@0: michael@0: masm.branch(address); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitMathD(LMathD *math) michael@0: { michael@0: const LAllocation *src1 = math->getOperand(0); michael@0: const LAllocation *src2 = math->getOperand(1); michael@0: const LDefinition *output = math->getDef(0); michael@0: michael@0: switch (math->jsop()) { michael@0: case JSOP_ADD: michael@0: masm.as_addd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_SUB: michael@0: masm.as_subd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_MUL: michael@0: masm.as_muld(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_DIV: michael@0: masm.as_divd(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected opcode"); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitMathF(LMathF *math) michael@0: { michael@0: const LAllocation *src1 = math->getOperand(0); michael@0: const LAllocation *src2 = math->getOperand(1); michael@0: const LDefinition *output = math->getDef(0); michael@0: michael@0: switch (math->jsop()) { michael@0: case JSOP_ADD: michael@0: masm.as_adds(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_SUB: michael@0: masm.as_subs(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_MUL: michael@0: masm.as_muls(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: case JSOP_DIV: michael@0: masm.as_divs(ToFloatRegister(output), ToFloatRegister(src1), ToFloatRegister(src2)); michael@0: break; michael@0: default: michael@0: MOZ_ASSUME_UNREACHABLE("unexpected opcode"); michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitFloor(LFloor *lir) michael@0: { michael@0: FloatRegister input = ToFloatRegister(lir->input()); michael@0: FloatRegister scratch = ScratchFloatReg; michael@0: Register output = ToRegister(lir->output()); michael@0: michael@0: Label skipCheck, done; michael@0: michael@0: // If Nan, 0 or -0 check for bailout michael@0: masm.loadConstantDouble(0.0, scratch); michael@0: masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump); michael@0: michael@0: // If high part is not zero, it is NaN or -0, so we bail. michael@0: masm.moveFromDoubleHi(input, SecondScratchReg); michael@0: if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot())) michael@0: return false; michael@0: michael@0: // Input was zero, so return zero. 
michael@0: masm.move32(Imm32(0), output); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&skipCheck); michael@0: masm.as_floorwd(scratch, input); michael@0: masm.moveFromDoubleLo(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.bind(&done); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitFloorF(LFloorF *lir) michael@0: { michael@0: FloatRegister input = ToFloatRegister(lir->input()); michael@0: FloatRegister scratch = ScratchFloatReg; michael@0: Register output = ToRegister(lir->output()); michael@0: michael@0: Label skipCheck, done; michael@0: michael@0: // If Nan, 0 or -0 check for bailout michael@0: masm.loadConstantFloat32(0.0, scratch); michael@0: masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump); michael@0: michael@0: // If binary value is not zero, it is NaN or -0, so we bail. michael@0: masm.moveFromDoubleLo(input, SecondScratchReg); michael@0: if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot())) michael@0: return false; michael@0: michael@0: // Input was zero, so return zero. michael@0: masm.move32(Imm32(0), output); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&skipCheck); michael@0: masm.as_floorws(scratch, input); michael@0: masm.moveFromDoubleLo(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.bind(&done); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitRound(LRound *lir) michael@0: { michael@0: FloatRegister input = ToFloatRegister(lir->input()); michael@0: FloatRegister temp = ToFloatRegister(lir->temp()); michael@0: FloatRegister scratch = ScratchFloatReg; michael@0: Register output = ToRegister(lir->output()); michael@0: michael@0: Label bail, negative, end, skipCheck; michael@0: michael@0: // Load 0.5 in the temp register. michael@0: masm.loadConstantDouble(0.5, temp); michael@0: michael@0: // Branch to a slow path for negative inputs. Doesn't catch NaN or -0. michael@0: masm.loadConstantDouble(0.0, scratch); michael@0: masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump); michael@0: michael@0: // If Nan, 0 or -0 check for bailout michael@0: masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump); michael@0: michael@0: // If high part is not zero, it is NaN or -0, so we bail. michael@0: masm.moveFromDoubleHi(input, SecondScratchReg); michael@0: if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot())) michael@0: return false; michael@0: michael@0: // Input was zero, so return zero. 
michael@0: masm.move32(Imm32(0), output); michael@0: masm.ma_b(&end, ShortJump); michael@0: michael@0: masm.bind(&skipCheck); michael@0: masm.loadConstantDouble(0.5, scratch); michael@0: masm.addDouble(input, scratch); michael@0: masm.as_floorwd(scratch, scratch); michael@0: michael@0: masm.moveFromDoubleLo(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.jump(&end); michael@0: michael@0: // Input is negative, but isn't -0. michael@0: masm.bind(&negative); michael@0: masm.addDouble(input, temp); michael@0: michael@0: // If input + 0.5 >= 0, input is a negative number >= -0.5 and the michael@0: // result is -0. michael@0: masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail); michael@0: if (!bailoutFrom(&bail, lir->snapshot())) michael@0: return false; michael@0: michael@0: // Truncate and round toward zero. michael@0: // This is off-by-one for everything but integer-valued inputs. michael@0: masm.as_floorwd(scratch, temp); michael@0: masm.moveFromDoubleLo(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.bind(&end); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitRoundF(LRoundF *lir) michael@0: { michael@0: FloatRegister input = ToFloatRegister(lir->input()); michael@0: FloatRegister temp = ToFloatRegister(lir->temp()); michael@0: FloatRegister scratch = ScratchFloatReg; michael@0: Register output = ToRegister(lir->output()); michael@0: michael@0: Label bail, negative, end, skipCheck; michael@0: michael@0: // Load 0.5 in the temp register. michael@0: masm.loadConstantFloat32(0.5, temp); michael@0: michael@0: // Branch to a slow path for negative inputs. Doesn't catch NaN or -0. michael@0: masm.loadConstantFloat32(0.0, scratch); michael@0: masm.ma_bc1s(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump); michael@0: michael@0: // If Nan, 0 or -0 check for bailout michael@0: masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump); michael@0: michael@0: // If binary value is not zero, it is NaN or -0, so we bail. michael@0: masm.moveFromFloat32(input, SecondScratchReg); michael@0: if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot())) michael@0: return false; michael@0: michael@0: // Input was zero, so return zero. michael@0: masm.move32(Imm32(0), output); michael@0: masm.ma_b(&end, ShortJump); michael@0: michael@0: masm.bind(&skipCheck); michael@0: masm.loadConstantFloat32(0.5, scratch); michael@0: masm.as_adds(scratch, input, scratch); michael@0: masm.as_floorws(scratch, scratch); michael@0: michael@0: masm.moveFromFloat32(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.jump(&end); michael@0: michael@0: // Input is negative, but isn't -0. michael@0: masm.bind(&negative); michael@0: masm.as_adds(temp, input, temp); michael@0: michael@0: // If input + 0.5 >= 0, input is a negative number >= -0.5 and the michael@0: // result is -0. 
michael@0: masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail); michael@0: if (!bailoutFrom(&bail, lir->snapshot())) michael@0: return false; michael@0: michael@0: // Truncate and round toward zero. michael@0: // This is off-by-one for everything but integer-valued inputs. michael@0: masm.as_floorws(scratch, temp); michael@0: masm.moveFromFloat32(scratch, output); michael@0: michael@0: if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.bind(&end); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitTruncateDToInt32(LTruncateDToInt32 *ins) michael@0: { michael@0: return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output())); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitTruncateFToInt32(LTruncateFToInt32 *ins) michael@0: { michael@0: return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output())); michael@0: } michael@0: michael@0: static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 }; michael@0: michael@0: FrameSizeClass michael@0: FrameSizeClass::FromDepth(uint32_t frameDepth) michael@0: { michael@0: for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) { michael@0: if (frameDepth < FrameSizes[i]) michael@0: return FrameSizeClass(i); michael@0: } michael@0: michael@0: return FrameSizeClass::None(); michael@0: } michael@0: michael@0: FrameSizeClass michael@0: FrameSizeClass::ClassLimit() michael@0: { michael@0: return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes)); michael@0: } michael@0: michael@0: uint32_t michael@0: FrameSizeClass::frameSize() const michael@0: { michael@0: MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID); michael@0: MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes)); michael@0: michael@0: return FrameSizes[class_]; michael@0: } michael@0: michael@0: ValueOperand michael@0: CodeGeneratorMIPS::ToValue(LInstruction *ins, size_t pos) michael@0: { michael@0: Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX)); michael@0: Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX)); michael@0: return ValueOperand(typeReg, payloadReg); michael@0: } michael@0: michael@0: ValueOperand michael@0: CodeGeneratorMIPS::ToOutValue(LInstruction *ins) michael@0: { michael@0: Register typeReg = ToRegister(ins->getDef(TYPE_INDEX)); michael@0: Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX)); michael@0: return ValueOperand(typeReg, payloadReg); michael@0: } michael@0: michael@0: ValueOperand michael@0: CodeGeneratorMIPS::ToTempValue(LInstruction *ins, size_t pos) michael@0: { michael@0: Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX)); michael@0: Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX)); michael@0: return ValueOperand(typeReg, payloadReg); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitValue(LValue *value) michael@0: { michael@0: const ValueOperand out = ToOutValue(value); michael@0: michael@0: masm.moveValue(value->value(), out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitBox(LBox *box) michael@0: { michael@0: const LDefinition *type = box->getDef(TYPE_INDEX); michael@0: michael@0: MOZ_ASSERT(!box->getOperand(0)->isConstant()); michael@0: michael@0: // For NUNBOX32, the input operand and the output payload have the same michael@0: // virtual register. 
All that needs to be written is the type tag for michael@0: // the type definition. michael@0: masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type)); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint *box) michael@0: { michael@0: const LDefinition *payload = box->getDef(PAYLOAD_INDEX); michael@0: const LDefinition *type = box->getDef(TYPE_INDEX); michael@0: const LAllocation *in = box->getOperand(0); michael@0: michael@0: FloatRegister reg = ToFloatRegister(in); michael@0: if (box->type() == MIRType_Float32) { michael@0: masm.convertFloat32ToDouble(reg, ScratchFloatReg); michael@0: reg = ScratchFloatReg; michael@0: } michael@0: masm.ma_mv(reg, ValueOperand(ToRegister(type), ToRegister(payload))); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitUnbox(LUnbox *unbox) michael@0: { michael@0: // Note that for unbox, the type and payload indexes are switched on the michael@0: // inputs. michael@0: MUnbox *mir = unbox->mir(); michael@0: Register type = ToRegister(unbox->type()); michael@0: michael@0: if (mir->fallible()) { michael@0: if (!bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())), michael@0: unbox->snapshot())) michael@0: return false; michael@0: } michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitDouble(LDouble *ins) michael@0: { michael@0: const LDefinition *out = ins->getDef(0); michael@0: michael@0: masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out)); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitFloat32(LFloat32 *ins) michael@0: { michael@0: const LDefinition *out = ins->getDef(0); michael@0: masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out)); michael@0: return true; michael@0: } michael@0: michael@0: Register michael@0: CodeGeneratorMIPS::splitTagForTest(const ValueOperand &value) michael@0: { michael@0: return value.typeReg(); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitTestDAndBranch(LTestDAndBranch *test) michael@0: { michael@0: FloatRegister input = ToFloatRegister(test->input()); michael@0: michael@0: MBasicBlock *ifTrue = test->ifTrue(); michael@0: MBasicBlock *ifFalse = test->ifFalse(); michael@0: michael@0: masm.loadConstantDouble(0.0, ScratchFloatReg); michael@0: // If 0, or NaN, the result is false. michael@0: michael@0: if (isNextBlock(ifFalse->lir())) { michael@0: branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifTrue, michael@0: Assembler::DoubleNotEqual); michael@0: } else { michael@0: branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifFalse, michael@0: Assembler::DoubleEqualOrUnordered); michael@0: jumpToBlock(ifTrue); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitTestFAndBranch(LTestFAndBranch *test) michael@0: { michael@0: FloatRegister input = ToFloatRegister(test->input()); michael@0: michael@0: MBasicBlock *ifTrue = test->ifTrue(); michael@0: MBasicBlock *ifFalse = test->ifFalse(); michael@0: michael@0: masm.loadConstantFloat32(0.0, ScratchFloatReg); michael@0: // If 0, or NaN, the result is false. 
michael@0: michael@0: if (isNextBlock(ifFalse->lir())) { michael@0: branchToBlock(Assembler::SingleFloat, input, ScratchFloatReg, ifTrue, michael@0: Assembler::DoubleNotEqual); michael@0: } else { michael@0: branchToBlock(Assembler::SingleFloat, input, ScratchFloatReg, ifFalse, michael@0: Assembler::DoubleEqualOrUnordered); michael@0: jumpToBlock(ifTrue); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareD(LCompareD *comp) michael@0: { michael@0: FloatRegister lhs = ToFloatRegister(comp->left()); michael@0: FloatRegister rhs = ToFloatRegister(comp->right()); michael@0: Register dest = ToRegister(comp->output()); michael@0: michael@0: Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop()); michael@0: masm.ma_cmp_set_double(dest, lhs, rhs, cond); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareF(LCompareF *comp) michael@0: { michael@0: FloatRegister lhs = ToFloatRegister(comp->left()); michael@0: FloatRegister rhs = ToFloatRegister(comp->right()); michael@0: Register dest = ToRegister(comp->output()); michael@0: michael@0: Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop()); michael@0: masm.ma_cmp_set_float32(dest, lhs, rhs, cond); michael@0: return true; michael@0: } michael@0: michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareDAndBranch(LCompareDAndBranch *comp) michael@0: { michael@0: FloatRegister lhs = ToFloatRegister(comp->left()); michael@0: FloatRegister rhs = ToFloatRegister(comp->right()); michael@0: michael@0: Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop()); michael@0: MBasicBlock *ifTrue = comp->ifTrue(); michael@0: MBasicBlock *ifFalse = comp->ifFalse(); michael@0: michael@0: if (isNextBlock(ifFalse->lir())) { michael@0: branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond); michael@0: } else { michael@0: branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse, michael@0: Assembler::InvertCondition(cond)); michael@0: jumpToBlock(ifTrue); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareFAndBranch(LCompareFAndBranch *comp) michael@0: { michael@0: FloatRegister lhs = ToFloatRegister(comp->left()); michael@0: FloatRegister rhs = ToFloatRegister(comp->right()); michael@0: michael@0: Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop()); michael@0: MBasicBlock *ifTrue = comp->ifTrue(); michael@0: MBasicBlock *ifFalse = comp->ifFalse(); michael@0: michael@0: if (isNextBlock(ifFalse->lir())) { michael@0: branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond); michael@0: } else { michael@0: branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse, michael@0: Assembler::InvertCondition(cond)); michael@0: jumpToBlock(ifTrue); michael@0: } michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitCompareB(LCompareB *lir) michael@0: { michael@0: MCompare *mir = lir->mir(); michael@0: michael@0: const ValueOperand lhs = ToValue(lir, LCompareB::Lhs); michael@0: const LAllocation *rhs = lir->rhs(); michael@0: const Register output = ToRegister(lir->output()); michael@0: michael@0: MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); michael@0: Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); michael@0: michael@0: Label notBoolean, done; 
michael@0:     masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
michael@0:     {
michael@0:         if (rhs->isConstant())
michael@0:             masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output);
michael@0:         else
michael@0:             masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
michael@0:         masm.jump(&done);
michael@0:     }
michael@0:
michael@0:     masm.bind(&notBoolean);
michael@0:     {
michael@0:         masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
michael@0:     }
michael@0:
michael@0:     masm.bind(&done);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch *lir)
michael@0: {
michael@0:     MCompare *mir = lir->cmpMir();
michael@0:     const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
michael@0:     const LAllocation *rhs = lir->rhs();
michael@0:
michael@0:     MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
michael@0:
michael@0:     MBasicBlock *mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
michael@0:     branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual);
michael@0:
michael@0:     Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
michael@0:     if (rhs->isConstant())
michael@0:         emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(),
michael@0:                    lir->ifFalse());
michael@0:     else
michael@0:         emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse());
michael@0:
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitCompareV(LCompareV *lir)
michael@0: {
michael@0:     MCompare *mir = lir->mir();
michael@0:     Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
michael@0:     const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
michael@0:     const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
michael@0:     const Register output = ToRegister(lir->output());
michael@0:
michael@0:     MOZ_ASSERT(IsEqualityOp(mir->jsop()));
michael@0:
michael@0:     Label notEqual, done;
michael@0:     masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual, ShortJump);
michael@0:     {
michael@0:         masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
michael@0:         masm.ma_b(&done, ShortJump);
michael@0:     }
michael@0:     masm.bind(&notEqual);
michael@0:     {
michael@0:         masm.move32(Imm32(cond == Assembler::NotEqual), output);
michael@0:     }
michael@0:
michael@0:     masm.bind(&done);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitCompareVAndBranch(LCompareVAndBranch *lir)
michael@0: {
michael@0:     MCompare *mir = lir->cmpMir();
michael@0:     Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
michael@0:     const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
michael@0:     const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);
michael@0:
michael@0:     MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
michael@0:                mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
michael@0:
michael@0:     MBasicBlock *notEqual = (cond == Assembler::Equal) ?
lir->ifFalse() : lir->ifTrue(); michael@0: michael@0: branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual); michael@0: emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(), lir->ifFalse()); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitBitAndAndBranch(LBitAndAndBranch *lir) michael@0: { michael@0: if (lir->right()->isConstant()) michael@0: masm.ma_and(ScratchRegister, ToRegister(lir->left()), Imm32(ToInt32(lir->right()))); michael@0: else michael@0: masm.ma_and(ScratchRegister, ToRegister(lir->left()), ToRegister(lir->right())); michael@0: emitBranch(ScratchRegister, ScratchRegister, Assembler::NonZero, lir->ifTrue(), michael@0: lir->ifFalse()); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir) michael@0: { michael@0: masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir) michael@0: { michael@0: masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNotI(LNotI *ins) michael@0: { michael@0: masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0), michael@0: ToRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNotD(LNotD *ins) michael@0: { michael@0: // Since this operation is not, we want to set a bit if michael@0: // the double is falsey, which means 0.0, -0.0 or NaN. michael@0: FloatRegister in = ToFloatRegister(ins->input()); michael@0: Register dest = ToRegister(ins->output()); michael@0: michael@0: Label falsey, done; michael@0: masm.loadConstantDouble(0.0, ScratchFloatReg); michael@0: masm.ma_bc1d(in, ScratchFloatReg, &falsey, Assembler::DoubleEqualOrUnordered, ShortJump); michael@0: michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&falsey); michael@0: masm.move32(Imm32(1), dest); michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNotF(LNotF *ins) michael@0: { michael@0: // Since this operation is not, we want to set a bit if michael@0: // the float32 is falsey, which means 0.0, -0.0 or NaN. 
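michael@0:     // Comparing against +0.0 with an equal-or-unordered condition also catches -0.0 and NaN.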
michael@0: FloatRegister in = ToFloatRegister(ins->input()); michael@0: Register dest = ToRegister(ins->output()); michael@0: michael@0: Label falsey, done; michael@0: masm.loadConstantFloat32(0.0, ScratchFloatReg); michael@0: masm.ma_bc1s(in, ScratchFloatReg, &falsey, Assembler::DoubleEqualOrUnordered, ShortJump); michael@0: michael@0: masm.move32(Imm32(0), dest); michael@0: masm.ma_b(&done, ShortJump); michael@0: michael@0: masm.bind(&falsey); michael@0: masm.move32(Imm32(1), dest); michael@0: michael@0: masm.bind(&done); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitLoadSlotV(LLoadSlotV *load) michael@0: { michael@0: const ValueOperand out = ToOutValue(load); michael@0: Register base = ToRegister(load->input()); michael@0: int32_t offset = load->mir()->slot() * sizeof(js::Value); michael@0: michael@0: masm.loadValue(Address(base, offset), out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitLoadSlotT(LLoadSlotT *load) michael@0: { michael@0: Register base = ToRegister(load->input()); michael@0: int32_t offset = load->mir()->slot() * sizeof(js::Value); michael@0: michael@0: if (load->mir()->type() == MIRType_Double) michael@0: masm.loadInt32OrDouble(Address(base, offset), ToFloatRegister(load->output())); michael@0: else michael@0: masm.load32(Address(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitStoreSlotT(LStoreSlotT *store) michael@0: { michael@0: Register base = ToRegister(store->slots()); michael@0: int32_t offset = store->mir()->slot() * sizeof(js::Value); michael@0: michael@0: const LAllocation *value = store->value(); michael@0: MIRType valueType = store->mir()->value()->type(); michael@0: michael@0: if (store->mir()->needsBarrier()) michael@0: emitPreBarrier(Address(base, offset), store->mir()->slotType()); michael@0: michael@0: if (valueType == MIRType_Double) { michael@0: masm.storeDouble(ToFloatRegister(value), Address(base, offset)); michael@0: return true; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != store->mir()->slotType()) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Address(base, offset)); michael@0: michael@0: // Store the payload. 
michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), Address(base, offset)); michael@0: else michael@0: masm.storePayload(ToRegister(value), Address(base, offset)); michael@0: michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitLoadElementT(LLoadElementT *load) michael@0: { michael@0: Register base = ToRegister(load->elements()); michael@0: if (load->mir()->type() == MIRType_Double) { michael@0: FloatRegister fpreg = ToFloatRegister(load->output()); michael@0: if (load->index()->isConstant()) { michael@0: Address source(base, ToInt32(load->index()) * sizeof(Value)); michael@0: if (load->mir()->loadDoubles()) michael@0: masm.loadDouble(source, fpreg); michael@0: else michael@0: masm.loadInt32OrDouble(source, fpreg); michael@0: } else { michael@0: Register index = ToRegister(load->index()); michael@0: if (load->mir()->loadDoubles()) michael@0: masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg); michael@0: else michael@0: masm.loadInt32OrDouble(base, index, fpreg); michael@0: } michael@0: } else { michael@0: if (load->index()->isConstant()) { michael@0: Address source(base, ToInt32(load->index()) * sizeof(Value)); michael@0: masm.load32(source, ToRegister(load->output())); michael@0: } else { michael@0: BaseIndex source(base, ToRegister(load->index()), TimesEight); michael@0: masm.load32(source, ToRegister(load->output())); michael@0: } michael@0: } michael@0: MOZ_ASSERT(!load->mir()->needsHoleCheck()); michael@0: return true; michael@0: } michael@0: michael@0: void michael@0: CodeGeneratorMIPS::storeElementTyped(const LAllocation *value, MIRType valueType, michael@0: MIRType elementType, const Register &elements, michael@0: const LAllocation *index) michael@0: { michael@0: if (index->isConstant()) { michael@0: Address dest = Address(elements, ToInt32(index) * sizeof(Value)); michael@0: if (valueType == MIRType_Double) { michael@0: masm.storeDouble(ToFloatRegister(value), Address(dest.base, dest.offset)); michael@0: return; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != elementType) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest); michael@0: michael@0: // Store the payload. michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), dest); michael@0: else michael@0: masm.storePayload(ToRegister(value), dest); michael@0: } else { michael@0: Register indexReg = ToRegister(index); michael@0: if (valueType == MIRType_Double) { michael@0: masm.storeDouble(ToFloatRegister(value), BaseIndex(elements, indexReg, TimesEight)); michael@0: return; michael@0: } michael@0: michael@0: // Store the type tag if needed. michael@0: if (valueType != elementType) michael@0: masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg); michael@0: michael@0: // Store the payload. 
michael@0: if (value->isConstant()) michael@0: masm.storePayload(*value->toConstant(), elements, indexReg); michael@0: else michael@0: masm.storePayload(ToRegister(value), elements, indexReg); michael@0: } michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitGuardShape(LGuardShape *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.loadPtr(Address(obj, JSObject::offsetOfShape()), tmp); michael@0: return bailoutCmpPtr(Assembler::NotEqual, tmp, ImmGCPtr(guard->mir()->shape()), michael@0: guard->snapshot()); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitGuardObjectType(LGuardObjectType *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp); michael@0: Assembler::Condition cond = guard->mir()->bailOnEquality() michael@0: ? Assembler::Equal michael@0: : Assembler::NotEqual; michael@0: return bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->typeObject()), guard->snapshot()); michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitGuardClass(LGuardClass *guard) michael@0: { michael@0: Register obj = ToRegister(guard->input()); michael@0: Register tmp = ToRegister(guard->tempInt()); michael@0: michael@0: masm.loadObjClass(obj, tmp); michael@0: if (!bailoutCmpPtr(Assembler::NotEqual, tmp, Imm32((uint32_t)guard->mir()->getClass()), michael@0: guard->snapshot())) michael@0: return false; michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitImplicitThis(LImplicitThis *lir) michael@0: { michael@0: Register callee = ToRegister(lir->callee()); michael@0: const ValueOperand out = ToOutValue(lir); michael@0: michael@0: // The implicit |this| is always |undefined| if the function's environment michael@0: // is the current global. michael@0: masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), out.typeReg()); michael@0: GlobalObject *global = &gen->info().script()->global(); michael@0: michael@0: // TODO: OOL stub path. michael@0: if (!bailoutCmpPtr(Assembler::NotEqual, out.typeReg(), ImmGCPtr(global), lir->snapshot())) michael@0: return false; michael@0: michael@0: masm.moveValue(UndefinedValue(), out); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitInterruptCheck(LInterruptCheck *lir) michael@0: { michael@0: OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing()); michael@0: if (!ool) michael@0: return false; michael@0: michael@0: masm.branch32(Assembler::NotEqual, michael@0: AbsoluteAddress(GetIonContext()->runtime->addressOfInterrupt()), Imm32(0), michael@0: ool->entry()); michael@0: masm.bind(ool->rejoin()); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::generateInvalidateEpilogue() michael@0: { michael@0: // Ensure that there is enough space in the buffer for the OsiPoint michael@0: // patching to occur. Otherwise, we could overwrite the invalidation michael@0: // epilogue. 
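michael@0:     // The NOPs below pad out at least sizeof(void *) bytes before the invalidation epilogue.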
michael@0:     for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
michael@0:         masm.nop();
michael@0:
michael@0:     masm.bind(&invalidate_);
michael@0:
michael@0:     // Push the return address of the point that we bailed out at to the stack
michael@0:     masm.Push(ra);
michael@0:
michael@0:     // Push the Ion script onto the stack (when we determine what that
michael@0:     // pointer is).
michael@0:     invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
michael@0:     JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();
michael@0:
michael@0:     masm.branch(thunk);
michael@0:
michael@0:     // We should never reach this point in JIT code -- the invalidation thunk
michael@0:     // should pop the invalidated JS frame and return directly to its caller.
michael@0:     masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
michael@0:     return true;
michael@0: }
michael@0:
michael@0: void
michael@0: DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
michael@0: {
michael@0:     // Can always use the scratch register on MIPS.
michael@0:     addState->dispatchScratch = ScratchRegister;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
michael@0: {
michael@0:     MOZ_ASSUME_UNREACHABLE("NYI");
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
michael@0: {
michael@0:     MOZ_ASSUME_UNREACHABLE("NYI");
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
michael@0: {
michael@0:     const MAsmJSLoadHeap *mir = ins->mir();
michael@0:     const LAllocation *ptr = ins->ptr();
michael@0:     const LDefinition *out = ins->output();
michael@0:
michael@0:     bool isSigned;
michael@0:     int size;
michael@0:     bool isFloat = false;
michael@0:     switch (mir->viewType()) {
michael@0:       case ArrayBufferView::TYPE_INT8: isSigned = true; size = 8; break;
michael@0:       case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break;
michael@0:       case ArrayBufferView::TYPE_INT16: isSigned = true; size = 16; break;
michael@0:       case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break;
michael@0:       case ArrayBufferView::TYPE_INT32: isSigned = true; size = 32; break;
michael@0:       case ArrayBufferView::TYPE_UINT32: isSigned = false; size = 32; break;
michael@0:       case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
michael@0:       case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break;
michael@0:       default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
michael@0:     }
michael@0:
michael@0:     if (ptr->isConstant()) {
michael@0:         MOZ_ASSERT(mir->skipBoundsCheck());
michael@0:         int32_t ptrImm = ptr->toConstant()->toInt32();
michael@0:         MOZ_ASSERT(ptrImm >= 0);
michael@0:         if (isFloat) {
michael@0:             if (size == 32) {
michael@0:                 masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
michael@0:             } else {
michael@0:                 masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
michael@0:             }
michael@0:         } else {
michael@0:             masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
michael@0:                          static_cast<LoadStoreSize>(size), isSigned ?
                         SignExtend : ZeroExtend);
michael@0:         }
michael@0:         return true;
michael@0:     }
michael@0:
michael@0:     Register ptrReg = ToRegister(ptr);
michael@0:
michael@0:     if (mir->skipBoundsCheck()) {
michael@0:         if (isFloat) {
michael@0:             if (size == 32) {
michael@0:                 masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
michael@0:             } else {
michael@0:                 masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
michael@0:             }
michael@0:         } else {
michael@0:             masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
michael@0:                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
michael@0:         }
michael@0:         return true;
michael@0:     }
michael@0:
michael@0:     BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
michael@0:
michael@0:     Label outOfRange;
michael@0:     Label done;
michael@0:     masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
michael@0:     // Offset is ok, let's load value.
michael@0:     if (isFloat) {
michael@0:         if (size == 32)
michael@0:             masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
michael@0:         else
michael@0:             masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
michael@0:     } else {
michael@0:         masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
michael@0:                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
michael@0:     }
michael@0:     masm.ma_b(&done, ShortJump);
michael@0:     masm.bind(&outOfRange);
michael@0:     // Offset is out of range. Load default values.
michael@0:     if (isFloat) {
michael@0:         if (size == 32)
michael@0:             masm.convertDoubleToFloat32(NANReg, ToFloatRegister(out));
michael@0:         else
michael@0:             masm.moveDouble(NANReg, ToFloatRegister(out));
michael@0:     } else {
michael@0:         masm.move32(Imm32(0), ToRegister(out));
michael@0:     }
michael@0:     masm.bind(&done);
michael@0:
michael@0:     return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
michael@0: {
michael@0:     const MAsmJSStoreHeap *mir = ins->mir();
michael@0:     const LAllocation *value = ins->value();
michael@0:     const LAllocation *ptr = ins->ptr();
michael@0:
michael@0:     bool isSigned;
michael@0:     int size;
michael@0:     bool isFloat = false;
michael@0:     switch (mir->viewType()) {
michael@0:       case ArrayBufferView::TYPE_INT8: isSigned = true; size = 8; break;
michael@0:       case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break;
michael@0:       case ArrayBufferView::TYPE_INT16: isSigned = true; size = 16; break;
michael@0:       case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break;
michael@0:       case ArrayBufferView::TYPE_INT32: isSigned = true; size = 32; break;
michael@0:       case ArrayBufferView::TYPE_UINT32: isSigned = false; size = 32; break;
michael@0:       case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
michael@0:       case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break;
michael@0:       default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
michael@0:     }
michael@0:
michael@0:     if (ptr->isConstant()) {
michael@0:         MOZ_ASSERT(mir->skipBoundsCheck());
michael@0:         int32_t ptrImm = ptr->toConstant()->toInt32();
michael@0:         MOZ_ASSERT(ptrImm >= 0);
michael@0:
michael@0:         if (isFloat) {
michael@0:             if (size == 32) {
michael@0:                 masm.storeFloat32(ToFloatRegister(value), Address(HeapReg, ptrImm));
michael@0:             } else {
michael@0:                 masm.storeDouble(ToFloatRegister(value), Address(HeapReg, ptrImm));
michael@0:             }
michael@0:         } else {
michael@0:             masm.ma_store(ToRegister(value),
                          Address(HeapReg, ptrImm),
michael@0:                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
michael@0:         }
michael@0:         return true;
michael@0:     }
michael@0:
michael@0:     Register ptrReg = ToRegister(ptr);
michael@0:     Address dstAddr(ptrReg, 0);
michael@0:
michael@0:     if (mir->skipBoundsCheck()) {
michael@0:         if (isFloat) {
michael@0:             if (size == 32) {
michael@0:                 masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
michael@0:             } else
michael@0:                 masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
michael@0:         } else {
michael@0:             masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
michael@0:                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
michael@0:         }
michael@0:         return true;
michael@0:     }
michael@0:
michael@0:     BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
michael@0:
michael@0:     Label rejoin;
michael@0:     masm.ma_b(ptrReg, ScratchRegister, &rejoin, Assembler::AboveOrEqual, ShortJump);
michael@0:
michael@0:     // Offset is ok, let's store value.
michael@0:     if (isFloat) {
michael@0:         if (size == 32) {
michael@0:             masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
michael@0:         } else
michael@0:             masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
michael@0:     } else {
michael@0:         masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
michael@0:                       static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
michael@0:     }
michael@0:     masm.bind(&rejoin);
michael@0:
michael@0:     return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
michael@0: {
michael@0:     const MAsmJSPassStackArg *mir = ins->mir();
michael@0:     if (ins->arg()->isConstant()) {
michael@0:         masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
michael@0:     } else {
michael@0:         if (ins->arg()->isGeneralReg()) {
michael@0:             masm.storePtr(ToRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
michael@0:         } else {
michael@0:             masm.storeDouble(ToFloatRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
michael@0:         }
michael@0:     }
michael@0:
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitUDiv(LUDiv *ins)
michael@0: {
michael@0:     Register lhs = ToRegister(ins->lhs());
michael@0:     Register rhs = ToRegister(ins->rhs());
michael@0:     Register output = ToRegister(ins->output());
michael@0:
michael@0:     Label done;
michael@0:     if (ins->mir()->canBeDivideByZero()) {
michael@0:         if (ins->mir()->isTruncated()) {
michael@0:             Label notzero;
michael@0:             masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
michael@0:             masm.move32(Imm32(0), output);
michael@0:             masm.ma_b(&done, ShortJump);
michael@0:             masm.bind(&notzero);
michael@0:         } else {
michael@0:             MOZ_ASSERT(ins->mir()->fallible());
michael@0:             if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
michael@0:                 return false;
michael@0:         }
michael@0:     }
michael@0:
michael@0:     masm.as_divu(lhs, rhs);
michael@0:     masm.as_mflo(output);
michael@0:
michael@0:     if (!ins->mir()->isTruncated()) {
michael@0:         if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
michael@0:             return false;
michael@0:     }
michael@0:
michael@0:     masm.bind(&done);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitUMod(LUMod *ins)
michael@0: {
michael@0:     Register lhs = ToRegister(ins->lhs());
michael@0:     Register rhs =
ToRegister(ins->rhs());
michael@0:     Register output = ToRegister(ins->output());
michael@0:     Label done;
michael@0:
michael@0:     if (ins->mir()->canBeDivideByZero()) {
michael@0:         if (ins->mir()->isTruncated()) {
michael@0:             // Infinity|0 == 0
michael@0:             Label notzero;
michael@0:             masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
michael@0:             masm.move32(Imm32(0), output);
michael@0:             masm.ma_b(&done, ShortJump);
michael@0:             masm.bind(&notzero);
michael@0:         } else {
michael@0:             MOZ_ASSERT(ins->mir()->fallible());
michael@0:             if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
michael@0:                 return false;
michael@0:         }
michael@0:     }
michael@0:
michael@0:     masm.as_divu(lhs, rhs);
michael@0:     masm.as_mfhi(output);
michael@0:
michael@0:     if (!ins->mir()->isTruncated()) {
michael@0:         if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
michael@0:             return false;
michael@0:     }
michael@0:
michael@0:     masm.bind(&done);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitEffectiveAddress(LEffectiveAddress *ins)
michael@0: {
michael@0:     const MEffectiveAddress *mir = ins->mir();
michael@0:     Register base = ToRegister(ins->base());
michael@0:     Register index = ToRegister(ins->index());
michael@0:     Register output = ToRegister(ins->output());
michael@0:
michael@0:     BaseIndex address(base, index, mir->scale(), mir->displacement());
michael@0:     masm.computeEffectiveAddress(address, output);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
michael@0: {
michael@0:     const MAsmJSLoadGlobalVar *mir = ins->mir();
michael@0:     unsigned addr = mir->globalDataOffset();
michael@0:     if (mir->type() == MIRType_Int32)
michael@0:         masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
michael@0:     else if (mir->type() == MIRType_Float32)
michael@0:         masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
michael@0:     else
michael@0:         masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
michael@0: {
michael@0:     const MAsmJSStoreGlobalVar *mir = ins->mir();
michael@0:
michael@0:     MIRType type = mir->value()->type();
michael@0:     MOZ_ASSERT(IsNumberType(type));
michael@0:     unsigned addr = mir->globalDataOffset();
michael@0:     if (mir->value()->type() == MIRType_Int32)
michael@0:         masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
michael@0:     else if (mir->value()->type() == MIRType_Float32)
michael@0:         masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
michael@0:     else
michael@0:         masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
michael@0: {
michael@0:     const MAsmJSLoadFuncPtr *mir = ins->mir();
michael@0:
michael@0:     Register index = ToRegister(ins->index());
michael@0:     Register tmp = ToRegister(ins->temp());
michael@0:     Register out = ToRegister(ins->output());
michael@0:     unsigned addr = mir->globalDataOffset();
michael@0:
michael@0:     BaseIndex source(GlobalReg, index, TimesFour, addr);
michael@0:     masm.load32(source, out);
michael@0:     return true;
michael@0: }
michael@0:
michael@0: bool
michael@0: CodeGeneratorMIPS::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
michael@0: {
michael@0:     const
MAsmJSLoadFFIFunc *mir = ins->mir(); michael@0: masm.loadPtr(Address(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output())); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNegI(LNegI *ins) michael@0: { michael@0: Register input = ToRegister(ins->input()); michael@0: Register output = ToRegister(ins->output()); michael@0: michael@0: masm.ma_negu(output, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNegD(LNegD *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: michael@0: masm.as_negd(output, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitNegF(LNegF *ins) michael@0: { michael@0: FloatRegister input = ToFloatRegister(ins->input()); michael@0: FloatRegister output = ToFloatRegister(ins->output()); michael@0: michael@0: masm.as_negs(output, input); michael@0: return true; michael@0: } michael@0: michael@0: bool michael@0: CodeGeneratorMIPS::visitForkJoinGetSlice(LForkJoinGetSlice *ins) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: } michael@0: michael@0: JitCode * michael@0: JitRuntime::generateForkJoinGetSliceStub(JSContext *cx) michael@0: { michael@0: MOZ_ASSUME_UNREACHABLE("NYI"); michael@0: }