/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/shared/CodeGenerator-x86-shared.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jsmath.h"

#include "jit/IonFrames.h"
#include "jit/IonLinker.h"
#include "jit/JitCompartment.h"
#include "jit/RangeAnalysis.h"
#include "vm/TraceLogging.h"

#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Abs;
using mozilla::FloatingPoint;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;
using mozilla::SpecificNaN;

namespace js {
namespace jit {

CodeGeneratorX86Shared::CodeGeneratorX86Shared(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}

bool
CodeGeneratorX86Shared::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());

    return true;
}

bool
CodeGeneratorX86Shared::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check before pushing
    // frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    return true;
}

bool
CodeGeneratorX86Shared::generateEpilogue()
{
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    // Pop the stack we allocated at the start of the function.
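    // freeStack() adjusts the stack pointer and decrements
    // MacroAssembler::framePushed() by the same amount, so the assertion
    // below checks that the prologue and epilogue stayed balanced.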
    masm.freeStack(frameSize());
    JS_ASSERT(masm.framePushed() == 0);

    masm.ret();
    return true;
}

bool
OutOfLineBailout::accept(CodeGeneratorX86Shared *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}

void
CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue,
                                   MBasicBlock *mirFalse, Assembler::NaNCond ifNaN)
{
    if (ifNaN == Assembler::NaN_IsFalse)
        jumpToBlock(mirFalse, Assembler::Parity);
    else if (ifNaN == Assembler::NaN_IsTrue)
        jumpToBlock(mirTrue, Assembler::Parity);

    if (isNextBlock(mirFalse->lir())) {
        jumpToBlock(mirTrue, cond);
    } else {
        jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
        jumpToBlock(mirTrue);
    }
}

bool
CodeGeneratorX86Shared::visitDouble(LDouble *ins)
{
    const LDefinition *out = ins->getDef(0);
    masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
    return true;
}

bool
CodeGeneratorX86Shared::visitFloat32(LFloat32 *ins)
{
    const LDefinition *out = ins->getDef(0);
    masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
    return true;
}

bool
CodeGeneratorX86Shared::visitTestIAndBranch(LTestIAndBranch *test)
{
    const LAllocation *opd = test->input();

    // Test the operand
    masm.testl(ToRegister(opd), ToRegister(opd));
    emitBranch(Assembler::NonZero, test->ifTrue(), test->ifFalse());
    return true;
}

bool
CodeGeneratorX86Shared::visitTestDAndBranch(LTestDAndBranch *test)
{
    const LAllocation *opd = test->input();

    // ucomisd flags:
    //             Z  P  C
    //            ---------
    //      NaN    1  1  1
    //        >    0  0  0
    //        <    0  0  1
    //        =    1  0  0
    //
    // NaN is falsey, so comparing against 0 and then using the Z flag is
    // enough to determine which branch to take.
    masm.xorpd(ScratchFloatReg, ScratchFloatReg);
    masm.ucomisd(ToFloatRegister(opd), ScratchFloatReg);
    emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
    return true;
}

bool
CodeGeneratorX86Shared::visitTestFAndBranch(LTestFAndBranch *test)
{
    const LAllocation *opd = test->input();
    // ucomiss flags are the same as doubles; see comment above
    masm.xorps(ScratchFloatReg, ScratchFloatReg);
    masm.ucomiss(ToFloatRegister(opd), ScratchFloatReg);
    emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
    return true;
}

bool
CodeGeneratorX86Shared::visitBitAndAndBranch(LBitAndAndBranch *baab)
{
    if (baab->right()->isConstant())
        masm.testl(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
    else
        masm.testl(ToRegister(baab->left()), ToRegister(baab->right()));
    emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
    return true;
}

void
CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type, const LAllocation *left, const LAllocation *right)
{
#ifdef JS_CODEGEN_X64
    if (type == MCompare::Compare_Object) {
        masm.cmpq(ToRegister(left), ToOperand(right));
        return;
    }
#endif

    if (right->isConstant())
        masm.cmpl(ToRegister(left), Imm32(ToInt32(right)));
    else
        masm.cmpl(ToRegister(left), ToOperand(right));
}

bool
CodeGeneratorX86Shared::visitCompare(LCompare *comp)
{
    MCompare *mir = comp->mir();
    emitCompare(mir->compareType(), comp->left(), comp->right());
    masm.emitSet(JSOpToCondition(mir->compareType(), comp->jsop()), ToRegister(comp->output()));
    return true;
}

bool
CodeGeneratorX86Shared::visitCompareAndBranch(LCompareAndBranch *comp)
{
    MCompare *mir = comp->cmpMir();
    emitCompare(mir->compareType(), comp->left(), comp->right());
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
    emitBranch(cond, comp->ifTrue(), comp->ifFalse());
    return true;
}

bool
CodeGeneratorX86Shared::visitCompareD(LCompareD *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->mir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareDouble(cond, lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitCompareF(LCompareF *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->mir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareFloat(cond, lhs, rhs);
    masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output()), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitNotI(LNotI *ins)
{
    masm.cmpl(ToRegister(ins->input()), Imm32(0));
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
    return true;
}

bool
CodeGeneratorX86Shared::visitNotD(LNotD *ins)
{
    FloatRegister opd = ToFloatRegister(ins->input());

    // Not returns true if the input is a NaN. We don't have to worry about
    // it if we know the input is never NaN though.
    Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
    if (ins->mir()->operandIsNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.xorpd(ScratchFloatReg, ScratchFloatReg);
    masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, ScratchFloatReg);
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitNotF(LNotF *ins)
{
    FloatRegister opd = ToFloatRegister(ins->input());

    // Not returns true if the input is a NaN. We don't have to worry about
    // it if we know the input is never NaN though.
    Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
    if (ins->mir()->operandIsNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.xorps(ScratchFloatReg, ScratchFloatReg);
    masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, ScratchFloatReg);
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitCompareDAndBranch(LCompareDAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->cmpMir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareDouble(cond, lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitCompareFAndBranch(LCompareFAndBranch *comp)
{
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());

    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (comp->cmpMir()->operandsAreNeverNaN())
        nanCond = Assembler::NaN_HandledByCond;

    masm.compareFloat(cond, lhs, rhs);
    emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse(), nanCond);
    return true;
}

bool
CodeGeneratorX86Shared::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
{
    const MAsmJSPassStackArg *mir = ins->mir();
    Address dst(StackPointer, mir->spOffset());
    if (ins->arg()->isConstant()) {
        masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
    } else {
        if (ins->arg()->isGeneralReg())
            masm.storePtr(ToRegister(ins->arg()), dst);
        else
            masm.storeDouble(ToFloatRegister(ins->arg()), dst);
    }
    return true;
}

bool
CodeGeneratorX86Shared::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
        masm.push(Imm32(frameSize()));

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.jmp(ImmPtr(handler->raw()), Relocation::JITCODE);
    }

    return true;
}

class BailoutJump {
    Assembler::Condition cond_;

  public:
    BailoutJump(Assembler::Condition cond) : cond_(cond)
    { }
#ifdef JS_CODEGEN_X86
    void operator()(MacroAssembler &masm, uint8_t *code) const {
        masm.j(cond_, ImmPtr(code), Relocation::HARDCODED);
    }
#endif
    void operator()(MacroAssembler &masm, Label *label) const {
        masm.j(cond_, label);
    }
};

class BailoutLabel {
    Label *label_;

  public:
    BailoutLabel(Label *label) : label_(label)
    { }
#ifdef JS_CODEGEN_X86
    void operator()(MacroAssembler &masm, uint8_t *code) const {
        masm.retarget(label_, ImmPtr(code), Relocation::HARDCODED);
    }
#endif
    void operator()(MacroAssembler &masm, Label *label) const {
        masm.retarget(label_, label);
    }
};

template <typename T> bool
CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        binder(masm, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
                 frameClass_.frameSize() == masm.framePushed());

#ifdef JS_CODEGEN_X86
    // On x64, bailout tables are pointless, because 16 extra bytes are
    // reserved per external jump, whereas it takes only 10 bytes to encode
    // a non-table based bailout.
    if (assignBailoutId(snapshot)) {
        binder(masm, deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE);
        return true;
    }
#endif

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot);
    if (!addOutOfLineCode(ool))
        return false;

    binder(masm, ool->entry());
    return true;
}

bool
CodeGeneratorX86Shared::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    return bailout(BailoutJump(condition), snapshot);
}

bool
CodeGeneratorX86Shared::bailoutIf(Assembler::DoubleCondition condition, LSnapshot *snapshot)
{
    JS_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) == Assembler::NaN_HandledByCond);
    return bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
}

bool
CodeGeneratorX86Shared::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    JS_ASSERT(label->used() && !label->bound());
    return bailout(BailoutLabel(label), snapshot);
}

bool
CodeGeneratorX86Shared::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.jump(&label);
    return bailoutFrom(&label, snapshot);
}

bool
CodeGeneratorX86Shared::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.push(Imm32(ool->snapshot()->snapshotOffset()));
    masm.jmp(&deoptLabel_);
    return true;
}

bool
CodeGeneratorX86Shared::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
#ifdef DEBUG
    FloatRegister output = ToFloatRegister(ins->output());
    JS_ASSERT(first == output);
#endif

    Label done, nan, minMaxInst;

    // Do a ucomisd to catch equality and NaNs, which both require special
    // handling. If the operands are ordered and inequal, we branch straight to
    // the min/max instruction. If we wanted, we could also branch for less-than
    // or greater-than here instead of using min/max, however these conditions
    // will sometimes be hard on the branch predictor.
    masm.ucomisd(first, second);
    masm.j(Assembler::NotEqual, &minMaxInst);
    if (!ins->mir()->range() || ins->mir()->range()->canBeNaN())
        masm.j(Assembler::Parity, &nan);

    // Ordered and equal. The operands are bit-identical unless they are zero
    // and negative zero. These instructions merge the sign bits in that
    // case, and are no-ops otherwise.
    if (ins->mir()->isMax())
        masm.andpd(second, first);
    else
        masm.orpd(second, first);
    masm.jump(&done);

    // x86's min/max are not symmetric; if either operand is a NaN, they return
    // the read-only operand. We need to return a NaN if either operand is a
    // NaN, so we explicitly check for a NaN in the read-write operand.
    if (!ins->mir()->range() || ins->mir()->range()->canBeNaN()) {
        masm.bind(&nan);
        masm.ucomisd(first, first);
        masm.j(Assembler::Parity, &done);
    }

    // When the values are inequal, or second is NaN, x86's min and max will
    // return the value we need.
    masm.bind(&minMaxInst);
    if (ins->mir()->isMax())
        masm.maxsd(second, first);
    else
        masm.minsd(second, first);

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorX86Shared::visitAbsD(LAbsD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    // Load a value which is all ones except for the sign bit.
    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::SignificandBits),
                            ScratchFloatReg);
    masm.andpd(ScratchFloatReg, input);
    return true;
}

bool
CodeGeneratorX86Shared::visitAbsF(LAbsF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));
    // Same trick as visitAbsD above.
    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::SignificandBits),
                             ScratchFloatReg);
    masm.andps(ScratchFloatReg, input);
    return true;
}

bool
CodeGeneratorX86Shared::visitSqrtD(LSqrtD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());
    masm.sqrtsd(input, output);
    return true;
}

bool
CodeGeneratorX86Shared::visitSqrtF(LSqrtF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());
    masm.sqrtss(input, output);
    return true;
}

bool
CodeGeneratorX86Shared::visitPowHalfD(LPowHalfD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));

    Label done, sqrt;

    if (!ins->mir()->operandIsNeverNegativeInfinity()) {
        // Branch if not -Infinity.
        masm.loadConstantDouble(NegativeInfinity<double>(), ScratchFloatReg);

        Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
        if (ins->mir()->operandIsNeverNaN())
            cond = Assembler::DoubleNotEqual;
        masm.branchDouble(cond, input, ScratchFloatReg, &sqrt);

        // Math.pow(-Infinity, 0.5) == Infinity.
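        // Computed as 0.0 - (-Infinity): zero the input with xorpd, then
        // subtract the -Infinity constant held in ScratchFloatReg.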
        masm.xorpd(input, input);
        masm.subsd(ScratchFloatReg, input);
        masm.jump(&done);

        masm.bind(&sqrt);
    }

    if (!ins->mir()->operandIsNeverNegativeZero()) {
        // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
        masm.xorpd(ScratchFloatReg, ScratchFloatReg);
        masm.addsd(ScratchFloatReg, input);
    }

    masm.sqrtsd(input, input);

    masm.bind(&done);
    return true;
}

class OutOfLineUndoALUOperation : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
    LInstruction *ins_;

  public:
    OutOfLineUndoALUOperation(LInstruction *ins)
      : ins_(ins)
    { }

    virtual bool accept(CodeGeneratorX86Shared *codegen) {
        return codegen->visitOutOfLineUndoALUOperation(this);
    }
    LInstruction *ins() const {
        return ins_;
    }
};

bool
CodeGeneratorX86Shared::visitAddI(LAddI *ins)
{
    if (ins->rhs()->isConstant())
        masm.addl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
    else
        masm.addl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));

    if (ins->snapshot()) {
        if (ins->recoversInput()) {
            OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
            if (!addOutOfLineCode(ool))
                return false;
            masm.j(Assembler::Overflow, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Overflow, ins->snapshot()))
                return false;
        }
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitSubI(LSubI *ins)
{
    if (ins->rhs()->isConstant())
        masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
    else
        masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));

    if (ins->snapshot()) {
        if (ins->recoversInput()) {
            OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
            if (!addOutOfLineCode(ool))
                return false;
            masm.j(Assembler::Overflow, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Overflow, ins->snapshot()))
                return false;
        }
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation *ool)
{
    LInstruction *ins = ool->ins();
    Register reg = ToRegister(ins->getDef(0));

    mozilla::DebugOnly<const LAllocation *> lhs = ins->getOperand(0);
    LAllocation *rhs = ins->getOperand(1);

    JS_ASSERT(reg == ToRegister(lhs));
    JS_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));

    // Undo the effect of the ALU operation, which was performed on the output
    // register and overflowed. Writing to the output register clobbered an
    // input reg, and the original value of the input needs to be recovered
    // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
    // the bailout snapshot.
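    // The inverse operation below recovers the input exactly even though the
    // original one overflowed: two's complement addition and subtraction are
    // invertible modulo 2^32.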

    if (rhs->isConstant()) {
        Imm32 constant(ToInt32(rhs));
        if (ins->isAddI())
            masm.subl(constant, reg);
        else
            masm.addl(constant, reg);
    } else {
        if (ins->isAddI())
            masm.subl(ToOperand(rhs), reg);
        else
            masm.addl(ToOperand(rhs), reg);
    }

    return bailout(ool->ins()->snapshot());
}

class MulNegativeZeroCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
    LMulI *ins_;

  public:
    MulNegativeZeroCheck(LMulI *ins)
      : ins_(ins)
    { }

    virtual bool accept(CodeGeneratorX86Shared *codegen) {
        return codegen->visitMulNegativeZeroCheck(this);
    }
    LMulI *ins() const {
        return ins_;
    }
};

bool
CodeGeneratorX86Shared::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->lhs();
    const LAllocation *rhs = ins->rhs();
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout on -0.0
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::Signed : Assembler::Equal;
            masm.testl(ToRegister(lhs), ToRegister(lhs));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }

        switch (constant) {
          case -1:
            masm.negl(ToOperand(lhs));
            break;
          case 0:
            masm.xorl(ToOperand(lhs), ToRegister(lhs));
            return true; // escape overflow check;
          case 1:
            // nop
            return true; // escape overflow check;
          case 2:
            masm.addl(ToOperand(lhs), ToRegister(lhs));
            break;
          default:
            if (!mul->canOverflow() && constant > 0) {
                // Use shift if cannot overflow and constant is power of 2
                int32_t shift = FloorLog2(constant);
                if ((1 << shift) == constant) {
                    masm.shll(Imm32(shift), ToRegister(lhs));
                    return true;
                }
            }
            masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
        }

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;
    } else {
        masm.imull(ToOperand(rhs), ToRegister(lhs));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            // Jump to an OOL path if the result is 0.
            MulNegativeZeroCheck *ool = new(alloc()) MulNegativeZeroCheck(ins);
            if (!addOutOfLineCode(ool))
                return false;

            masm.testl(ToRegister(lhs), ToRegister(lhs));
            masm.j(Assembler::Zero, ool->entry());
            masm.bind(ool->rejoin());
        }
    }

    return true;
}

class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
    Register reg_;

  public:
    explicit ReturnZero(Register reg)
      : reg_(reg)
    { }

    virtual bool accept(CodeGeneratorX86Shared *codegen) {
        return codegen->visitReturnZero(this);
    }
    Register reg() const {
        return reg_;
    }
};

bool
CodeGeneratorX86Shared::visitReturnZero(ReturnZero *ool)
{
    masm.mov(ImmWord(0), ool->reg());
    masm.jmp(ool->rejoin());
    return true;
}

bool
CodeGeneratorX86Shared::visitUDivOrMod(LUDivOrMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    JS_ASSERT_IF(lhs != rhs, rhs != eax);
    JS_ASSERT(rhs != edx);
    JS_ASSERT_IF(output == eax, ToRegister(ins->remainder()) == edx);

    ReturnZero *ool = nullptr;

    // Put the lhs in eax.
    if (lhs != eax)
        masm.mov(lhs, eax);

    // Prevent divide by zero.
    if (ins->canBeDivideByZero()) {
        masm.testl(rhs, rhs);
        if (ins->mir()->isTruncated()) {
            if (!ool)
                ool = new(alloc()) ReturnZero(output);
            masm.j(Assembler::Zero, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    // Zero extend the lhs into edx to make (edx:eax), since udiv is 64-bit.
    masm.mov(ImmWord(0), edx);
    masm.udiv(rhs);

    // Unsigned div or mod can return a value that's not a signed int32.
    // If our users aren't expecting that, bail.
    if (!ins->mir()->isTruncated()) {
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Signed, ins->snapshot()))
            return false;
    }

    if (ool) {
        if (!addOutOfLineCode(ool))
            return false;
        masm.bind(ool->rejoin());
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitMulNegativeZeroCheck(MulNegativeZeroCheck *ool)
{
    LMulI *ins = ool->ins();
    Register result = ToRegister(ins->output());
    Operand lhsCopy = ToOperand(ins->lhsCopy());
    Operand rhs = ToOperand(ins->rhs());
    JS_ASSERT_IF(lhsCopy.kind() == Operand::REG, lhsCopy.reg() != result.code());

    // Result is -0 if lhs or rhs is negative.
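    // OR-ing the two original operands sets the sign flag iff at least one
    // of them was negative. A zero product with a negative operand (e.g.
    // -3 * 0) should have produced -0, so bail out in that case; otherwise
    // +0 is the correct result and we fall through to materialize it.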
    masm.movl(lhsCopy, result);
    masm.orl(rhs, result);
    if (!bailoutIf(Assembler::Signed, ins->snapshot()))
        return false;

    masm.mov(ImmWord(0), result);
    masm.jmp(ool->rejoin());
    return true;
}

bool
CodeGeneratorX86Shared::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    mozilla::DebugOnly<Register> output = ToRegister(ins->output());

    int32_t shift = ins->shift();
    bool negativeDivisor = ins->negativeDivisor();
    MDiv *mir = ins->mir();

    // We use defineReuseInput so these should always be the same, which is
    // convenient since all of our instructions here are two-address.
    JS_ASSERT(lhs == output);

    if (!mir->isTruncated() && negativeDivisor) {
        // 0 divided by a negative number must return a double.
        masm.testl(lhs, lhs);
        if (!bailoutIf(Assembler::Zero, ins->snapshot()))
            return false;
    }

    if (shift != 0) {
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            masm.testl(lhs, Imm32(UINT32_MAX >> (32 - shift)));
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (mir->canBeNegativeDividend()) {
            Register lhsCopy = ToRegister(ins->numeratorCopy());
            JS_ASSERT(lhsCopy != lhs);
            if (shift > 1)
                masm.sarl(Imm32(31), lhs);
            masm.shrl(Imm32(32 - shift), lhs);
            masm.addl(lhsCopy, lhs);
        }

        masm.sarl(Imm32(shift), lhs);
        if (negativeDivisor)
            masm.negl(lhs);
    } else if (shift == 0 && negativeDivisor) {
        // INT32_MIN / -1 overflows.
        masm.negl(lhs);
        if (!mir->isTruncated() && !bailoutIf(Assembler::Overflow, ins->snapshot()))
            return false;
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitDivOrModConstantI(LDivOrModConstantI *ins) {
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t d = ins->denominator();

    // This emits the division answer into edx or the modulus answer into eax.
    JS_ASSERT(output == eax || output == edx);
    JS_ASSERT(lhs != eax && lhs != edx);
    bool isDiv = (output == edx);

    // The absolute value of the denominator isn't a power of 2 (see LDivPowTwoI
    // and LModPowTwoI).
    JS_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);

    // We will first divide by Abs(d), and negate the answer if d is negative.
    // If desired, this can be avoided by generalizing computeDivisionConstants.
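    // As an illustration, the classic constants for d = 3 are
    // multiplier = 0x55555556 (= ceil(2^32 / 3)) and shiftAmount = 0:
    // (0x55555556 * n) >> 32 equals floor(n / 3) for any non-negative int32
    // n, and the sign correction below fixes up negative numerators.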
    ReciprocalMulConstants rmc = computeDivisionConstants(Abs(d));

    // As explained in the comments of computeDivisionConstants, we first compute
    // X >> (32 + shift), where X is either (rmc.multiplier * n) if the multiplier
    // is non-negative or (rmc.multiplier * n) + (2^32 * n) otherwise. This is the
    // desired division result if n is non-negative, and is one less than the result
    // otherwise.
    masm.movl(Imm32(rmc.multiplier), eax);
    masm.imull(lhs);
    if (rmc.multiplier < 0)
        masm.addl(lhs, edx);
    masm.sarl(Imm32(rmc.shiftAmount), edx);

    // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
    // computed with just a sign-extending shift of 31 bits.
    if (ins->canBeNegativeDividend()) {
        masm.movl(lhs, eax);
        masm.sarl(Imm32(31), eax);
        masm.subl(eax, edx);
    }

    // After this, edx contains the correct truncated division result.
    if (d < 0)
        masm.negl(edx);

    if (!isDiv) {
        masm.imull(Imm32(-d), edx, eax);
        masm.addl(lhs, eax);
    }

    if (!ins->mir()->isTruncated()) {
        if (isDiv) {
            // This is a division op. Multiply the obtained value by d to check if
            // the correct answer is an integer. This cannot overflow, since |d| > 1.
            masm.imull(Imm32(d), edx, eax);
            masm.cmpl(lhs, eax);
            if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
                return false;

            // If lhs is zero and the divisor is negative, the answer should have
            // been -0.
            if (d < 0) {
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                    return false;
            }
        } else if (ins->canBeNegativeDividend()) {
            // This is a mod op. If the computed value is zero and lhs
            // is negative, the answer should have been -0.
            Label done;

            masm.cmpl(lhs, Imm32(0));
            masm.j(Assembler::GreaterThanOrEqual, &done);

            masm.testl(eax, eax);
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitDivI(LDivI *ins)
{
    Register remainder = ToRegister(ins->remainder());
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    MDiv *mir = ins->mir();

    JS_ASSERT_IF(lhs != rhs, rhs != eax);
    JS_ASSERT(rhs != edx);
    JS_ASSERT(remainder == edx);
    JS_ASSERT(output == eax);

    Label done;
    ReturnZero *ool = nullptr;

    // Put the lhs in eax, for either the negative overflow case or the regular
    // divide case.
    if (lhs != eax)
        masm.mov(lhs, eax);

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.testl(rhs, rhs);
        if (mir->canTruncateInfinities()) {
            // Truncated division by zero is zero (Infinity|0 == 0)
            if (!ool)
                ool = new(alloc()) ReturnZero(output);
            masm.j(Assembler::Zero, ool->entry());
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    // Handle an integer overflow exception from -2147483648 / -1.
    if (mir->canBeNegativeOverflow()) {
        Label notmin;
        masm.cmpl(lhs, Imm32(INT32_MIN));
        masm.j(Assembler::NotEqual, &notmin);
        masm.cmpl(rhs, Imm32(-1));
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in the
            // output register (lhs == eax).
            masm.j(Assembler::Equal, &done);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
        masm.bind(&notmin);
    }

    // Handle negative 0.
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.testl(lhs, lhs);
        masm.j(Assembler::NonZero, &nonzero);
        masm.cmpl(rhs, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
        masm.bind(&nonzero);
    }

    // Sign extend the lhs into edx to make (edx:eax), since idiv is 64-bit.
    if (lhs != eax)
        masm.mov(lhs, eax);
    masm.cdq();
    masm.idiv(rhs);

    if (!mir->canTruncateRemainder()) {
        // If the remainder is != 0, bailout since this must be a double.
        masm.testl(remainder, remainder);
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    if (ool) {
        if (!addOutOfLineCode(ool))
            return false;
        masm.bind(ool->rejoin());
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitModPowTwoI(LModPowTwoI *ins)
{
    Register lhs = ToRegister(ins->getOperand(0));
    int32_t shift = ins->shift();

    Label negative;

    if (ins->mir()->canBeNegativeDividend()) {
        // Switch based on sign of the lhs.
        // Positive numbers are just a bitmask
        masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
    }

    masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);

    if (ins->mir()->canBeNegativeDividend()) {
        Label done;
        masm.jump(&done);

        // Negative numbers need a negate, bitmask, negate
        masm.bind(&negative);

        // Unlike in the visitModI case, we are not computing the mod by means of a
        // division. Therefore, the divisor = -1 case isn't problematic (the andl
        // always returns 0, which is what we expect).
        //
        // The negl instruction overflows if lhs == INT32_MIN, but this is also not
        // a problem: shift is at most 31, and so the andl also always returns 0.
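        // For instance, lhs = -5 with shift = 2 (divisor 4): negl gives 5,
        // the mask gives 1, and the final negl gives -1 == (-5 % 4).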
        masm.negl(lhs);
        masm.andl(Imm32((uint32_t(1) << shift) - 1), lhs);
        masm.negl(lhs);

        // Since a%b has the same sign as a, and a is negative in this branch,
        // an answer of 0 means the correct result is actually -0. Bail out.
        if (!ins->mir()->isTruncated() && !bailoutIf(Assembler::Zero, ins->snapshot()))
            return false;
        masm.bind(&done);
    }
    return true;
}

class ModOverflowCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
    Label done_;
    LModI *ins_;
    Register rhs_;

  public:
    explicit ModOverflowCheck(LModI *ins, Register rhs)
      : ins_(ins), rhs_(rhs)
    { }

    virtual bool accept(CodeGeneratorX86Shared *codegen) {
        return codegen->visitModOverflowCheck(this);
    }
    Label *done() {
        return &done_;
    }
    LModI *ins() const {
        return ins_;
    }
    Register rhs() const {
        return rhs_;
    }
};

bool
CodeGeneratorX86Shared::visitModOverflowCheck(ModOverflowCheck *ool)
{
    masm.cmpl(ool->rhs(), Imm32(-1));
    if (ool->ins()->mir()->isTruncated()) {
        masm.j(Assembler::NotEqual, ool->rejoin());
        masm.mov(ImmWord(0), edx);
        masm.jmp(ool->done());
    } else {
        if (!bailoutIf(Assembler::Equal, ool->ins()->snapshot()))
            return false;
        masm.jmp(ool->rejoin());
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitModI(LModI *ins)
{
    Register remainder = ToRegister(ins->remainder());
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());

    // Required to use idiv.
    JS_ASSERT_IF(lhs != rhs, rhs != eax);
    JS_ASSERT(rhs != edx);
    JS_ASSERT(remainder == edx);
    JS_ASSERT(ToRegister(ins->getTemp(0)) == eax);

    Label done;
    ReturnZero *ool = nullptr;
    ModOverflowCheck *overflow = nullptr;

    // Set up eax in preparation for doing a div.
    if (lhs != eax)
        masm.mov(lhs, eax);

    // Prevent divide by zero.
    if (ins->mir()->canBeDivideByZero()) {
        masm.testl(rhs, rhs);
        if (ins->mir()->isTruncated()) {
            if (!ool)
                ool = new(alloc()) ReturnZero(edx);
            masm.j(Assembler::Zero, ool->entry());
        } else {
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    Label negative;

    // Switch based on sign of the lhs.
    if (ins->mir()->canBeNegativeDividend())
        masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);

    // If lhs >= 0 then remainder = lhs % rhs. The remainder must be positive.
    {
        // Check if rhs is a power-of-two.
        if (ins->mir()->canBePowerOfTwoDivisor()) {
            JS_ASSERT(rhs != remainder);

            // Rhs y is a power-of-two if (y & (y-1)) == 0.
            // Note that if y is any negative number other than INT32_MIN,
            // both y and y-1 will have the sign bit set so these are never
            // optimized as powers-of-two. If y is INT32_MIN, y-1 will be
            // INT32_MAX and because lhs >= 0 at this point, lhs & INT32_MAX
            // returns the correct value.
            Label notPowerOfTwo;
            masm.mov(rhs, remainder);
            masm.subl(Imm32(1), remainder);
            masm.branchTest32(Assembler::NonZero, remainder, rhs, &notPowerOfTwo);
            {
                masm.andl(lhs, remainder);
                masm.jmp(&done);
            }
            masm.bind(&notPowerOfTwo);
        }

        // Since lhs >= 0, the sign-extension will be 0
        masm.mov(ImmWord(0), edx);
        masm.idiv(rhs);
    }

    // Otherwise, we have to beware of two special cases:
    if (ins->mir()->canBeNegativeDividend()) {
        masm.jump(&done);

        masm.bind(&negative);

        // Prevent an integer overflow exception from -2147483648 % -1
        Label notmin;
        masm.cmpl(lhs, Imm32(INT32_MIN));
        overflow = new(alloc()) ModOverflowCheck(ins, rhs);
        masm.j(Assembler::Equal, overflow->entry());
        masm.bind(overflow->rejoin());
        masm.cdq();
        masm.idiv(rhs);

        if (!ins->mir()->isTruncated()) {
            // A remainder of 0 means that the rval must be -0, which is a double.
            masm.testl(remainder, remainder);
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        }
    }

    masm.bind(&done);

    if (overflow) {
        if (!addOutOfLineCode(overflow))
            return false;
        masm.bind(overflow->done());
    }

    if (ool) {
        if (!addOutOfLineCode(ool))
            return false;
        masm.bind(ool->rejoin());
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitBitNotI(LBitNotI *ins)
{
    const LAllocation *input = ins->getOperand(0);
    JS_ASSERT(!input->isConstant());

    masm.notl(ToOperand(input));
    return true;
}

bool
CodeGeneratorX86Shared::visitBitOpI(LBitOpI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);

    switch (ins->bitop()) {
      case JSOP_BITOR:
        if (rhs->isConstant())
            masm.orl(Imm32(ToInt32(rhs)), ToOperand(lhs));
        else
            masm.orl(ToOperand(rhs), ToRegister(lhs));
        break;
      case JSOP_BITXOR:
        if (rhs->isConstant())
            masm.xorl(Imm32(ToInt32(rhs)), ToOperand(lhs));
        else
            masm.xorl(ToOperand(rhs), ToRegister(lhs));
        break;
      case JSOP_BITAND:
        if (rhs->isConstant())
            masm.andl(Imm32(ToInt32(rhs)), ToOperand(lhs));
        else
            masm.andl(ToOperand(rhs), ToRegister(lhs));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitShiftI(LShiftI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            if (shift)
                masm.shll(Imm32(shift), lhs);
            break;
          case JSOP_RSH:
            if (shift)
                masm.sarl(Imm32(shift), lhs);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.shrl(Imm32(shift), lhs);
            } else if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow.
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        JS_ASSERT(ToRegister(rhs) == ecx);
        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.shll_cl(lhs);
            break;
          case JSOP_RSH:
            masm.sarl_cl(lhs);
            break;
          case JSOP_URSH:
            masm.shrl_cl(lhs);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow.
                masm.testl(lhs, lhs);
                if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}

bool
CodeGeneratorX86Shared::visitUrshD(LUrshD *ins)
{
    Register lhs = ToRegister(ins->lhs());
    JS_ASSERT(ToRegister(ins->temp()) == lhs);

    const LAllocation *rhs = ins->rhs();
    FloatRegister out = ToFloatRegister(ins->output());

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        if (shift)
            masm.shrl(Imm32(shift), lhs);
    } else {
        JS_ASSERT(ToRegister(rhs) == ecx);
        masm.shrl_cl(lhs);
    }

    masm.convertUInt32ToDouble(lhs, out);
    return true;
}

MoveOperand
CodeGeneratorX86Shared::toMoveOperand(const LAllocation *a) const
{
    if (a->isGeneralReg())
        return MoveOperand(ToRegister(a));
    if (a->isFloatReg())
        return MoveOperand(ToFloatRegister(a));
    return MoveOperand(StackPointer, ToStackOffset(a));
}

class OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
    MTableSwitch *mir_;
    CodeLabel jumpLabel_;

    bool accept(CodeGeneratorX86Shared *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    OutOfLineTableSwitch(MTableSwitch *mir)
      : mir_(mir)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    CodeLabel *jumpLabel() {
        return &jumpLabel_;
    }
};

bool
CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
{
    MTableSwitch *mir = ool->mir();

    masm.align(sizeof(void*));
    masm.bind(ool->jumpLabel()->src());
    if (!masm.addCodeLabel(*ool->jumpLabel()))
        return false;

    for (size_t i = 0; i < mir->numCases(); i++) {
        LBlock *caseblock = mir->getCase(i)->lir();
        Label *caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished.
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        cl.src()->bind(caseoffset);
        if (!masm.addCodeLabel(cl))
            return false;
    }

    return true;
}

bool
CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                                const Register &base)
{
    Label *defaultcase = mir->getDefault()->lir()->label();

    // Lower value with low value
    if (mir->low() != 0)
        masm.subl(Imm32(mir->low()), index);

    // Jump to default case if input is out of range
    int32_t cases = mir->numCases();
    masm.cmpl(index, Imm32(cases));
    masm.j(AssemblerX86Shared::AboveOrEqual, defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(mir);
    if (!addOutOfLineCode(ool))
        return false;

    // Compute the position where a pointer to the right case stands.
    masm.mov(ool->jumpLabel()->dest(), base);
    Operand pointer = Operand(base, index, ScalePointer);

    // Jump to the right case
    masm.jmp(pointer);

    return true;
}

bool
CodeGeneratorX86Shared::visitMathD(LMathD *math)
{
    FloatRegister lhs = ToFloatRegister(math->lhs());
    Operand rhs = ToOperand(math->rhs());

    JS_ASSERT(ToFloatRegister(math->output()) == lhs);

    switch (math->jsop()) {
      case JSOP_ADD:
        masm.addsd(rhs, lhs);
        break;
      case JSOP_SUB:
        masm.subsd(rhs, lhs);
        break;
      case JSOP_MUL:
        masm.mulsd(rhs, lhs);
        break;
      case JSOP_DIV:
        masm.divsd(rhs, lhs);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitMathF(LMathF *math)
{
    FloatRegister lhs = ToFloatRegister(math->lhs());
    Operand rhs = ToOperand(math->rhs());

    JS_ASSERT(ToFloatRegister(math->output()) == lhs);

    switch (math->jsop()) {
      case JSOP_ADD:
        masm.addss(rhs, lhs);
        break;
      case JSOP_SUB:
        masm.subss(rhs, lhs);
        break;
      case JSOP_MUL:
        masm.mulss(rhs, lhs);
        break;
      case JSOP_DIV:
        masm.divss(rhs, lhs);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected opcode");
        return false;
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitFloor(LFloor *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bailout;

    if (AssemblerX86Shared::HasSSE41()) {
        // Bail on negative-zero.
        masm.branchNegativeZero(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Round toward -Infinity.
        masm.roundsd(input, scratch, JSC::X86Assembler::RoundDown);

        masm.cvttsd2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;
    } else {
        Label negative, end;

        // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
        masm.xorpd(scratch, scratch);
        masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);

        // Bail on negative-zero.
        masm.branchNegativeZero(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Input is non-negative, so truncation correctly rounds.
        masm.cvttsd2si(input, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        masm.jump(&end);

        // Input is negative, but isn't -0.
        // Negative values go on a comparatively expensive path, since no
        // native rounding mode matches JS semantics. Still better than callVM.
        masm.bind(&negative);
        {
            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttsd2si(input, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the input double was integer-valued.
            masm.convertInt32ToDouble(output, scratch);
            masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }

        masm.bind(&end);
    }
    return true;
}

bool
CodeGeneratorX86Shared::visitFloorF(LFloorF *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bailout;

    if (AssemblerX86Shared::HasSSE41()) {
        // Bail on negative-zero.
        masm.branchNegativeZeroFloat32(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Round toward -Infinity.

bool
CodeGeneratorX86Shared::visitFloorF(LFloorF *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bailout;

    if (AssemblerX86Shared::HasSSE41()) {
        // Bail on negative-zero.
        masm.branchNegativeZeroFloat32(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Round toward -Infinity.
        masm.roundss(input, scratch, JSC::X86Assembler::RoundDown);

        masm.cvttss2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;
    } else {
        Label negative, end;

        // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
        masm.xorps(scratch, scratch);
        masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);

        // Bail on negative-zero.
        masm.branchNegativeZeroFloat32(input, output, &bailout);
        if (!bailoutFrom(&bailout, lir->snapshot()))
            return false;

        // Input is non-negative, so truncation correctly rounds.
        masm.cvttss2si(input, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        masm.jump(&end);

        // Input is negative, but isn't -0.
        // Negative values go on a comparatively expensive path, since no
        // native rounding mode matches JS semantics. Still better than callVM.
        masm.bind(&negative);
        {
            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttss2si(input, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the input float was integer-valued.
            masm.convertInt32ToFloat32(output, scratch);
            masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }

        masm.bind(&end);
    }
    return true;
}
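
// Illustrative sketch (not part of the build): visitRound and visitRoundF
// below implement the JS Math.round semantics, floor(x + 0.5), rather than
// C's round-half-away-from-zero. A hypothetical reference version:
//
//   int32_t RoundReference(double x) {
//       return int32_t(floor(x + 0.5));  // Add 0.5, then round down.
//   }
//
// The generated code bails out where this sketch would misbehave: on -0
// inputs, on negative inputs in [-0.5, 0) whose result is -0, and on
// results that do not fit in an int32.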

bool
CodeGeneratorX86Shared::visitRound(LRound *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister temp = ToFloatRegister(lir->temp());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label negative, end, bailout;

    // Load 0.5 in the temp register.
    masm.loadConstantDouble(0.5, temp);

    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
    masm.xorpd(scratch, scratch);
    masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);

    // Bail on negative-zero.
    masm.branchNegativeZero(input, output, &bailout);
    if (!bailoutFrom(&bailout, lir->snapshot()))
        return false;

    // Input is non-negative. Add 0.5 and truncate, rounding down. Note that we
    // have to add the input to the temp register (which contains 0.5) because
    // we're not allowed to modify the input register.
    masm.addsd(input, temp);

    masm.cvttsd2si(temp, output);
    masm.cmp32(output, Imm32(INT_MIN));
    if (!bailoutIf(Assembler::Equal, lir->snapshot()))
        return false;

    masm.jump(&end);

    // Input is negative, but isn't -0.
    masm.bind(&negative);

    if (AssemblerX86Shared::HasSSE41()) {
        // Add 0.5 and round toward -Infinity. The result is stored in the temp
        // register (currently contains 0.5).
        masm.addsd(input, temp);
        masm.roundsd(temp, scratch, JSC::X86Assembler::RoundDown);

        // Truncate.
        masm.cvttsd2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        // If the result is positive zero, then the actual result is -0. Bail.
        // Otherwise, the truncation will have produced the correct negative
        // integer.
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Zero, lir->snapshot()))
            return false;
    } else {
        masm.addsd(input, temp);

        // Round toward -Infinity without the benefit of ROUNDSD.
        {
            // If input + 0.5 >= 0, input is a negative number >= -0.5 and
            // the result is -0.
            masm.compareDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
            if (!bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot()))
                return false;

            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttsd2si(temp, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the truncated double was integer-valued.
            masm.convertInt32ToDouble(output, scratch);
            masm.branchDouble(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }
    }

    masm.bind(&end);
    return true;
}

bool
CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister temp = ToFloatRegister(lir->temp());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label negative, end, bailout;

    // Load 0.5 in the temp register.
    masm.loadConstantFloat32(0.5f, temp);

    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
    masm.xorps(scratch, scratch);
    masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);

    // Bail on negative-zero.
    masm.branchNegativeZeroFloat32(input, output, &bailout);
    if (!bailoutFrom(&bailout, lir->snapshot()))
        return false;

    // Input is non-negative. Add 0.5 and truncate, rounding down. Note that we
    // have to add the input to the temp register (which contains 0.5) because
    // we're not allowed to modify the input register.
    masm.addss(input, temp);

    masm.cvttss2si(temp, output);
    masm.cmp32(output, Imm32(INT_MIN));
    if (!bailoutIf(Assembler::Equal, lir->snapshot()))
        return false;

    masm.jump(&end);

    // Input is negative, but isn't -0.
    masm.bind(&negative);

    if (AssemblerX86Shared::HasSSE41()) {
        // Add 0.5 and round toward -Infinity. The result is stored in the temp
        // register (currently contains 0.5).
        masm.addss(input, temp);
        masm.roundss(temp, scratch, JSC::X86Assembler::RoundDown);

        // Truncate.
        masm.cvttss2si(scratch, output);
        masm.cmp32(output, Imm32(INT_MIN));
        if (!bailoutIf(Assembler::Equal, lir->snapshot()))
            return false;

        // If the result is positive zero, then the actual result is -0. Bail.
        // Otherwise, the truncation will have produced the correct negative
        // integer.
        masm.testl(output, output);
        if (!bailoutIf(Assembler::Zero, lir->snapshot()))
            return false;
    } else {
        masm.addss(input, temp);

        // Round toward -Infinity without the benefit of ROUNDSS.
        {
            // If input + 0.5 >= 0, input is a negative number >= -0.5 and
            // the result is -0.
            masm.compareFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch);
            if (!bailoutIf(Assembler::DoubleGreaterThanOrEqual, lir->snapshot()))
                return false;

            // Truncate and round toward zero.
            // This is off-by-one for everything but integer-valued inputs.
            masm.cvttss2si(temp, output);
            masm.cmp32(output, Imm32(INT_MIN));
            if (!bailoutIf(Assembler::Equal, lir->snapshot()))
                return false;

            // Test whether the truncated float was integer-valued.
            masm.convertInt32ToFloat32(output, scratch);
            masm.branchFloat(Assembler::DoubleEqualOrUnordered, temp, scratch, &end);

            // Input is not integer-valued, so we rounded off-by-one in the
            // wrong direction. Correct by subtraction.
            masm.subl(Imm32(1), output);
            // Cannot overflow: output was already checked against INT_MIN.
        }
    }

    masm.bind(&end);
    return true;
}

bool
CodeGeneratorX86Shared::visitGuardShape(LGuardShape *guard)
{
    Register obj = ToRegister(guard->input());
    masm.cmpPtr(Operand(obj, JSObject::offsetOfShape()), ImmGCPtr(guard->mir()->shape()));

    return bailoutIf(Assembler::NotEqual, guard->snapshot());
}
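
// Illustrative sketch (not part of the build): guards like visitGuardShape
// above and visitGuardObjectType below compile to a single compare against a
// baked-in pointer followed by a conditional bailout, conceptually:
//
//   if (obj->shape != expectedShape)  // one cmpPtr + one conditional jump
//       bailout(snapshot);            // deoptimize back to the slow path
//
// so the fast path costs a single compare when the speculation holds.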

bool
CodeGeneratorX86Shared::visitGuardObjectType(LGuardObjectType *guard)
{
    Register obj = ToRegister(guard->input());
    masm.cmpPtr(Operand(obj, JSObject::offsetOfType()), ImmGCPtr(guard->mir()->typeObject()));

    Assembler::Condition cond =
        guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
    return bailoutIf(cond, guard->snapshot());
}

bool
CodeGeneratorX86Shared::visitGuardClass(LGuardClass *guard)
{
    Register obj = ToRegister(guard->input());
    Register tmp = ToRegister(guard->tempInt());

    masm.loadPtr(Address(obj, JSObject::offsetOfType()), tmp);
    masm.cmpPtr(Operand(tmp, types::TypeObject::offsetOfClasp()), ImmPtr(guard->mir()->getClass()));
    return bailoutIf(Assembler::NotEqual, guard->snapshot());
}

bool
CodeGeneratorX86Shared::visitEffectiveAddress(LEffectiveAddress *ins)
{
    const MEffectiveAddress *mir = ins->mir();
    Register base = ToRegister(ins->base());
    Register index = ToRegister(ins->index());
    Register output = ToRegister(ins->output());
    masm.leal(Operand(base, index, mir->scale(), mir->displacement()), output);
    return true;
}

Operand
CodeGeneratorX86Shared::createArrayElementOperand(Register elements, const LAllocation *index)
{
    if (index->isConstant())
        return Operand(elements, ToInt32(index) * sizeof(js::Value));

    return Operand(elements, ToRegister(index), TimesEight);
}
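
// Illustrative note (not part of the build): both functions above lean on
// x86 addressing modes. The LEA emitted by visitEffectiveAddress evaluates
//
//   output = base + index * scale + displacement   // scale is 1, 2, 4, or 8
//
// in a single instruction without touching the flags, and the element
// Operand uses TimesEight to match sizeof(js::Value), which is 8.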

bool
CodeGeneratorX86Shared::generateInvalidateEpilogue()
{
    // Ensure that there is enough space in the buffer for the OsiPoint patching
    // to occur. Otherwise, we could overwrite the invalidation epilogue.
    for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
        masm.nop();

    masm.bind(&invalidate_);

    // Push the Ion script onto the stack (when we determine what that pointer is).
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();

    masm.call(thunk);

    // We should never reach this point in JIT code -- the invalidation thunk
    // should pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}

bool
CodeGeneratorX86Shared::visitNegI(LNegI *ins)
{
    Register input = ToRegister(ins->input());
    JS_ASSERT(input == ToRegister(ins->output()));

    masm.neg32(input);
    return true;
}

bool
CodeGeneratorX86Shared::visitNegD(LNegD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));

    masm.negateDouble(input);
    return true;
}

bool
CodeGeneratorX86Shared::visitNegF(LNegF *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    JS_ASSERT(input == ToFloatRegister(ins->output()));

    masm.negateFloat(input);
    return true;
}

bool
CodeGeneratorX86Shared::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{
    MOZ_ASSERT(gen->info().executionMode() == ParallelExecution);
    MOZ_ASSERT(ToRegister(ins->forkJoinContext()) == ForkJoinGetSliceReg_cx);
    MOZ_ASSERT(ToRegister(ins->temp1()) == eax);
    MOZ_ASSERT(ToRegister(ins->temp2()) == edx);
    MOZ_ASSERT(ToRegister(ins->temp3()) == ForkJoinGetSliceReg_temp0);
    MOZ_ASSERT(ToRegister(ins->temp4()) == ForkJoinGetSliceReg_temp1);
    MOZ_ASSERT(ToRegister(ins->output()) == ForkJoinGetSliceReg_output);

    masm.call(gen->jitRuntime()->forkJoinGetSliceStub());
    return true;
}

JitCode *
JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
#ifdef JS_THREADSAFE
    MacroAssembler masm(cx);

    // We need two fixed temps: eax for cmpxchg and edx for div.
    Register cxReg = ForkJoinGetSliceReg_cx, worker = cxReg;
    Register pool = ForkJoinGetSliceReg_temp0;
    Register bounds = ForkJoinGetSliceReg_temp1;
    Register output = ForkJoinGetSliceReg_output;

    MOZ_ASSERT(worker != eax && worker != edx);
    MOZ_ASSERT(pool != eax && pool != edx);
    MOZ_ASSERT(bounds != eax && bounds != edx);
    MOZ_ASSERT(output != eax && output != edx);

    Label stealWork, noMoreWork, gotSlice;
    Operand workerSliceBounds(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()));

    // Clobber cx to load the worker.
    masm.push(cxReg);
    masm.loadPtr(Address(cxReg, ForkJoinContext::offsetOfWorker()), worker);

    // Load the thread pool, which is used in all cases below.
    masm.loadThreadPool(pool);
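
    // Illustrative sketch (not part of the generated stub): the block below
    // is a lock-free attempt to claim one slice from this worker's own
    // bounds word. In C-like pseudocode (names are hypothetical):
    //
    //   for (;;) {
    //       uint32_t bounds = worker->sliceBounds;  // [ from:16 | to:16 ]
    //       uint16_t from = bounds >> 16;
    //       if (from == uint16_t(bounds))           // from == to:
    //           goto stealWork;                     //   nothing left locally
    //       if (CAS(&worker->sliceBounds, bounds, bounds + 0x10000))
    //           return from;                        // claimed slice |from|
    //   }
    //
    // Adding 0x10000 increments |from| in the upper half while leaving |to|
    // untouched.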

    {
        // Try to get a slice from the current thread.
        Label getOwnSliceLoopHead;
        masm.bind(&getOwnSliceLoopHead);

        // Load the slice bounds for the current thread.
        masm.loadSliceBounds(worker, bounds);

        // The slice bounds are a uint32 composed from two uint16s:
        //   [ from          , to           ]
        //     ^~~~            ^~
        //     upper 16 bits   lower 16 bits
        masm.move32(bounds, output);
        masm.shrl(Imm32(16), output);

        // If we don't have any slices left ourselves, move on to stealing.
        masm.branch16(Assembler::Equal, output, bounds, &stealWork);

        // If we still have work, try to CAS [ from+1, to ].
        masm.move32(bounds, edx);
        masm.add32(Imm32(0x10000), edx);
        masm.move32(bounds, eax);
        masm.atomic_cmpxchg32(edx, workerSliceBounds, eax);
        masm.j(Assembler::NonZero, &getOwnSliceLoopHead);

        // If the CAS succeeded, return |from| in output.
        masm.jump(&gotSlice);
    }

    // Try to steal work.
    masm.bind(&stealWork);

    // It's not technically correct to test whether work-stealing is enabled
    // only at stub-generation time, but work-stealing is a DEBUG-only option.
    if (cx->runtime()->threadPool.workStealing()) {
        Label stealWorkLoopHead;
        masm.bind(&stealWorkLoopHead);

        // Check if we have work.
        masm.branch32(Assembler::Equal,
                      Address(pool, ThreadPool::offsetOfPendingSlices()),
                      Imm32(0), &noMoreWork);
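
        // Illustrative sketch (not part of the generated stub): the inlined
        // xorshift below is equivalent to this C code, with the result taken
        // mod numWorkers (names are hypothetical):
        //
        //   uint32_t x = worker->schedulerRNGState;
        //   x ^= x << XORSHIFT_A;
        //   x ^= x >> XORSHIFT_B;
        //   x ^= x << XORSHIFT_C;
        //   worker->schedulerRNGState = x;
        //   uint32_t victimId = x % numWorkers;  // ends up in edx via udiv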

        // Get an id at random. The following is an inline of
        // the 32-bit xorshift in ThreadPoolWorker::randomWorker().
        {
            // Reload the current worker.
            masm.loadPtr(Address(StackPointer, 0), cxReg);
            masm.loadPtr(Address(cxReg, ForkJoinContext::offsetOfWorker()), worker);

            // Perform the xorshift to get a random number in eax, using edx
            // as a temp.
            Address rngState(worker, ThreadPoolWorker::offsetOfSchedulerRNGState());
            masm.load32(rngState, eax);
            masm.move32(eax, edx);
            masm.shll(Imm32(ThreadPoolWorker::XORSHIFT_A), eax);
            masm.xor32(edx, eax);
            masm.move32(eax, edx);
            masm.shrl(Imm32(ThreadPoolWorker::XORSHIFT_B), eax);
            masm.xor32(edx, eax);
            masm.move32(eax, edx);
            masm.shll(Imm32(ThreadPoolWorker::XORSHIFT_C), eax);
            masm.xor32(edx, eax);
            masm.store32(eax, rngState);

            // Compute the random worker id by computing % numWorkers; udiv
            // leaves the remainder in edx. Reuse output as a temp.
            masm.move32(Imm32(0), edx);
            masm.move32(Imm32(cx->runtime()->threadPool.numWorkers()), output);
            masm.udiv(output);
        }

        // Load the victim worker from the workers array.
        masm.loadPtr(Address(pool, ThreadPool::offsetOfWorkers()), worker);
        masm.loadPtr(BaseIndex(worker, edx, ScalePointer), worker);

        // Try to get a slice from the designated victim worker.
        Label stealSliceFromWorkerLoopHead;
        masm.bind(&stealSliceFromWorkerLoopHead);

        // Load the slice bounds for the victim worker and decompose them.
        masm.loadSliceBounds(worker, bounds);
        masm.move32(bounds, eax);
        masm.shrl(Imm32(16), eax);

        // If the victim worker has no more slices left, find another worker.
        masm.branch16(Assembler::Equal, eax, bounds, &stealWorkLoopHead);

        // If the victim worker still has work, try to CAS [ from, to-1 ].
        masm.move32(bounds, output);
        masm.sub32(Imm32(1), output);
        masm.move32(bounds, eax);
        masm.atomic_cmpxchg32(output, workerSliceBounds, eax);
        masm.j(Assembler::NonZero, &stealSliceFromWorkerLoopHead);

        // If the CAS succeeded, return |to-1| in output.
#ifdef DEBUG
        masm.atomic_inc32(Operand(Address(pool, ThreadPool::offsetOfStolenSlices())));
#endif
        // movzwl copies the lower 16 bits only.
        masm.movzwl(output, output);
    }

    // If we successfully got a slice, decrement pool->pendingSlices_ and
    // return the slice.
    masm.bind(&gotSlice);
    masm.atomic_dec32(Operand(Address(pool, ThreadPool::offsetOfPendingSlices())));
    masm.pop(cxReg);
    masm.ret();

    // There are no more slices to give out; return a sentinel value.
    masm.bind(&noMoreWork);
    masm.move32(Imm32(ThreadPool::MAX_SLICE_ID), output);
    masm.pop(cxReg);
    masm.ret();

    Linker linker(masm);
    JitCode *code = linker.newCode(cx, JSC::OTHER_CODE);

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "ForkJoinGetSliceStub");
#endif

    return code;
#else
    return nullptr;
#endif // JS_THREADSAFE
}

} // namespace jit
} // namespace js