/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_IonMacroAssembler_h
#define jit_IonMacroAssembler_h

#ifdef JS_ION

#include "jscompartment.h"

#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
# include "jit/x64/MacroAssembler-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#elif defined(JS_CODEGEN_MIPS)
# include "jit/mips/MacroAssembler-mips.h"
#else
# error "Unknown architecture!"
#endif
#include "jit/IonInstrumentation.h"
#include "jit/JitCompartment.h"
#include "jit/VMFunctions.h"
#include "vm/ProxyObject.h"
#include "vm/Shape.h"

namespace js {
namespace jit {

// The public entrypoint for emitting assembly. Note that a MacroAssembler can
// use cx->lifoAlloc, so take care not to interleave masm use with other
// lifoAlloc use if one will be destroyed before the other.
class MacroAssembler : public MacroAssemblerSpecific
{
    MacroAssembler *thisFromCtor() {
        return this;
    }

  public:
    class AutoRooter : public AutoGCRooter
    {
        MacroAssembler *masm_;

      public:
        AutoRooter(JSContext *cx, MacroAssembler *masm)
          : AutoGCRooter(cx, IONMASM),
            masm_(masm)
        { }

        MacroAssembler *masm() const {
            return masm_;
        }
    };

    /*
     * Base class for creating a branch.
     */
    class Branch
    {
        bool init_;
        Condition cond_;
        Label *jump_;
        Register reg_;

      public:
        Branch()
          : init_(false),
            cond_(Equal),
            jump_(nullptr),
            reg_(Register::FromCode(0))      // Quell compiler warnings.
        { }

        Branch(Condition cond, Register reg, Label *jump)
          : init_(true),
            cond_(cond),
            jump_(jump),
            reg_(reg)
        { }

        bool isInitialized() const {
            return init_;
        }

        Condition cond() const {
            return cond_;
        }

        Label *jump() const {
            return jump_;
        }

        Register reg() const {
            return reg_;
        }

        void invertCondition() {
            cond_ = InvertCondition(cond_);
        }

        void relink(Label *jump) {
            jump_ = jump;
        }

        virtual void emit(MacroAssembler &masm) = 0;
    };
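    // A hypothetical usage sketch (illustrative only, not from this file):
    // the subclasses below let a caller record a branch before its final
    // target is known, relink it, and emit it later:
    //
    //   BranchGCPtr guard(Assembler::NotEqual, objReg, ImmGCPtr(ptr), &miss);
    //   ...
    //   guard.relink(&otherMiss);
    //   guard.emit(masm);  // emits branchPtr(NotEqual, objReg, ptr, &otherMiss)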
    /*
     * Creates a branch based on a specific types::Type.
     * Note: emits number test (int/double) for types::Type::DoubleType()
     */
    class BranchType : public Branch
    {
        types::Type type_;

      public:
        BranchType()
          : Branch(),
            type_(types::Type::UnknownType())
        { }

        BranchType(Condition cond, Register reg, types::Type type, Label *jump)
          : Branch(cond, reg, jump),
            type_(type)
        { }

        void emit(MacroAssembler &masm) {
            JS_ASSERT(isInitialized());
            MIRType mirType = MIRType_None;

            if (type_.isPrimitive()) {
                if (type_.isMagicArguments())
                    mirType = MIRType_MagicOptimizedArguments;
                else
                    mirType = MIRTypeFromValueType(type_.primitive());
            } else if (type_.isAnyObject()) {
                mirType = MIRType_Object;
            } else {
                MOZ_ASSUME_UNREACHABLE("Unknown conversion to mirtype");
            }

            if (mirType == MIRType_Double)
                masm.branchTestNumber(cond(), reg(), jump());
            else
                masm.branchTestMIRType(cond(), reg(), mirType, jump());
        }
    };

    /*
     * Creates a branch based on a GCPtr.
     */
    class BranchGCPtr : public Branch
    {
        ImmGCPtr ptr_;

      public:
        BranchGCPtr()
          : Branch(),
            ptr_(ImmGCPtr(nullptr))
        { }

        BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label *jump)
          : Branch(cond, reg, jump),
            ptr_(ptr)
        { }

        void emit(MacroAssembler &masm) {
            JS_ASSERT(isInitialized());
            masm.branchPtr(cond(), reg(), ptr_, jump());
        }
    };

    mozilla::Maybe<AutoRooter> autoRooter_;
    mozilla::Maybe<IonContext> ionContext_;
    mozilla::Maybe<AutoIonContextAlloc> alloc_;
    bool enoughMemory_;
    bool embedsNurseryPointers_;

    // SPS instrumentation, only used for Ion caches.
    mozilla::Maybe<IonInstrumentation> spsInstrumentation_;
    jsbytecode *spsPc_;

  private:
    // This field is used to manage profiling instrumentation output. If
    // provided and enabled, then instrumentation will be emitted around call
    // sites. The IonInstrumentation instance is hosted inside of
    // CodeGeneratorShared and is the manager of when instrumentation is
    // actually emitted or not. If nullptr, then no instrumentation is emitted.
    IonInstrumentation *sps_;

    // Labels for handling exceptions and failures.
    NonAssertingLabel sequentialFailureLabel_;
    NonAssertingLabel parallelFailureLabel_;

  public:
    // If instrumentation should be emitted, then the sps parameter should be
    // provided, but otherwise it can be safely omitted to prevent all
    // instrumentation from being emitted.
    MacroAssembler()
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
        IonContext *icx = GetIonContext();
        JSContext *cx = icx->cx;
        if (cx)
            constructRoot(cx);

        if (!icx->temp) {
            JS_ASSERT(cx);
            alloc_.construct(cx);
        }

        moveResolver_.setAllocator(*icx->temp);
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = icx->getNextAssemblerId();
#endif
    }

    // This constructor should only be used when there is no IonContext active
    // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
    MacroAssembler(JSContext *cx, IonScript *ion = nullptr,
                   JSScript *script = nullptr, jsbytecode *pc = nullptr)
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
        constructRoot(cx);
        ionContext_.construct(cx, (js::jit::TempAllocator *)nullptr);
        alloc_.construct(cx);
        moveResolver_.setAllocator(*ionContext_.ref().temp);
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = GetIonContext()->getNextAssemblerId();
#endif
        if (ion) {
            setFramePushed(ion->frameSize());
            if (pc && cx->runtime()->spsProfiler.enabled()) {
                // We have to update the SPS pc when this IC stub calls into
                // the VM.
                spsPc_ = pc;
                spsInstrumentation_.construct(&cx->runtime()->spsProfiler, &spsPc_);
                sps_ = spsInstrumentation_.addr();
                sps_->setPushed(script);
            }
        }
    }

    // asm.js compilation handles its own IonContext-pushing.
    struct AsmJSToken {};
    MacroAssembler(AsmJSToken)
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = 0;
#endif
    }

    void setInstrumentation(IonInstrumentation *sps) {
        sps_ = sps;
    }

    void resetForNewCodeGenerator(TempAllocator &alloc) {
        setFramePushed(0);
        moveResolver_.clearTempObjectPool();
        moveResolver_.setAllocator(alloc);
    }

    void constructRoot(JSContext *cx) {
        autoRooter_.construct(cx, this);
    }

    MoveResolver &moveResolver() {
        return moveResolver_;
    }

    size_t instructionsSize() const {
        return size();
    }

    void propagateOOM(bool success) {
        enoughMemory_ &= success;
    }
    bool oom() const {
        return !enoughMemory_ || MacroAssemblerSpecific::oom();
    }

    bool embedsNurseryPointers() const {
        return embedsNurseryPointers_;
    }
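    // A minimal sketch of the OOM idiom above (hypothetical call site):
    // fallible work is funneled through propagateOOM() and checked once at
    // the end, instead of branching after every allocation:
    //
    //   masm.propagateOOM(vector.append(entry));
    //   ...
    //   if (masm.oom())
    //       return false;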
    // Emits a test of a value against all types in a TypeSet. A scratch
    // register is required.
    template <typename Source, typename TypeSet>
    void guardTypeSet(const Source &address, const TypeSet *types, Register scratch, Label *miss);
    template <typename TypeSet>
    void guardObjectType(Register obj, const TypeSet *types, Register scratch, Label *miss);
    template <typename Source>
    void guardType(const Source &address, types::Type type, Register scratch, Label *miss);

    void loadObjShape(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
    }
    void loadBaseShape(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
        loadPtr(Address(dest, Shape::offsetOfBase()), dest);
    }
    void loadObjClass(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfType()), dest);
        loadPtr(Address(dest, types::TypeObject::offsetOfClasp()), dest);
    }
    void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class *clasp,
                            Label *label) {
        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
        branchPtr(cond, Address(scratch, types::TypeObject::offsetOfClasp()), ImmPtr(clasp), label);
    }
    void branchTestObjShape(Condition cond, Register obj, const Shape *shape, Label *label) {
        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
    }
    void branchTestObjShape(Condition cond, Register obj, Register shape, Label *label) {
        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
    }
    void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
                                      const void *handlerp, Label *label) {
        Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
        loadPrivate(handlerAddr, scratch);
        Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
        branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
    }

    template <typename Value>
    void branchTestMIRType(Condition cond, const Value &val, MIRType type, Label *label) {
        switch (type) {
          case MIRType_Null:      return branchTestNull(cond, val, label);
          case MIRType_Undefined: return branchTestUndefined(cond, val, label);
          case MIRType_Boolean:   return branchTestBoolean(cond, val, label);
          case MIRType_Int32:     return branchTestInt32(cond, val, label);
          case MIRType_String:    return branchTestString(cond, val, label);
          case MIRType_Object:    return branchTestObject(cond, val, label);
          case MIRType_Double:    return branchTestDouble(cond, val, label);
          case MIRType_MagicOptimizedArguments: // Fall through.
          case MIRType_MagicIsConstructing:
          case MIRType_MagicHole: return branchTestMagic(cond, val, label);
          default:
            MOZ_ASSUME_UNREACHABLE("Bad MIRType");
        }
    }

    // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
    void branchIfFalseBool(Register reg, Label *label) {
        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
        branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
    }

    // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
    void branchIfTrueBool(Register reg, Label *label) {
        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
        branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
    }

    void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
        loadPtr(Address(obj, JSObject::getPrivateDataOffset(nfixed)), dest);
    }

    void loadObjProto(Register obj, Register dest) {
        loadPtr(Address(obj, JSObject::offsetOfType()), dest);
        loadPtr(Address(dest, types::TypeObject::offsetOfProto()), dest);
    }

    void loadStringLength(Register str, Register dest) {
        loadPtr(Address(str, JSString::offsetOfLengthAndFlags()), dest);
        rshiftPtr(Imm32(JSString::LENGTH_SHIFT), dest);
    }

    void loadSliceBounds(Register worker, Register dest) {
        loadPtr(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()), dest);
    }

    void loadJSContext(Register dest) {
        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfJSContext()), dest);
    }
    void loadJitActivation(Register dest) {
        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfActivation()), dest);
    }

    template <typename T>
    void loadTypedOrValue(const T &src, TypedOrValueRegister dest) {
        if (dest.hasValue())
            loadValue(src, dest.valueReg());
        else
            loadUnboxedValue(src, dest.type(), dest.typedReg());
    }

    template <typename T>
    void loadElementTypedOrValue(const T &src, TypedOrValueRegister dest, bool holeCheck,
                                 Label *hole) {
        if (dest.hasValue()) {
            loadValue(src, dest.valueReg());
            if (holeCheck)
                branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
        } else {
            if (holeCheck)
                branchTestMagic(Assembler::Equal, src, hole);
            loadUnboxedValue(src, dest.type(), dest.typedReg());
        }
    }

    template <typename T>
    void storeTypedOrValue(TypedOrValueRegister src, const T &dest) {
        if (src.hasValue()) {
            storeValue(src.valueReg(), dest);
        } else if (IsFloatingPointType(src.type())) {
            FloatRegister reg = src.typedReg().fpu();
            if (src.type() == MIRType_Float32) {
                convertFloat32ToDouble(reg, ScratchFloatReg);
                reg = ScratchFloatReg;
            }
            storeDouble(reg, dest);
        } else {
            storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
        }
    }

    template <typename T>
    void storeConstantOrRegister(ConstantOrRegister src, const T &dest) {
        if (src.constant())
            storeValue(src.value(), dest);
        else
            storeTypedOrValue(src.reg(), dest);
    }

    void storeCallResult(Register reg) {
        if (reg != ReturnReg)
            mov(ReturnReg, reg);
    }

    void storeCallFloatResult(const FloatRegister &reg) {
        if (reg != ReturnFloatReg)
            moveDouble(ReturnFloatReg, reg);
    }

    void storeCallResultValue(AnyRegister dest) {
#if defined(JS_NUNBOX32)
        unboxValue(ValueOperand(JSReturnReg_Type, JSReturnReg_Data), dest);
#elif defined(JS_PUNBOX64)
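        // On 64-bit (punbox) targets the entire Value fits in one register,
        // so a single unbox of JSReturnReg suffices.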
        unboxValue(ValueOperand(JSReturnReg), dest);
#else
#error "Bad architecture"
#endif
    }

    void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
        // Reshuffle the return registers used for a call result to store into
        // dest, using ReturnReg as a scratch register if necessary. This must
        // only be called after returning from a call, at a point when the
        // return register is not live. XXX would be better to allow wrappers
        // to store the return value to different places.
        if (dest.typeReg() == JSReturnReg_Data) {
            if (dest.payloadReg() == JSReturnReg_Type) {
                // Swap the two registers.
                mov(JSReturnReg_Type, ReturnReg);
                mov(JSReturnReg_Data, JSReturnReg_Type);
                mov(ReturnReg, JSReturnReg_Data);
            } else {
                mov(JSReturnReg_Data, dest.payloadReg());
                mov(JSReturnReg_Type, dest.typeReg());
            }
        } else {
            mov(JSReturnReg_Type, dest.typeReg());
            mov(JSReturnReg_Data, dest.payloadReg());
        }
#elif defined(JS_PUNBOX64)
        if (dest.valueReg() != JSReturnReg)
            movq(JSReturnReg, dest.valueReg());
#else
#error "Bad architecture"
#endif
    }

    void storeCallResultValue(TypedOrValueRegister dest) {
        if (dest.hasValue())
            storeCallResultValue(dest.valueReg());
        else
            storeCallResultValue(dest.typedReg());
    }

    template <typename T>
    Register extractString(const T &source, Register scratch) {
        return extractObject(source, scratch);
    }

    void PushRegsInMask(RegisterSet set);
    void PushRegsInMask(GeneralRegisterSet set) {
        PushRegsInMask(RegisterSet(set, FloatRegisterSet()));
    }
    void PopRegsInMask(RegisterSet set) {
        PopRegsInMaskIgnore(set, RegisterSet());
    }
    void PopRegsInMask(GeneralRegisterSet set) {
        PopRegsInMask(RegisterSet(set, FloatRegisterSet()));
    }
    void PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore);

    void branchIfFunctionHasNoScript(Register fun, Label *label) {
        // 16-bit loads are slow and unaligned 32-bit loads may be too so
        // perform an aligned 32-bit load and adjust the bitmask accordingly.
        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
        Address address(fun, JSFunction::offsetOfNargs());
        uint32_t bit = JSFunction::INTERPRETED << 16;
        branchTest32(Assembler::Zero, address, Imm32(bit), label);
    }
    void branchIfInterpreted(Register fun, Label *label) {
        // 16-bit loads are slow and unaligned 32-bit loads may be too so
        // perform an aligned 32-bit load and adjust the bitmask accordingly.
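        // Layout assumed by the asserts below: |nargs| (16 bits) is
        // immediately followed by |flags| (16 bits), so on a little-endian
        // target an aligned 32-bit load at offsetOfNargs() yields
        // (flags << 16) | nargs, and INTERPRETED << 16 tests the flags half:
        //
        //   [ nargs : 16 ][ flags : 16 ]   <- one aligned uint32_t load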
        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
        Address address(fun, JSFunction::offsetOfNargs());
        uint32_t bit = JSFunction::INTERPRETED << 16;
        branchTest32(Assembler::NonZero, address, Imm32(bit), label);
    }

    void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label);

    using MacroAssemblerSpecific::Push;
    using MacroAssemblerSpecific::Pop;

    void Push(jsid id, Register scratchReg) {
        if (JSID_IS_GCTHING(id)) {
            // If we're pushing a gcthing, then we can't just push the tagged jsid
            // value since the GC won't have any idea that the push instruction
            // carries a reference to a gcthing. Need to unpack the pointer,
            // push it using ImmGCPtr, and then rematerialize the id at runtime.

            // Double-checking this here to ensure we don't lose sync
            // with the implementation of JSID_IS_GCTHING.
            if (JSID_IS_OBJECT(id)) {
                JSObject *obj = JSID_TO_OBJECT(id);
                movePtr(ImmGCPtr(obj), scratchReg);
                JS_ASSERT(((size_t)obj & JSID_TYPE_MASK) == 0);
                orPtr(Imm32(JSID_TYPE_OBJECT), scratchReg);
                Push(scratchReg);
            } else {
                JSString *str = JSID_TO_STRING(id);
                JS_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
                JS_ASSERT(JSID_TYPE_STRING == 0x0);
                Push(ImmGCPtr(str));
            }
        } else {
            Push(ImmWord(JSID_BITS(id)));
        }
    }

    void Push(TypedOrValueRegister v) {
        if (v.hasValue()) {
            Push(v.valueReg());
        } else if (IsFloatingPointType(v.type())) {
            FloatRegister reg = v.typedReg().fpu();
            if (v.type() == MIRType_Float32) {
                convertFloat32ToDouble(reg, ScratchFloatReg);
                reg = ScratchFloatReg;
            }
            Push(reg);
        } else {
            Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
        }
    }

    void Push(ConstantOrRegister v) {
        if (v.constant())
            Push(v.value());
        else
            Push(v.reg());
    }

    void Push(const ValueOperand &val) {
        pushValue(val);
        framePushed_ += sizeof(Value);
    }

    void Push(const Value &val) {
        pushValue(val);
        framePushed_ += sizeof(Value);
    }

    void Push(JSValueType type, Register reg) {
        pushValue(type, reg);
        framePushed_ += sizeof(Value);
    }

    void PushValue(const Address &addr) {
        JS_ASSERT(addr.base != StackPointer);
        pushValue(addr);
        framePushed_ += sizeof(Value);
    }

    void PushEmptyRooted(VMFunction::RootType rootType);
    void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand &valueReg);

    void adjustStack(int amount) {
        if (amount > 0)
            freeStack(amount);
        else if (amount < 0)
            reserveStack(-amount);
    }

    void bumpKey(Int32Key *key, int diff) {
        if (key->isRegister())
            add32(Imm32(diff), key->reg());
        else
            key->bumpConstant(diff);
    }

    void storeKey(const Int32Key &key, const Address &dest) {
        if (key.isRegister())
            store32(key.reg(), dest);
        else
            store32(Imm32(key.constant()), dest);
    }

    template <typename T>
    void branchKey(Condition cond, const T &length, const Int32Key &key, Label *label) {
        if (key.isRegister())
            branch32(cond, length, key.reg(), label);
        else
            branch32(cond, length, Imm32(key.constant()), label);
    }

    void branchTestNeedsBarrier(Condition cond, Register scratch, Label *label) {
        JS_ASSERT(cond == Zero || cond == NonZero);
        CompileZone *zone = GetIonContext()->compartment->zone();
        movePtr(ImmPtr(zone->addressOfNeedsBarrier()), scratch);
        Address needsBarrierAddr(scratch, 0);
        branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
    }

    template <typename T>
    void callPreBarrier(const T &address, MIRType type) {
        JS_ASSERT(type == MIRType_Value ||
                  type == MIRType_String ||
                  type == MIRType_Object ||
                  type == MIRType_Shape);
        Label done;

        if (type == MIRType_Value)
            branchTestGCThing(Assembler::NotEqual, address, &done);

        Push(PreBarrierReg);
        computeEffectiveAddress(address, PreBarrierReg);

        const JitRuntime *rt = GetIonContext()->runtime->jitRuntime();
        JitCode *preBarrier = (type == MIRType_Shape)
                              ? rt->shapePreBarrier()
                              : rt->valuePreBarrier();

        call(preBarrier);
        Pop(PreBarrierReg);

        bind(&done);
    }

    template <typename T>
    void patchableCallPreBarrier(const T &address, MIRType type) {
        JS_ASSERT(type == MIRType_Value ||
                  type == MIRType_String ||
                  type == MIRType_Object ||
                  type == MIRType_Shape);

        Label done;

        // All barriers are off by default.
        // They are enabled if necessary at the end of CodeGenerator::generate().
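        // toggledJump() emits a patchable jump to |done|, so while barriers
        // are disabled the barrier code below is skipped entirely; the
        // recorded offset lets the jump later be toggled into a nop so that
        // execution falls through into callPreBarrier().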
        CodeOffsetLabel nopJump = toggledJump(&done);
        writePrebarrierOffset(nopJump);

        callPreBarrier(address, type);
        jump(&done);

        align(8);
        bind(&done);
    }

    void branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
                          Label *label);
    void moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg);

    void canonicalizeDouble(FloatRegister reg) {
        Label notNaN;
        branchDouble(DoubleOrdered, reg, reg, &notNaN);
        loadConstantDouble(JS::GenericNaN(), reg);
        bind(&notNaN);
    }

    void canonicalizeFloat(FloatRegister reg) {
        Label notNaN;
        branchFloat(DoubleOrdered, reg, reg, &notNaN);
        loadConstantFloat32(float(JS::GenericNaN()), reg);
        bind(&notNaN);
    }

    template <typename T>
    void loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail);

    template <typename T>
    void loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest, bool allowDouble,
                            Register temp, Label *fail);

    template <typename S, typename T>
    void storeToTypedIntArray(int arrayType, const S &value, const T &dest) {
        switch (arrayType) {
          case ScalarTypeDescr::TYPE_INT8:
          case ScalarTypeDescr::TYPE_UINT8:
          case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
            store8(value, dest);
            break;
          case ScalarTypeDescr::TYPE_INT16:
          case ScalarTypeDescr::TYPE_UINT16:
            store16(value, dest);
            break;
          case ScalarTypeDescr::TYPE_INT32:
          case ScalarTypeDescr::TYPE_UINT32:
            store32(value, dest);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
        }
    }

    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const BaseIndex &dest);
    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const Address &dest);

    Register extractString(const Address &address, Register scratch) {
        return extractObject(address, scratch);
    }
    Register extractString(const ValueOperand &value, Register scratch) {
        return extractObject(value, scratch);
    }

    using MacroAssemblerSpecific::extractTag;
    Register extractTag(const TypedOrValueRegister &reg, Register scratch) {
        if (reg.hasValue())
            return extractTag(reg.valueReg(), scratch);
        mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
        return scratch;
    }

    using MacroAssemblerSpecific::extractObject;
    Register extractObject(const TypedOrValueRegister &reg, Register scratch) {
        if (reg.hasValue())
            return extractObject(reg.valueReg(), scratch);
        JS_ASSERT(reg.type() == MIRType_Object);
        return reg.typedReg().gpr();
    }

    // Inline version of js_TypedArray_uint8_clamp_double.
    // This function clobbers the input register.
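    // (For reference, uint8 clamping follows the Uint8ClampedArray rules:
    // NaN maps to 0, values below 0 clamp to 0, values above 255 clamp to
    // 255, and ties round to the nearest even integer.)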
    void clampDoubleToUint8(FloatRegister input, Register output);

    using MacroAssemblerSpecific::ensureDouble;

    template <typename S>
    void ensureDouble(const S &source, FloatRegister dest, Label *failure) {
        Label isDouble, done;
        branchTestDouble(Assembler::Equal, source, &isDouble);
        branchTestInt32(Assembler::NotEqual, source, failure);

        convertInt32ToDouble(source, dest);
        jump(&done);

        bind(&isDouble);
        unboxDouble(source, dest);

        bind(&done);
    }

    // Emit a type-case branch on a matching tag if the type tag in the
    // definition might actually be that type.
    void branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag, Label *label);

    // Inline allocation.
    void newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
                    gc::InitialHeap initialHeap = gc::DefaultHeap);
    void newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
                    gc::InitialHeap initialHeap);
    void newGCString(Register result, Register temp, Label *fail);
    void newGCFatInlineString(Register result, Register temp, Label *fail);

    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       gc::AllocKind allocKind, Label *fail);
    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       JSObject *templateObject, Label *fail);
    void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        Label *fail);
    void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                 Label *fail);

    void copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
                               uint32_t start, uint32_t end);
    void fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
                                uint32_t start, uint32_t end);
    void initGCSlots(Register obj, Register temp, JSObject *templateObj);
    void initGCThing(Register obj, Register temp, JSObject *templateObj);

    // Compares two strings for equality based on the JSOP.
    // This checks for identical pointers, atoms and length and fails for everything else.
    void compareStrings(JSOp op, Register left, Register right, Register result,
                        Register temp, Label *fail);

    // Checks the flags that signal that parallel code may need to interrupt or
    // abort. Branches to fail in that case.
    void checkInterruptFlagPar(Register tempReg, Label *fail);

    // If the JitCode that created this assembler needs to transition into the VM,
    // we want to store the JitCode on the stack in order to mark it during a GC.
    // This is a reference to a patch location where the JitCode* will be written.
  private:
    CodeOffsetLabel exitCodePatch_;

  public:
    void enterExitFrame(const VMFunction *f = nullptr) {
        linkExitFrame();
        // Push the ioncode. (Bailout or VM wrapper)
        exitCodePatch_ = PushWithPatch(ImmWord(-1));
        // Push VMFunction pointer, to mark arguments.
        Push(ImmPtr(f));
    }
    void enterFakeExitFrame(JitCode *codeVal = nullptr) {
        linkExitFrame();
        Push(ImmPtr(codeVal));
        Push(ImmPtr(nullptr));
    }

    void loadThreadPool(Register pool) {
        // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
        // JSRuntime, so we can hardcode the ThreadPool address here.
        movePtr(ImmPtr(GetIonContext()->runtime->addressOfThreadPool()), pool);
    }

    void loadForkJoinContext(Register cx, Register scratch);
    void loadContext(Register cxReg, Register scratch, ExecutionMode executionMode);

    void enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
                                              Register scratch);

    void enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
                                      ExecutionMode executionMode);

    void enterFakeParallelExitFrame(Register cx, Register scratch,
                                    JitCode *codeVal = nullptr);

    void enterFakeExitFrame(Register cxReg, Register scratch,
                            ExecutionMode executionMode,
                            JitCode *codeVal = nullptr);

    void leaveExitFrame() {
        freeStack(IonExitFooterFrame::Size());
    }

    bool hasEnteredExitFrame() const {
        return exitCodePatch_.offset() != 0;
    }

    void link(JitCode *code) {
        JS_ASSERT(!oom());
        // If this code can transition to C++ code and witness a GC, then we need to store
        // the JitCode onto the stack in order to GC it correctly. exitCodePatch should
        // be unset if the code never needed to push its JitCode*.
        if (hasEnteredExitFrame()) {
            exitCodePatch_.fixup(this);
            patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                    ImmPtr(code),
                                    ImmPtr((void*)-1));
        }
    }

    // Generates code used to complete a bailout.
    void generateBailoutTail(Register scratch, Register bailoutInfo);

    // These functions exist as small wrappers around sites where execution can
    // leave the currently running stream of instructions. They exist so that
    // instrumentation may be put in place around them if necessary and the
    // instrumentation is enabled. For the functions that return a uint32_t,
    // they are returning the offset of the assembler just after the call has
    // been made so that a safepoint can be made at that location.
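    // For example (hypothetical caller, not part of this interface), the
    // returned offset is what a safepoint would be keyed on:
    //
    //   uint32_t callOffset = masm.callIon(calleeReg);
    //   markSafepointAt(callOffset, lir);   // record a safepoint here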
    template <typename T>
    void callWithABINoProfiling(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        MacroAssemblerSpecific::callWithABI(fun, result);
    }

    template <typename T>
    void callWithABI(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        leaveSPSFrame();
        callWithABINoProfiling(fun, result);
        reenterSPSFrame();
    }

    // See above comment for what is returned.
    uint32_t callIon(Register callee) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callIon(callee);
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }

    // See above comment for what is returned.
    uint32_t callWithExitFrame(JitCode *target) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callWithExitFrame(target);
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }

    // See above comment for what is returned.
    uint32_t callWithExitFrame(JitCode *target, Register dynStack) {
        leaveSPSFrame();
        MacroAssemblerSpecific::callWithExitFrame(target, dynStack);
        uint32_t ret = currentOffset();
        reenterSPSFrame();
        return ret;
    }

    void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
                                Label *slowCheck, Label *checked)
    {
        // The branches to out-of-line code here implement a conservative version
        // of the JSObject::isWrapper test performed in EmulatesUndefined. If none
        // of the branches are taken, we can check class flags directly.
        loadObjClass(objReg, scratch);
        Address flags(scratch, Class::offsetOfFlags());

        branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_IS_PROXY), slowCheck);

        Condition cond = truthy ? Assembler::Zero : Assembler::NonZero;
        branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked);
    }

  private:
    // These two functions are helpers used around call sites throughout the
    // assembler. They are called from the above call wrappers to emit the
    // necessary instrumentation.
    void leaveSPSFrame() {
        if (!sps_ || !sps_->enabled())
            return;
        // No registers are guaranteed to be available, so push/pop a register
        // so we can use one.
        push(CallTempReg0);
        sps_->leave(*this, CallTempReg0);
        pop(CallTempReg0);
    }

    void reenterSPSFrame() {
        if (!sps_ || !sps_->enabled())
            return;
        // Attempt to use a now-free register within a given set, but if the
        // architecture being built doesn't have an available register, resort
        // to push/pop.
        GeneralRegisterSet regs(Registers::TempMask & ~Registers::JSCallMask &
                                ~Registers::CallMask);
        if (regs.empty()) {
            push(CallTempReg0);
            sps_->reenter(*this, CallTempReg0);
            pop(CallTempReg0);
        } else {
            sps_->reenter(*this, regs.getAny());
        }
    }

    void spsProfileEntryAddress(SPSProfiler *p, int offset, Register temp,
                                Label *full)
    {
        movePtr(ImmPtr(p->sizePointer()), temp);
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);
        branch32(Assembler::GreaterThanOrEqual, temp, Imm32(p->maxSize()), full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        addPtr(ImmPtr(p->stack()), temp);
    }

    // The safe version of the above method refrains from assuming that the fields
    // of the SPSProfiler class are going to stay the same across different runs of
    // the jitcode. Ion can use the more efficient unsafe version because Ion jitcode
    // will not survive changes to the profiler settings. Baseline jitcode, however,
    // can span these changes, so any hardcoded field values will be incorrect afterwards.
    // All the SPS-related methods used by Baseline call |spsProfileEntryAddressSafe|.
    void spsProfileEntryAddressSafe(SPSProfiler *p, int offset, Register temp,
                                    Label *full)
    {
        // Load the size pointer.
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);

        // Load the size.
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);

        // Test against the max size.
        branch32(Assembler::LessThanOrEqual, AbsoluteAddress(p->addressOfMaxSize()), temp, full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        push(temp);
        loadPtr(AbsoluteAddress(p->addressOfStack()), temp);
        addPtr(Address(StackPointer, 0), temp);
        addPtr(Imm32(sizeof(size_t)), StackPointer);
    }

  public:
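    // A note on the index-to-byte-offset math in the two methods above: each
    // ProfileEntry is 4 * sizeof(void*) bytes, so the byte offset of entry
    // |idx| is idx * 16 == idx << 4 on 32-bit targets and idx * 32 == idx << 5
    // on 64-bit targets, hence the shift by 2 + (sizeof(void*) == 4 ? 2 : 3).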
    // These functions are needed by the IonInstrumentation interface defined in
    // vm/SPSProfiler.h. They will modify the pseudostack provided to SPS to
    // perform the actual instrumentation.

    void spsUpdatePCIdx(SPSProfiler *p, int32_t idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, -1, temp, &stackFull);
        store32(Imm32(idx), Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }

    void spsUpdatePCIdx(SPSProfiler *p, Register idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddressSafe(p, -1, temp, &stackFull);
        store32(idx, Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }

    // spsPushFrame variant for Ion-optimized scripts.
    void spsPushFrame(SPSProfiler *p, const char *str, JSScript *s, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, 0, temp, &stackFull);

        storePtr(ImmPtr(str), Address(temp, ProfileEntry::offsetOfString()));
        storePtr(ImmGCPtr(s), Address(temp, ProfileEntry::offsetOfScript()));
        storePtr(ImmPtr((void*) ProfileEntry::SCRIPT_OPT_STACKPOINTER),
                 Address(temp, ProfileEntry::offsetOfStackAddress()));
        store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(1), Address(temp, 0));
    }

    // spsPushFrame variant for Baseline-optimized scripts.
    void spsPushFrame(SPSProfiler *p, const Address &str, const Address &script,
                      Register temp, Register temp2)
    {
        Label stackFull;
        spsProfileEntryAddressSafe(p, 0, temp, &stackFull);

        loadPtr(str, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfString()));

        loadPtr(script, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfScript()));

        storePtr(ImmPtr(nullptr), Address(temp, ProfileEntry::offsetOfStackAddress()));

        // Store 0 for PCIdx because that's what the interpreter does.
        // (See probes::EnterScript, which calls spsProfiler.enter, which pushes an entry
        // with 0 pcIdx.)
        store32(Imm32(0), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->addressOfSizePointer()), temp);
        loadPtr(Address(temp, 0), temp);
        add32(Imm32(1), Address(temp, 0));
    }

    void spsPopFrame(SPSProfiler *p, Register temp) {
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }

    // spsPopFrameSafe does not assume |profiler->sizePointer()| will stay constant.
    void spsPopFrameSafe(SPSProfiler *p, Register temp) {
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }

    static const char enterJitLabel[];
    void spsMarkJit(SPSProfiler *p, Register framePtr, Register temp);
    void spsUnmarkJit(SPSProfiler *p, Register temp);

    void loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode, Label *failure);
    void loadBaselineOrIonNoArgCheck(Register callee, Register dest, ExecutionMode mode, Label *failure);

    void loadBaselineFramePtr(Register framePtr, Register dest);

    void pushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        push(scratch);
    }

  private:
    void handleFailure(ExecutionMode executionMode);

  public:
    Label *exceptionLabel() {
        // Exceptions are currently handled the same way as sequential failures.
        return &sequentialFailureLabel_;
    }

    Label *failureLabel(ExecutionMode executionMode) {
        switch (executionMode) {
          case SequentialExecution: return &sequentialFailureLabel_;
          case ParallelExecution: return &parallelFailureLabel_;
          default: MOZ_ASSUME_UNREACHABLE("Unexpected execution mode");
        }
    }

    void finish();

    void assumeUnreachable(const char *output);
    void printf(const char *output);
    void printf(const char *output, Register value);

#ifdef JS_TRACE_LOGGING
    void tracelogStart(Register logger, uint32_t textId);
    void tracelogStart(Register logger, Register textId);
    void tracelogStop(Register logger, uint32_t textId);
    void tracelogStop(Register logger, Register textId);
    void tracelogStop(Register logger);
#endif

#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2)    \
    JS_ASSERT(IsFloatingPointType(type));                               \
    if (type == MIRType_Double)                                         \
        method##Double(arg1d, arg2);                                    \
    else                                                                \
        method##Float32(arg1f, arg2);                                   \

    void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
    }
    void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
    }
    void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
    }
    void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
    }

#undef DISPATCH_FLOATING_POINT_OP

    void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label *fail,
                                     MIRType outputType);
    bool convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
                                     Label *fail, MIRType outputType);
    bool convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
                                                  FloatRegister output, Label *fail,
                                                  MIRType outputType);
    void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                            Label *fail, MIRType outputType);

    void convertInt32ValueToDouble(const Address &address, Register scratch, Label *done);
    void convertValueToDouble(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Double);
    }
    bool convertValueToDouble(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
    }
    bool convertConstantOrRegisterToDouble(JSContext *cx, ConstantOrRegister src,
                                           FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
    }
    void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
    }

    void convertValueToFloat(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
    }
    bool convertValueToFloat(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
    }
    bool convertConstantOrRegisterToFloat(JSContext *cx, ConstantOrRegister src,
                                          FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
    }
    void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
    }

    enum IntConversionBehavior {
        IntConversion_Normal,
        IntConversion_NegativeZeroCheck,
        IntConversion_Truncate,
        IntConversion_ClampToUint8,
    };

    enum IntConversionInputKind {
        IntConversion_NumbersOnly,
        IntConversion_NumbersOrBoolsOnly,
        IntConversion_Any
    };

    //
    // Functions for converting values to int.
    //
    void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
                            Label *truncateFail, Label *fail, IntConversionBehavior behavior);
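    // Roughly, the behaviors above mean: IntConversion_Normal fails on
    // doubles with a fractional part, IntConversion_NegativeZeroCheck
    // additionally rejects -0, IntConversion_Truncate applies ToInt32-style
    // truncation, and IntConversion_ClampToUint8 clamps to [0, 255] as
    // typed-array stores do (see the implementation for the authoritative
    // semantics).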
    // Strings may be handled by providing labels to jump to when the behavior
    // is truncation or clamping. The subroutine, usually an OOL call, is
    // passed the unboxed string in |stringReg| and should convert it to a
    // double stored in |temp|.
    void convertValueToInt(ValueOperand value, MDefinition *input,
                           Label *handleStringEntry, Label *handleStringRejoin,
                           Label *truncateDoubleSlow,
                           Register stringReg, FloatRegister temp, Register output,
                           Label *fail, IntConversionBehavior behavior,
                           IntConversionInputKind conversion = IntConversion_Any);
    void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label *fail,
                           IntConversionBehavior behavior)
    {
        convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
                          fail, behavior);
    }
    bool convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
                           IntConversionBehavior behavior);
    bool convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label *fail, IntConversionBehavior behavior);
    void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label *fail, IntConversionBehavior behavior);

    //
    // Convenience functions for converting values to int32.
    //
    void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail,
                             bool negativeZeroCheck)
    {
        convertValueToInt(value, temp, output, fail, negativeZeroCheck
                          ? IntConversion_NegativeZeroCheck
                          : IntConversion_Normal);
    }
    void convertValueToInt32(ValueOperand value, MDefinition *input,
                             FloatRegister temp, Register output, Label *fail,
                             bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          negativeZeroCheck
                          ? IntConversion_NegativeZeroCheck
                          : IntConversion_Normal,
                          conversion);
    }
    bool convertValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail,
                             bool negativeZeroCheck)
    {
        return convertValueToInt(cx, v, output, fail, negativeZeroCheck
                                 ? IntConversion_NegativeZeroCheck
                                 : IntConversion_Normal);
    }
    bool convertConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                          Register output, Label *fail, bool negativeZeroCheck)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
                                              ? IntConversion_NegativeZeroCheck
                                              : IntConversion_Normal);
    }
    void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                    Label *fail, bool negativeZeroCheck)
    {
        convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
                                 ? IntConversion_NegativeZeroCheck
                                 : IntConversion_Normal);
    }

    //
    // Convenience functions for truncating values to int32.
    //
    void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
    }
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              Label *handleStringEntry, Label *handleStringRejoin,
                              Label *truncateDoubleSlow,
                              Register stringReg, FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
                          stringReg, temp, output, fail, IntConversion_Truncate);
    }
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_Truncate);
    }
    bool truncateValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
    }
    bool truncateConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                           Register output, Label *fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
    }
    void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                     Label *fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
    }

    // Convenience functions for clamping values to uint8.
    void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
    }
    void clampValueToUint8(ValueOperand value, MDefinition *input,
                           Label *handleStringEntry, Label *handleStringRejoin,
                           Register stringReg, FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
                          stringReg, temp, output, fail, IntConversion_ClampToUint8);
    }
    void clampValueToUint8(ValueOperand value, MDefinition *input,
                           FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_ClampToUint8);
    }
    bool clampValueToUint8(JSContext *cx, const Value &v, Register output, Label *fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
    }
    bool clampConstantOrRegisterToUint8(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label *fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
                                              IntConversion_ClampToUint8);
    }
    void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label *fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
    }

  public:
    class AfterICSaveLive {
        friend class MacroAssembler;
        AfterICSaveLive(uint32_t initialStack)
#ifdef JS_DEBUG
          : initialStack(initialStack)
#endif
        {}

#ifdef JS_DEBUG
      public:
        uint32_t initialStack;
#endif
    };

    AfterICSaveLive icSaveLive(RegisterSet &liveRegs) {
        PushRegsInMask(liveRegs);
        return AfterICSaveLive(framePushed());
    }

    bool icBuildOOLFakeExitFrame(void *fakeReturnAddr, AfterICSaveLive &aic) {
        return buildOOLFakeExitFrame(fakeReturnAddr);
    }

    void icRestoreLive(RegisterSet &liveRegs, AfterICSaveLive &aic) {
        JS_ASSERT(framePushed() == aic.initialStack);
        PopRegsInMask(liveRegs);
    }
};

static inline Assembler::DoubleCondition
JSOpToDoubleCondition(JSOp op)
{
    switch (op) {
      case JSOP_EQ:
      case JSOP_STRICTEQ:
        return Assembler::DoubleEqual;
      case JSOP_NE:
      case JSOP_STRICTNE:
        return Assembler::DoubleNotEqualOrUnordered;
      case JSOP_LT:
        return Assembler::DoubleLessThan;
      case JSOP_LE:
        return Assembler::DoubleLessThanOrEqual;
      case JSOP_GT:
        return Assembler::DoubleGreaterThan;
      case JSOP_GE:
        return Assembler::DoubleGreaterThanOrEqual;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected comparison operation");
    }
}

// Note: the op may have been inverted during lowering (to put constants in a
// position where they can be immediates), so it is important to use the
// lir->jsop() instead of the mir->jsop() when it is present.
static inline Assembler::Condition
JSOpToCondition(JSOp op, bool isSigned)
{
    if (isSigned) {
        switch (op) {
          case JSOP_EQ:
          case JSOP_STRICTEQ:
            return Assembler::Equal;
          case JSOP_NE:
          case JSOP_STRICTNE:
            return Assembler::NotEqual;
          case JSOP_LT:
            return Assembler::LessThan;
          case JSOP_LE:
            return Assembler::LessThanOrEqual;
          case JSOP_GT:
            return Assembler::GreaterThan;
          case JSOP_GE:
            return Assembler::GreaterThanOrEqual;
          default:
            MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
        }
    } else {
        switch (op) {
          case JSOP_EQ:
          case JSOP_STRICTEQ:
            return Assembler::Equal;
          case JSOP_NE:
          case JSOP_STRICTNE:
            return Assembler::NotEqual;
          case JSOP_LT:
            return Assembler::Below;
          case JSOP_LE:
            return Assembler::BelowOrEqual;
          case JSOP_GT:
            return Assembler::Above;
          case JSOP_GE:
            return Assembler::AboveOrEqual;
          default:
            MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
        }
    }
}

} // namespace jit
} // namespace js

#endif // JS_ION

#endif /* jit_IonMacroAssembler_h */