michael@0: /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- michael@0: * vim: set ts=8 sts=4 et sw=4 tw=99: michael@0: * This Source Code Form is subject to the terms of the Mozilla Public michael@0: * License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ michael@0: michael@0: #ifndef jit_arm_Assembler_arm_h michael@0: #define jit_arm_Assembler_arm_h michael@0: michael@0: #include "mozilla/ArrayUtils.h" michael@0: #include "mozilla/Attributes.h" michael@0: #include "mozilla/MathAlgorithms.h" michael@0: michael@0: #include "assembler/assembler/AssemblerBufferWithConstantPool.h" michael@0: #include "jit/arm/Architecture-arm.h" michael@0: #include "jit/CompactBuffer.h" michael@0: #include "jit/IonCode.h" michael@0: #include "jit/shared/Assembler-shared.h" michael@0: #include "jit/shared/IonAssemblerBufferWithConstantPools.h" michael@0: michael@0: namespace js { michael@0: namespace jit { michael@0: michael@0: //NOTE: there are duplicates in this list! michael@0: // sometimes we want to specifically refer to the michael@0: // link register as a link register (bl lr is much michael@0: // clearer than bl r14). HOWEVER, this register can michael@0: // easily be a gpr when it is not busy holding the return michael@0: // address. 
michael@0: static MOZ_CONSTEXPR_VAR Register r0 = { Registers::r0 }; michael@0: static MOZ_CONSTEXPR_VAR Register r1 = { Registers::r1 }; michael@0: static MOZ_CONSTEXPR_VAR Register r2 = { Registers::r2 }; michael@0: static MOZ_CONSTEXPR_VAR Register r3 = { Registers::r3 }; michael@0: static MOZ_CONSTEXPR_VAR Register r4 = { Registers::r4 }; michael@0: static MOZ_CONSTEXPR_VAR Register r5 = { Registers::r5 }; michael@0: static MOZ_CONSTEXPR_VAR Register r6 = { Registers::r6 }; michael@0: static MOZ_CONSTEXPR_VAR Register r7 = { Registers::r7 }; michael@0: static MOZ_CONSTEXPR_VAR Register r8 = { Registers::r8 }; michael@0: static MOZ_CONSTEXPR_VAR Register r9 = { Registers::r9 }; michael@0: static MOZ_CONSTEXPR_VAR Register r10 = { Registers::r10 }; michael@0: static MOZ_CONSTEXPR_VAR Register r11 = { Registers::r11 }; michael@0: static MOZ_CONSTEXPR_VAR Register r12 = { Registers::ip }; michael@0: static MOZ_CONSTEXPR_VAR Register ip = { Registers::ip }; michael@0: static MOZ_CONSTEXPR_VAR Register sp = { Registers::sp }; michael@0: static MOZ_CONSTEXPR_VAR Register r14 = { Registers::lr }; michael@0: static MOZ_CONSTEXPR_VAR Register lr = { Registers::lr }; michael@0: static MOZ_CONSTEXPR_VAR Register pc = { Registers::pc }; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register ScratchRegister = {Registers::ip}; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register OsrFrameReg = r3; michael@0: static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = r8; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg0 = r5; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg1 = r6; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg2 = r7; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg3 = r8; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg4 = r0; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempReg5 = r1; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register IntArgReg0 = r0; michael@0: static MOZ_CONSTEXPR_VAR Register IntArgReg1 
= r1; michael@0: static MOZ_CONSTEXPR_VAR Register IntArgReg2 = r2; michael@0: static MOZ_CONSTEXPR_VAR Register IntArgReg3 = r3; michael@0: static MOZ_CONSTEXPR_VAR Register GlobalReg = r10; michael@0: static MOZ_CONSTEXPR_VAR Register HeapReg = r11; michael@0: static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { r5, r6, r7, r8 }; michael@0: static const uint32_t NumCallTempNonArgRegs = michael@0: mozilla::ArrayLength(CallTempNonArgRegs); michael@0: class ABIArgGenerator michael@0: { michael@0: unsigned intRegIndex_; michael@0: unsigned floatRegIndex_; michael@0: uint32_t stackOffset_; michael@0: ABIArg current_; michael@0: michael@0: public: michael@0: ABIArgGenerator(); michael@0: ABIArg next(MIRType argType); michael@0: ABIArg ¤t() { return current_; } michael@0: uint32_t stackBytesConsumedSoFar() const { return stackOffset_; } michael@0: static const Register NonArgReturnVolatileReg0; michael@0: static const Register NonArgReturnVolatileReg1; michael@0: }; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register PreBarrierReg = r1; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg }; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { FloatRegisters::invalid_freg }; michael@0: michael@0: static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = r3; michael@0: static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = r2; michael@0: static MOZ_CONSTEXPR_VAR Register StackPointer = sp; michael@0: static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg; michael@0: static MOZ_CONSTEXPR_VAR Register ReturnReg = r0; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = { FloatRegisters::d0 }; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = { FloatRegisters::d15 }; michael@0: michael@0: static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::d14 }; michael@0: michael@0: // Registers used in the GenerateFFIIonExit Enable Activation block. 
michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = r4; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = r0; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = r1; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = r2; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = r3; michael@0: michael@0: // Registers used in the GenerateFFIIonExit Disable Activation block. michael@0: // None of these may be the second scratch register (lr). michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = r2; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = r3; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = r0; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = r1; michael@0: static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = r4; michael@0: michael@0: michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d0 = {FloatRegisters::d0}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d1 = {FloatRegisters::d1}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d2 = {FloatRegisters::d2}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d3 = {FloatRegisters::d3}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d4 = {FloatRegisters::d4}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d5 = {FloatRegisters::d5}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d6 = {FloatRegisters::d6}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d7 = {FloatRegisters::d7}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d8 = {FloatRegisters::d8}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d9 = {FloatRegisters::d9}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d10 = {FloatRegisters::d10}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d11 = {FloatRegisters::d11}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d12 = {FloatRegisters::d12}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d13 = {FloatRegisters::d13}; michael@0: static 
MOZ_CONSTEXPR_VAR FloatRegister d14 = {FloatRegisters::d14}; michael@0: static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15}; michael@0: michael@0: // For maximal awesomeness, 8 should be sufficent. michael@0: // ldrd/strd (dual-register load/store) operate in a single cycle michael@0: // when the address they are dealing with is 8 byte aligned. michael@0: // Also, the ARM abi wants the stack to be 8 byte aligned at michael@0: // function boundaries. I'm trying to make sure this is always true. michael@0: static const uint32_t StackAlignment = 8; michael@0: static const uint32_t CodeAlignment = 8; michael@0: static const bool StackKeptAligned = true; michael@0: static const uint32_t NativeFrameSize = sizeof(void*); michael@0: static const uint32_t AlignmentAtPrologue = 0; michael@0: static const uint32_t AlignmentMidPrologue = 4; michael@0: michael@0: michael@0: static const Scale ScalePointer = TimesFour; michael@0: michael@0: class Instruction; michael@0: class InstBranchImm; michael@0: uint32_t RM(Register r); michael@0: uint32_t RS(Register r); michael@0: uint32_t RD(Register r); michael@0: uint32_t RT(Register r); michael@0: uint32_t RN(Register r); michael@0: michael@0: uint32_t maybeRD(Register r); michael@0: uint32_t maybeRT(Register r); michael@0: uint32_t maybeRN(Register r); michael@0: michael@0: Register toRN (Instruction &i); michael@0: Register toRM (Instruction &i); michael@0: Register toRD (Instruction &i); michael@0: Register toR (Instruction &i); michael@0: michael@0: class VFPRegister; michael@0: uint32_t VD(VFPRegister vr); michael@0: uint32_t VN(VFPRegister vr); michael@0: uint32_t VM(VFPRegister vr); michael@0: michael@0: class VFPRegister michael@0: { michael@0: public: michael@0: // What type of data is being stored in this register? michael@0: // UInt / Int are specifically for vcvt, where we need michael@0: // to know how the data is supposed to be converted. 
michael@0: enum RegType { michael@0: Double = 0x0, michael@0: Single = 0x1, michael@0: UInt = 0x2, michael@0: Int = 0x3 michael@0: }; michael@0: michael@0: protected: michael@0: RegType kind : 2; michael@0: // ARM doesn't have more than 32 registers... michael@0: // don't take more bits than we'll need. michael@0: // Presently, I don't have plans to address the upper michael@0: // and lower halves of the double registers seprately, so michael@0: // 5 bits should suffice. If I do decide to address them seprately michael@0: // (vmov, I'm looking at you), I will likely specify it as a separate michael@0: // field. michael@0: uint32_t _code : 5; michael@0: bool _isInvalid : 1; michael@0: bool _isMissing : 1; michael@0: michael@0: VFPRegister(int r, RegType k) michael@0: : kind(k), _code (r), _isInvalid(false), _isMissing(false) michael@0: { } michael@0: michael@0: public: michael@0: VFPRegister() michael@0: : _isInvalid(true), _isMissing(false) michael@0: { } michael@0: michael@0: VFPRegister(bool b) michael@0: : _isInvalid(false), _isMissing(b) michael@0: { } michael@0: michael@0: VFPRegister(FloatRegister fr) michael@0: : kind(Double), _code(fr.code()), _isInvalid(false), _isMissing(false) michael@0: { michael@0: JS_ASSERT(_code == (unsigned)fr.code()); michael@0: } michael@0: michael@0: VFPRegister(FloatRegister fr, RegType k) michael@0: : kind(k), _code (fr.code()), _isInvalid(false), _isMissing(false) michael@0: { michael@0: JS_ASSERT(_code == (unsigned)fr.code()); michael@0: } michael@0: bool isDouble() const { return kind == Double; } michael@0: bool isSingle() const { return kind == Single; } michael@0: bool isFloat() const { return (kind == Double) || (kind == Single); } michael@0: bool isInt() const { return (kind == UInt) || (kind == Int); } michael@0: bool isSInt() const { return kind == Int; } michael@0: bool isUInt() const { return kind == UInt; } michael@0: bool equiv(VFPRegister other) const { return other.kind == kind; } michael@0: size_t size() const 
{ return (kind == Double) ? 8 : 4; } michael@0: bool isInvalid(); michael@0: bool isMissing(); michael@0: michael@0: VFPRegister doubleOverlay() const; michael@0: VFPRegister singleOverlay() const; michael@0: VFPRegister sintOverlay() const; michael@0: VFPRegister uintOverlay() const; michael@0: michael@0: struct VFPRegIndexSplit; michael@0: VFPRegIndexSplit encode(); michael@0: michael@0: // for serializing values michael@0: struct VFPRegIndexSplit { michael@0: const uint32_t block : 4; michael@0: const uint32_t bit : 1; michael@0: michael@0: private: michael@0: friend VFPRegIndexSplit js::jit::VFPRegister::encode(); michael@0: michael@0: VFPRegIndexSplit (uint32_t block_, uint32_t bit_) michael@0: : block(block_), bit(bit_) michael@0: { michael@0: JS_ASSERT (block == block_); michael@0: JS_ASSERT(bit == bit_); michael@0: } michael@0: }; michael@0: michael@0: uint32_t code() const { michael@0: return _code; michael@0: } michael@0: }; michael@0: michael@0: // For being passed into the generic vfp instruction generator when michael@0: // there is an instruction that only takes two registers michael@0: extern VFPRegister NoVFPRegister; michael@0: michael@0: struct ImmTag : public Imm32 michael@0: { michael@0: ImmTag(JSValueTag mask) michael@0: : Imm32(int32_t(mask)) michael@0: { } michael@0: }; michael@0: michael@0: struct ImmType : public ImmTag michael@0: { michael@0: ImmType(JSValueType type) michael@0: : ImmTag(JSVAL_TYPE_TO_TAG(type)) michael@0: { } michael@0: }; michael@0: michael@0: enum Index { michael@0: Offset = 0 << 21 | 1<<24, michael@0: PreIndex = 1<<21 | 1 << 24, michael@0: PostIndex = 0 << 21 | 0 << 24 michael@0: // The docs were rather unclear on this. 
it sounds like michael@0: // 1<<21 | 0 << 24 encodes dtrt michael@0: }; michael@0: michael@0: // Seriously, wtf arm michael@0: enum IsImmOp2_ { michael@0: IsImmOp2 = 1 << 25, michael@0: IsNotImmOp2 = 0 << 25 michael@0: }; michael@0: enum IsImmDTR_ { michael@0: IsImmDTR = 0 << 25, michael@0: IsNotImmDTR = 1 << 25 michael@0: }; michael@0: // For the extra memory operations, ldrd, ldrsb, ldrh michael@0: enum IsImmEDTR_ { michael@0: IsImmEDTR = 1 << 22, michael@0: IsNotImmEDTR = 0 << 22 michael@0: }; michael@0: michael@0: michael@0: enum ShiftType { michael@0: LSL = 0, // << 5 michael@0: LSR = 1, // << 5 michael@0: ASR = 2, // << 5 michael@0: ROR = 3, // << 5 michael@0: RRX = ROR // RRX is encoded as ROR with a 0 offset. michael@0: }; michael@0: michael@0: // The actual codes that get set by instructions michael@0: // and the codes that are checked by the conditions below. michael@0: struct ConditionCodes michael@0: { michael@0: bool Zero : 1; michael@0: bool Overflow : 1; michael@0: bool Carry : 1; michael@0: bool Minus : 1; michael@0: }; michael@0: michael@0: // Modes for STM/LDM. michael@0: // Names are the suffixes applied to michael@0: // the instruction. michael@0: enum DTMMode { michael@0: A = 0 << 24, // empty / after michael@0: B = 1 << 24, // full / before michael@0: D = 0 << 23, // decrement michael@0: I = 1 << 23, // increment michael@0: DA = D | A, michael@0: DB = D | B, michael@0: IA = I | A, michael@0: IB = I | B michael@0: }; michael@0: michael@0: enum DTMWriteBack { michael@0: WriteBack = 1 << 21, michael@0: NoWriteBack = 0 << 21 michael@0: }; michael@0: michael@0: enum SetCond_ { michael@0: SetCond = 1 << 20, michael@0: NoSetCond = 0 << 20 michael@0: }; michael@0: enum LoadStore { michael@0: IsLoad = 1 << 20, michael@0: IsStore = 0 << 20 michael@0: }; michael@0: // You almost never want to use this directly. michael@0: // Instead, you wantto pass in a signed constant, michael@0: // and let this bit be implicitly set for you. 
michael@0: // this is however, necessary if we want a negative index michael@0: enum IsUp_ { michael@0: IsUp = 1 << 23, michael@0: IsDown = 0 << 23 michael@0: }; michael@0: enum ALUOp { michael@0: op_mov = 0xd << 21, michael@0: op_mvn = 0xf << 21, michael@0: op_and = 0x0 << 21, michael@0: op_bic = 0xe << 21, michael@0: op_eor = 0x1 << 21, michael@0: op_orr = 0xc << 21, michael@0: op_adc = 0x5 << 21, michael@0: op_add = 0x4 << 21, michael@0: op_sbc = 0x6 << 21, michael@0: op_sub = 0x2 << 21, michael@0: op_rsb = 0x3 << 21, michael@0: op_rsc = 0x7 << 21, michael@0: op_cmn = 0xb << 21, michael@0: op_cmp = 0xa << 21, michael@0: op_teq = 0x9 << 21, michael@0: op_tst = 0x8 << 21, michael@0: op_invalid = -1 michael@0: }; michael@0: michael@0: michael@0: enum MULOp { michael@0: opm_mul = 0 << 21, michael@0: opm_mla = 1 << 21, michael@0: opm_umaal = 2 << 21, michael@0: opm_mls = 3 << 21, michael@0: opm_umull = 4 << 21, michael@0: opm_umlal = 5 << 21, michael@0: opm_smull = 6 << 21, michael@0: opm_smlal = 7 << 21 michael@0: }; michael@0: enum BranchTag { michael@0: op_b = 0x0a000000, michael@0: op_b_mask = 0x0f000000, michael@0: op_b_dest_mask = 0x00ffffff, michael@0: op_bl = 0x0b000000, michael@0: op_blx = 0x012fff30, michael@0: op_bx = 0x012fff10 michael@0: }; michael@0: michael@0: // Just like ALUOp, but for the vfp instruction set. michael@0: enum VFPOp { michael@0: opv_mul = 0x2 << 20, michael@0: opv_add = 0x3 << 20, michael@0: opv_sub = 0x3 << 20 | 0x1 << 6, michael@0: opv_div = 0x8 << 20, michael@0: opv_mov = 0xB << 20 | 0x1 << 6, michael@0: opv_abs = 0xB << 20 | 0x3 << 6, michael@0: opv_neg = 0xB << 20 | 0x1 << 6 | 0x1 << 16, michael@0: opv_sqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16, michael@0: opv_cmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16, michael@0: opv_cmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16 michael@0: }; michael@0: // Negate the operation, AND negate the immediate that we were passed in. 
michael@0: ALUOp ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest); michael@0: bool can_dbl(ALUOp op); michael@0: bool condsAreSafe(ALUOp op); michael@0: // If there is a variant of op that has a dest (think cmp/sub) michael@0: // return that variant of it. michael@0: ALUOp getDestVariant(ALUOp op); michael@0: michael@0: static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data); michael@0: static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0); michael@0: // All of these classes exist solely to shuffle data into the various operands. michael@0: // For example Operand2 can be an imm8, a register-shifted-by-a-constant or michael@0: // a register-shifted-by-a-register. I represent this in C++ by having a michael@0: // base class Operand2, which just stores the 32 bits of data as they will be michael@0: // encoded in the instruction. You cannot directly create an Operand2 michael@0: // since it is tricky, and not entirely sane to do so. Instead, you create michael@0: // one of its child classes, e.g. Imm8. Imm8's constructor takes a single michael@0: // integer argument. Imm8 will verify that its argument can be encoded michael@0: // as an ARM 12 bit imm8, encode it using an Imm8data, and finally call michael@0: // its parent's (Operand2) constructor with the Imm8data. The Operand2 michael@0: // constructor will then call the Imm8data's encode() function to extract michael@0: // the raw bits from it. In the future, we should be able to extract michael@0: // data from the Operand2 by asking it for its component Imm8data michael@0: // structures. The reason this is so horribly round-about is I wanted michael@0: // to have Imm8 and RegisterShiftedRegister inherit directly from Operand2 michael@0: // but have all of them take up only a single word of storage. michael@0: // I also wanted to avoid passing around raw integers at all michael@0: // since they are error prone. 
michael@0: class Op2Reg; michael@0: class O2RegImmShift; michael@0: class O2RegRegShift; michael@0: namespace datastore { michael@0: struct Reg michael@0: { michael@0: // the "second register" michael@0: uint32_t RM : 4; michael@0: // do we get another register for shifting michael@0: uint32_t RRS : 1; michael@0: ShiftType Type : 2; michael@0: // I'd like this to be a more sensible encoding, but that would michael@0: // need to be a struct and that would not pack :( michael@0: uint32_t ShiftAmount : 5; michael@0: uint32_t pad : 20; michael@0: michael@0: Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftamount) michael@0: : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0) michael@0: { } michael@0: michael@0: uint32_t encode() { michael@0: return RM | RRS << 4 | Type << 5 | ShiftAmount << 7; michael@0: } michael@0: explicit Reg(const Op2Reg &op) { michael@0: memcpy(this, &op, sizeof(*this)); michael@0: } michael@0: }; michael@0: michael@0: // Op2 has a mode labelled "", which is arm's magical michael@0: // immediate encoding. Some instructions actually get 8 bits of michael@0: // data, which is called Imm8Data below. These should have edit michael@0: // distance > 1, but this is how it is for now. michael@0: struct Imm8mData michael@0: { michael@0: private: michael@0: uint32_t data : 8; michael@0: uint32_t rot : 4; michael@0: // Throw in an extra bit that will be 1 if we can't encode this michael@0: // properly. if we can encode it properly, a simple "|" will still michael@0: // suffice to meld it into the instruction. michael@0: uint32_t buff : 19; michael@0: public: michael@0: uint32_t invalid : 1; michael@0: michael@0: uint32_t encode() { michael@0: JS_ASSERT(!invalid); michael@0: return data | rot << 8; michael@0: }; michael@0: michael@0: // Default constructor makes an invalid immediate. 
michael@0: Imm8mData() michael@0: : data(0xff), rot(0xf), invalid(1) michael@0: { } michael@0: michael@0: Imm8mData(uint32_t data_, uint32_t rot_) michael@0: : data(data_), rot(rot_), invalid(0) michael@0: { michael@0: JS_ASSERT(data == data_); michael@0: JS_ASSERT(rot == rot_); michael@0: } michael@0: }; michael@0: michael@0: struct Imm8Data michael@0: { michael@0: private: michael@0: uint32_t imm4L : 4; michael@0: uint32_t pad : 4; michael@0: uint32_t imm4H : 4; michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return imm4L | (imm4H << 8); michael@0: }; michael@0: Imm8Data(uint32_t imm) : imm4L(imm&0xf), imm4H(imm>>4) { michael@0: JS_ASSERT(imm <= 0xff); michael@0: } michael@0: }; michael@0: michael@0: // VLDR/VSTR take an 8 bit offset, which is implicitly left shifted michael@0: // by 2. michael@0: struct Imm8VFPOffData michael@0: { michael@0: private: michael@0: uint32_t data; michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return data; michael@0: }; michael@0: Imm8VFPOffData(uint32_t imm) : data (imm) { michael@0: JS_ASSERT((imm & ~(0xff)) == 0); michael@0: } michael@0: }; michael@0: michael@0: // ARM can magically encode 256 very special immediates to be moved michael@0: // into a register. 
michael@0: struct Imm8VFPImmData michael@0: { michael@0: private: michael@0: uint32_t imm4L : 4; michael@0: uint32_t pad : 12; michael@0: uint32_t imm4H : 4; michael@0: int32_t isInvalid : 12; michael@0: michael@0: public: michael@0: Imm8VFPImmData() michael@0: : imm4L(-1U & 0xf), imm4H(-1U & 0xf), isInvalid(-1) michael@0: { } michael@0: michael@0: Imm8VFPImmData(uint32_t imm) michael@0: : imm4L(imm&0xf), imm4H(imm>>4), isInvalid(0) michael@0: { michael@0: JS_ASSERT(imm <= 0xff); michael@0: } michael@0: michael@0: uint32_t encode() { michael@0: if (isInvalid != 0) michael@0: return -1; michael@0: return imm4L | (imm4H << 16); michael@0: }; michael@0: }; michael@0: michael@0: struct Imm12Data michael@0: { michael@0: uint32_t data : 12; michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: michael@0: Imm12Data(uint32_t imm) michael@0: : data(imm) michael@0: { michael@0: JS_ASSERT(data == imm); michael@0: } michael@0: michael@0: }; michael@0: michael@0: struct RIS michael@0: { michael@0: uint32_t ShiftAmount : 5; michael@0: uint32_t encode () { michael@0: return ShiftAmount; michael@0: } michael@0: michael@0: RIS(uint32_t imm) michael@0: : ShiftAmount(imm) michael@0: { michael@0: JS_ASSERT(ShiftAmount == imm); michael@0: } michael@0: explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) {} michael@0: }; michael@0: michael@0: struct RRS michael@0: { michael@0: uint32_t MustZero : 1; michael@0: // the register that holds the shift amount michael@0: uint32_t RS : 4; michael@0: michael@0: RRS(uint32_t rs) michael@0: : RS(rs) michael@0: { michael@0: JS_ASSERT(rs == RS); michael@0: } michael@0: michael@0: uint32_t encode () { michael@0: return RS << 1; michael@0: } michael@0: }; michael@0: michael@0: } // namespace datastore michael@0: michael@0: class MacroAssemblerARM; michael@0: class Operand; michael@0: class Operand2 michael@0: { michael@0: friend class Operand; michael@0: friend class MacroAssemblerARM; michael@0: friend class InstALU; michael@0: 
public: michael@0: uint32_t oper : 31; michael@0: uint32_t invalid : 1; michael@0: bool isO2Reg() { michael@0: return !(oper & IsImmOp2); michael@0: } michael@0: Op2Reg toOp2Reg(); michael@0: bool isImm8() { michael@0: return oper & IsImmOp2; michael@0: } michael@0: michael@0: protected: michael@0: Operand2(datastore::Imm8mData base) michael@0: : oper(base.invalid ? -1 : (base.encode() | (uint32_t)IsImmOp2)), michael@0: invalid(base.invalid) michael@0: { } michael@0: michael@0: Operand2(datastore::Reg base) michael@0: : oper(base.encode() | (uint32_t)IsNotImmOp2) michael@0: { } michael@0: michael@0: private: michael@0: Operand2(int blob) michael@0: : oper(blob) michael@0: { } michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return oper; michael@0: } michael@0: }; michael@0: michael@0: class Imm8 : public Operand2 michael@0: { michael@0: public: michael@0: static datastore::Imm8mData encodeImm(uint32_t imm) { michael@0: // mozilla::CountLeadingZeroes32(imm) requires imm != 0. michael@0: if (imm == 0) michael@0: return datastore::Imm8mData(0, 0); michael@0: int left = mozilla::CountLeadingZeroes32(imm) & 30; michael@0: // See if imm is a simple value that can be encoded with a rotate of 0. michael@0: // This is effectively imm <= 0xff, but I assume this can be optimized michael@0: // more michael@0: if (left >= 24) michael@0: return datastore::Imm8mData(imm, 0); michael@0: michael@0: // Mask out the 8 bits following the first bit that we found, see if we michael@0: // have 0 yet. michael@0: int no_imm = imm & ~(0xff << (24 - left)); michael@0: if (no_imm == 0) { michael@0: return datastore::Imm8mData(imm >> (24 - left), ((8+left) >> 1)); michael@0: } michael@0: // Look for the most signifigant bit set, once again. michael@0: int right = 32 - (mozilla::CountLeadingZeroes32(no_imm) & 30); michael@0: // If it is in the bottom 8 bits, there is a chance that this is a michael@0: // wraparound case. 
michael@0: if (right >= 8) michael@0: return datastore::Imm8mData(); michael@0: // Rather than masking out bits and checking for 0, just rotate the michael@0: // immediate that we were passed in, and see if it fits into 8 bits. michael@0: unsigned int mask = imm << (8 - right) | imm >> (24 + right); michael@0: if (mask <= 0xff) michael@0: return datastore::Imm8mData(mask, (8-right) >> 1); michael@0: return datastore::Imm8mData(); michael@0: } michael@0: // pair template? michael@0: struct TwoImm8mData michael@0: { michael@0: datastore::Imm8mData fst, snd; michael@0: michael@0: TwoImm8mData() michael@0: : fst(), snd() michael@0: { } michael@0: michael@0: TwoImm8mData(datastore::Imm8mData _fst, datastore::Imm8mData _snd) michael@0: : fst(_fst), snd(_snd) michael@0: { } michael@0: }; michael@0: michael@0: static TwoImm8mData encodeTwoImms(uint32_t); michael@0: Imm8(uint32_t imm) michael@0: : Operand2(encodeImm(imm)) michael@0: { } michael@0: }; michael@0: michael@0: class Op2Reg : public Operand2 michael@0: { michael@0: public: michael@0: Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm) michael@0: : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode())) michael@0: { } michael@0: michael@0: Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg) michael@0: : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode())) michael@0: { } michael@0: bool isO2RegImmShift() { michael@0: datastore::Reg r(*this); michael@0: return !r.RRS; michael@0: } michael@0: O2RegImmShift toO2RegImmShift(); michael@0: bool isO2RegRegShift() { michael@0: datastore::Reg r(*this); michael@0: return r.RRS; michael@0: } michael@0: O2RegRegShift toO2RegRegShift(); michael@0: michael@0: bool checkType(ShiftType type) { michael@0: datastore::Reg r(*this); michael@0: return r.Type == type; michael@0: } michael@0: bool checkRM(Register rm) { michael@0: datastore::Reg r(*this); michael@0: return r.RM == rm.code(); michael@0: } michael@0: bool getRM(Register *rm) { 
michael@0: datastore::Reg r(*this); michael@0: *rm = Register::FromCode(r.RM); michael@0: return true; michael@0: } michael@0: }; michael@0: michael@0: class O2RegImmShift : public Op2Reg michael@0: { michael@0: public: michael@0: O2RegImmShift(Register rn, ShiftType type, uint32_t shift) michael@0: : Op2Reg(rn, type, datastore::RIS(shift)) michael@0: { } michael@0: int getShift() { michael@0: datastore::Reg r(*this); michael@0: datastore::RIS ris(r); michael@0: return ris.ShiftAmount; michael@0: } michael@0: }; michael@0: michael@0: class O2RegRegShift : public Op2Reg michael@0: { michael@0: public: michael@0: O2RegRegShift(Register rn, ShiftType type, Register rs) michael@0: : Op2Reg(rn, type, datastore::RRS(rs.code())) michael@0: { } michael@0: }; michael@0: michael@0: O2RegImmShift O2Reg(Register r); michael@0: O2RegImmShift lsl (Register r, int amt); michael@0: O2RegImmShift lsr (Register r, int amt); michael@0: O2RegImmShift asr (Register r, int amt); michael@0: O2RegImmShift rol (Register r, int amt); michael@0: O2RegImmShift ror (Register r, int amt); michael@0: michael@0: O2RegRegShift lsl (Register r, Register amt); michael@0: O2RegRegShift lsr (Register r, Register amt); michael@0: O2RegRegShift asr (Register r, Register amt); michael@0: O2RegRegShift ror (Register r, Register amt); michael@0: michael@0: // An offset from a register to be used for ldr/str. This should include michael@0: // the sign bit, since ARM has "signed-magnitude" offsets. That is it encodes michael@0: // an unsigned offset, then the instruction specifies if the offset is positive michael@0: // or negative. The +/- bit is necessary if the instruction set wants to be michael@0: // able to have a negative register offset e.g. 
ldr pc, [r1,-r2]; michael@0: class DtrOff michael@0: { michael@0: uint32_t data; michael@0: michael@0: protected: michael@0: DtrOff(datastore::Imm12Data immdata, IsUp_ iu) michael@0: : data(immdata.encode() | (uint32_t)IsImmDTR | ((uint32_t)iu)) michael@0: { } michael@0: michael@0: DtrOff(datastore::Reg reg, IsUp_ iu = IsUp) michael@0: : data(reg.encode() | (uint32_t) IsNotImmDTR | iu) michael@0: { } michael@0: michael@0: public: michael@0: uint32_t encode() { return data; } michael@0: }; michael@0: michael@0: class DtrOffImm : public DtrOff michael@0: { michael@0: public: michael@0: DtrOffImm(int32_t imm) michael@0: : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)), imm >= 0 ? IsUp : IsDown) michael@0: { michael@0: JS_ASSERT(mozilla::Abs(imm) < 4096); michael@0: } michael@0: }; michael@0: michael@0: class DtrOffReg : public DtrOff michael@0: { michael@0: // These are designed to be called by a constructor of a subclass. michael@0: // Constructing the necessary RIS/RRS structures are annoying michael@0: protected: michael@0: DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp) michael@0: : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu) michael@0: { } michael@0: michael@0: DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp) michael@0: : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu) michael@0: { } michael@0: }; michael@0: michael@0: class DtrRegImmShift : public DtrOffReg michael@0: { michael@0: public: michael@0: DtrRegImmShift(Register rn, ShiftType type, uint32_t shift, IsUp_ iu = IsUp) michael@0: : DtrOffReg(rn, type, datastore::RIS(shift), iu) michael@0: { } michael@0: }; michael@0: michael@0: class DtrRegRegShift : public DtrOffReg michael@0: { michael@0: public: michael@0: DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp) michael@0: : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu) michael@0: { } michael@0: }; michael@0: michael@0: // we 
will frequently want to bundle a register with its offset so that we have michael@0: // an "operand" to a load instruction. michael@0: class DTRAddr michael@0: { michael@0: uint32_t data; michael@0: michael@0: public: michael@0: DTRAddr(Register reg, DtrOff dtr) michael@0: : data(dtr.encode() | (reg.code() << 16)) michael@0: { } michael@0: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: Register getBase() { michael@0: return Register::FromCode((data >> 16) &0xf); michael@0: } michael@0: private: michael@0: friend class Operand; michael@0: DTRAddr(uint32_t blob) michael@0: : data(blob) michael@0: { } michael@0: }; michael@0: michael@0: // Offsets for the extended data transfer instructions: michael@0: // ldrsh, ldrd, ldrsb, etc. michael@0: class EDtrOff michael@0: { michael@0: uint32_t data; michael@0: michael@0: protected: michael@0: EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp) michael@0: : data(imm8.encode() | IsImmEDTR | (uint32_t)iu) michael@0: { } michael@0: michael@0: EDtrOff(Register rm, IsUp_ iu = IsUp) michael@0: : data(rm.code() | IsNotImmEDTR | iu) michael@0: { } michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: }; michael@0: michael@0: class EDtrOffImm : public EDtrOff michael@0: { michael@0: public: michael@0: EDtrOffImm(int32_t imm) michael@0: : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? 
IsUp : IsDown) michael@0: { michael@0: JS_ASSERT(mozilla::Abs(imm) < 256); michael@0: } michael@0: }; michael@0: michael@0: // this is the most-derived class, since the extended data michael@0: // transfer instructions don't support any sort of modifying the michael@0: // "index" operand michael@0: class EDtrOffReg : public EDtrOff michael@0: { michael@0: public: michael@0: EDtrOffReg(Register rm) michael@0: : EDtrOff(rm) michael@0: { } michael@0: }; michael@0: michael@0: class EDtrAddr michael@0: { michael@0: uint32_t data; michael@0: michael@0: public: michael@0: EDtrAddr(Register r, EDtrOff off) michael@0: : data(RN(r) | off.encode()) michael@0: { } michael@0: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: }; michael@0: michael@0: class VFPOff michael@0: { michael@0: uint32_t data; michael@0: michael@0: protected: michael@0: VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup) michael@0: : data(imm.encode() | (uint32_t)isup) michael@0: { } michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: }; michael@0: michael@0: class VFPOffImm : public VFPOff michael@0: { michael@0: public: michael@0: VFPOffImm(int32_t imm) michael@0: : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4), imm < 0 ? 
IsDown : IsUp) michael@0: { michael@0: JS_ASSERT(mozilla::Abs(imm) <= 255 * 4); michael@0: } michael@0: }; michael@0: class VFPAddr michael@0: { michael@0: friend class Operand; michael@0: michael@0: uint32_t data; michael@0: michael@0: protected: michael@0: VFPAddr(uint32_t blob) michael@0: : data(blob) michael@0: { } michael@0: michael@0: public: michael@0: VFPAddr(Register base, VFPOff off) michael@0: : data(RN(base) | off.encode()) michael@0: { } michael@0: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: }; michael@0: michael@0: class VFPImm { michael@0: uint32_t data; michael@0: michael@0: public: michael@0: static const VFPImm one; michael@0: michael@0: VFPImm(uint32_t topWordOfDouble); michael@0: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: bool isValid() { michael@0: return data != -1U; michael@0: } michael@0: }; michael@0: michael@0: // A BOffImm is an immediate that is used for branches. Namely, it is the offset that will michael@0: // be encoded in the branch instruction. This is the only sane way of constructing a branch. 
michael@0: class BOffImm michael@0: { michael@0: uint32_t data; michael@0: michael@0: public: michael@0: uint32_t encode() { michael@0: return data; michael@0: } michael@0: int32_t decode() { michael@0: return ((((int32_t)data) << 8) >> 6) + 8; michael@0: } michael@0: michael@0: explicit BOffImm(int offset) michael@0: : data ((offset - 8) >> 2 & 0x00ffffff) michael@0: { michael@0: JS_ASSERT((offset & 0x3) == 0); michael@0: if (!isInRange(offset)) michael@0: CrashAtUnhandlableOOM("BOffImm"); michael@0: } michael@0: static bool isInRange(int offset) michael@0: { michael@0: if ((offset - 8) < -33554432) michael@0: return false; michael@0: if ((offset - 8) > 33554428) michael@0: return false; michael@0: return true; michael@0: } michael@0: static const int INVALID = 0x00800000; michael@0: BOffImm() michael@0: : data(INVALID) michael@0: { } michael@0: michael@0: bool isInvalid() { michael@0: return data == uint32_t(INVALID); michael@0: } michael@0: Instruction *getDest(Instruction *src); michael@0: michael@0: private: michael@0: friend class InstBranchImm; michael@0: BOffImm(Instruction &inst); michael@0: }; michael@0: michael@0: class Imm16 michael@0: { michael@0: uint32_t lower : 12; michael@0: uint32_t pad : 4; michael@0: uint32_t upper : 4; michael@0: uint32_t invalid : 12; michael@0: michael@0: public: michael@0: Imm16(); michael@0: Imm16(uint32_t imm); michael@0: Imm16(Instruction &inst); michael@0: michael@0: uint32_t encode() { michael@0: return lower | upper << 16; michael@0: } michael@0: uint32_t decode() { michael@0: return lower | upper << 12; michael@0: } michael@0: michael@0: bool isInvalid () { michael@0: return invalid; michael@0: } michael@0: }; michael@0: michael@0: /* I would preffer that these do not exist, since there are essentially michael@0: * no instructions that would ever take more than one of these, however, michael@0: * the MIR wants to only have one type of arguments to functions, so bugger. 
michael@0: */ michael@0: class Operand michael@0: { michael@0: // the encoding of registers is the same for OP2, DTR and EDTR michael@0: // yet the type system doesn't let us express this, so choices michael@0: // must be made. michael@0: public: michael@0: enum Tag_ { michael@0: OP2, michael@0: MEM, michael@0: FOP michael@0: }; michael@0: michael@0: private: michael@0: Tag_ Tag : 3; michael@0: uint32_t reg : 5; michael@0: int32_t offset; michael@0: uint32_t data; michael@0: michael@0: public: michael@0: Operand (Register reg_) michael@0: : Tag(OP2), reg(reg_.code()) michael@0: { } michael@0: michael@0: Operand (FloatRegister freg) michael@0: : Tag(FOP), reg(freg.code()) michael@0: { } michael@0: michael@0: Operand (Register base, Imm32 off) michael@0: : Tag(MEM), reg(base.code()), offset(off.value) michael@0: { } michael@0: michael@0: Operand (Register base, int32_t off) michael@0: : Tag(MEM), reg(base.code()), offset(off) michael@0: { } michael@0: michael@0: Operand (const Address &addr) michael@0: : Tag(MEM), reg(addr.base.code()), offset(addr.offset) michael@0: { } michael@0: michael@0: Tag_ getTag() const { michael@0: return Tag; michael@0: } michael@0: michael@0: Operand2 toOp2() const { michael@0: JS_ASSERT(Tag == OP2); michael@0: return O2Reg(Register::FromCode(reg)); michael@0: } michael@0: michael@0: Register toReg() const { michael@0: JS_ASSERT(Tag == OP2); michael@0: return Register::FromCode(reg); michael@0: } michael@0: michael@0: void toAddr(Register *r, Imm32 *dest) const { michael@0: JS_ASSERT(Tag == MEM); michael@0: *r = Register::FromCode(reg); michael@0: *dest = Imm32(offset); michael@0: } michael@0: Address toAddress() const { michael@0: return Address(Register::FromCode(reg), offset); michael@0: } michael@0: int32_t disp() const { michael@0: JS_ASSERT(Tag == MEM); michael@0: return offset; michael@0: } michael@0: michael@0: int32_t base() const { michael@0: JS_ASSERT(Tag == MEM); michael@0: return reg; michael@0: } michael@0: Register 
baseReg() const { michael@0: return Register::FromCode(reg); michael@0: } michael@0: DTRAddr toDTRAddr() const { michael@0: return DTRAddr(baseReg(), DtrOffImm(offset)); michael@0: } michael@0: VFPAddr toVFPAddr() const { michael@0: return VFPAddr(baseReg(), VFPOffImm(offset)); michael@0: } michael@0: }; michael@0: michael@0: void michael@0: PatchJump(CodeLocationJump &jump_, CodeLocationLabel label); michael@0: class InstructionIterator; michael@0: class Assembler; michael@0: typedef js::jit::AssemblerBufferWithConstantPool<1024, 4, Instruction, Assembler, 1> ARMBuffer; michael@0: michael@0: class Assembler : public AssemblerShared michael@0: { michael@0: public: michael@0: // ARM conditional constants michael@0: enum ARMCondition { michael@0: EQ = 0x00000000, // Zero michael@0: NE = 0x10000000, // Non-zero michael@0: CS = 0x20000000, michael@0: CC = 0x30000000, michael@0: MI = 0x40000000, michael@0: PL = 0x50000000, michael@0: VS = 0x60000000, michael@0: VC = 0x70000000, michael@0: HI = 0x80000000, michael@0: LS = 0x90000000, michael@0: GE = 0xa0000000, michael@0: LT = 0xb0000000, michael@0: GT = 0xc0000000, michael@0: LE = 0xd0000000, michael@0: AL = 0xe0000000 michael@0: }; michael@0: michael@0: enum Condition { michael@0: Equal = EQ, michael@0: NotEqual = NE, michael@0: Above = HI, michael@0: AboveOrEqual = CS, michael@0: Below = CC, michael@0: BelowOrEqual = LS, michael@0: GreaterThan = GT, michael@0: GreaterThanOrEqual = GE, michael@0: LessThan = LT, michael@0: LessThanOrEqual = LE, michael@0: Overflow = VS, michael@0: Signed = MI, michael@0: NotSigned = PL, michael@0: Zero = EQ, michael@0: NonZero = NE, michael@0: Always = AL, michael@0: michael@0: VFP_NotEqualOrUnordered = NE, michael@0: VFP_Equal = EQ, michael@0: VFP_Unordered = VS, michael@0: VFP_NotUnordered = VC, michael@0: VFP_GreaterThanOrEqualOrUnordered = CS, michael@0: VFP_GreaterThanOrEqual = GE, michael@0: VFP_GreaterThanOrUnordered = HI, michael@0: VFP_GreaterThan = GT, michael@0: 
VFP_LessThanOrEqualOrUnordered = LE, michael@0: VFP_LessThanOrEqual = LS, michael@0: VFP_LessThanOrUnordered = LT, michael@0: VFP_LessThan = CC // MI is valid too. michael@0: }; michael@0: michael@0: // Bit set when a DoubleCondition does not map to a single ARM condition. michael@0: // The macro assembler has to special-case these conditions, or else michael@0: // ConditionFromDoubleCondition will complain. michael@0: static const int DoubleConditionBitSpecial = 0x1; michael@0: michael@0: enum DoubleCondition { michael@0: // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. michael@0: DoubleOrdered = VFP_NotUnordered, michael@0: DoubleEqual = VFP_Equal, michael@0: DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial, michael@0: DoubleGreaterThan = VFP_GreaterThan, michael@0: DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual, michael@0: DoubleLessThan = VFP_LessThan, michael@0: DoubleLessThanOrEqual = VFP_LessThanOrEqual, michael@0: // If either operand is NaN, these conditions always evaluate to true. 
michael@0: DoubleUnordered = VFP_Unordered, michael@0: DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial, michael@0: DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered, michael@0: DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered, michael@0: DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered, michael@0: DoubleLessThanOrUnordered = VFP_LessThanOrUnordered, michael@0: DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered michael@0: }; michael@0: michael@0: Condition getCondition(uint32_t inst) { michael@0: return (Condition) (0xf0000000 & inst); michael@0: } michael@0: static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) { michael@0: JS_ASSERT(!(cond & DoubleConditionBitSpecial)); michael@0: return static_cast(cond); michael@0: } michael@0: michael@0: // :( this should be protected, but since CodeGenerator michael@0: // wants to use it, It needs to go out here :( michael@0: michael@0: BufferOffset nextOffset() { michael@0: return m_buffer.nextOffset(); michael@0: } michael@0: michael@0: protected: michael@0: BufferOffset labelOffset (Label *l) { michael@0: return BufferOffset(l->bound()); michael@0: } michael@0: michael@0: Instruction * editSrc (BufferOffset bo) { michael@0: return m_buffer.getInst(bo); michael@0: } michael@0: public: michael@0: void resetCounter(); michael@0: uint32_t actualOffset(uint32_t) const; michael@0: uint32_t actualIndex(uint32_t) const; michael@0: static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index); michael@0: BufferOffset actualOffset(BufferOffset) const; michael@0: protected: michael@0: michael@0: // structure for fixing up pc-relative loads/jumps when a the machine code michael@0: // gets moved (executable copy, gc, etc.) 
michael@0: struct RelativePatch michael@0: { michael@0: void *target; michael@0: Relocation::Kind kind; michael@0: RelativePatch(void *target, Relocation::Kind kind) michael@0: : target(target), kind(kind) michael@0: { } michael@0: }; michael@0: michael@0: // TODO: this should actually be a pool-like object michael@0: // It is currently a big hack, and probably shouldn't exist michael@0: js::Vector codeLabels_; michael@0: js::Vector jumps_; michael@0: js::Vector tmpJumpRelocations_; michael@0: js::Vector tmpDataRelocations_; michael@0: js::Vector tmpPreBarriers_; michael@0: michael@0: CompactBufferWriter jumpRelocations_; michael@0: CompactBufferWriter dataRelocations_; michael@0: CompactBufferWriter relocations_; michael@0: CompactBufferWriter preBarriers_; michael@0: michael@0: bool enoughMemory_; michael@0: michael@0: //typedef JSC::AssemblerBufferWithConstantPool<1024, 4, 4, js::jit::Assembler> ARMBuffer; michael@0: ARMBuffer m_buffer; michael@0: michael@0: // There is now a semi-unified interface for instruction generation. michael@0: // During assembly, there is an active buffer that instructions are michael@0: // being written into, but later, we may wish to modify instructions michael@0: // that have already been created. In order to do this, we call the michael@0: // same assembly function, but pass it a destination address, which michael@0: // will be overwritten with a new instruction. In order to do this very michael@0: // after assembly buffers no longer exist, when calling with a third michael@0: // dest parameter, a this object is still needed. dummy always happens michael@0: // to be null, but we shouldn't be looking at it in any case. 
michael@0: static Assembler *dummy; michael@0: mozilla::Array pools_; michael@0: Pool *int32Pool; michael@0: Pool *doublePool; michael@0: michael@0: public: michael@0: Assembler() michael@0: : enoughMemory_(true), michael@0: m_buffer(4, 4, 0, &pools_[0], 8), michael@0: int32Pool(m_buffer.getPool(1)), michael@0: doublePool(m_buffer.getPool(0)), michael@0: isFinished(false), michael@0: dtmActive(false), michael@0: dtmCond(Always) michael@0: { michael@0: } michael@0: michael@0: // We need to wait until an AutoIonContextAlloc is created by the michael@0: // IonMacroAssembler, before allocating any space. michael@0: void initWithAllocator() { michael@0: m_buffer.initWithAllocator(); michael@0: michael@0: // Set up the backwards double region michael@0: new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true); michael@0: // Set up the backwards 32 bit region michael@0: new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true); michael@0: // Set up the forwards double region michael@0: new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]); michael@0: // Set up the forwards 32 bit region michael@0: new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]); michael@0: for (int i = 0; i < 4; i++) { michael@0: if (pools_[i].poolData == nullptr) { michael@0: m_buffer.fail_oom(); michael@0: return; michael@0: } michael@0: } michael@0: } michael@0: michael@0: static Condition InvertCondition(Condition cond); michael@0: michael@0: // MacroAssemblers hold onto gcthings, so they are traced by the GC. michael@0: void trace(JSTracer *trc); michael@0: void writeRelocation(BufferOffset src) { michael@0: tmpJumpRelocations_.append(src); michael@0: } michael@0: michael@0: // As opposed to x86/x64 version, the data relocation has to be executed michael@0: // before to recover the pointer, and not after. 
michael@0: void writeDataRelocation(const ImmGCPtr &ptr) { michael@0: if (ptr.value) michael@0: tmpDataRelocations_.append(nextOffset()); michael@0: } michael@0: void writePrebarrierOffset(CodeOffsetLabel label) { michael@0: tmpPreBarriers_.append(BufferOffset(label.offset())); michael@0: } michael@0: michael@0: enum RelocBranchStyle { michael@0: B_MOVWT, michael@0: B_LDR_BX, michael@0: B_LDR, michael@0: B_MOVW_ADD michael@0: }; michael@0: michael@0: enum RelocStyle { michael@0: L_MOVWT, michael@0: L_LDR michael@0: }; michael@0: michael@0: public: michael@0: // Given the start of a Control Flow sequence, grab the value that is finally branched to michael@0: // given the start of a function that loads an address into a register get the address that michael@0: // ends up in the register. michael@0: template michael@0: static const uint32_t * getCF32Target(Iter *iter); michael@0: michael@0: static uintptr_t getPointer(uint8_t *); michael@0: template michael@0: static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr); michael@0: michael@0: bool oom() const; michael@0: michael@0: void setPrinter(Sprinter *sp) { michael@0: } michael@0: michael@0: private: michael@0: bool isFinished; michael@0: public: michael@0: void finish(); michael@0: void executableCopy(void *buffer); michael@0: void copyJumpRelocationTable(uint8_t *dest); michael@0: void copyDataRelocationTable(uint8_t *dest); michael@0: void copyPreBarrierTable(uint8_t *dest); michael@0: michael@0: bool addCodeLabel(CodeLabel label); michael@0: size_t numCodeLabels() const { michael@0: return codeLabels_.length(); michael@0: } michael@0: CodeLabel codeLabel(size_t i) { michael@0: return codeLabels_[i]; michael@0: } michael@0: michael@0: // Size of the instruction stream, in bytes. michael@0: size_t size() const; michael@0: // Size of the jump relocation table, in bytes. 
michael@0: size_t jumpRelocationTableBytes() const; michael@0: size_t dataRelocationTableBytes() const; michael@0: size_t preBarrierTableBytes() const; michael@0: michael@0: // Size of the data table, in bytes. michael@0: size_t bytesNeeded() const; michael@0: michael@0: // Write a blob of binary into the instruction stream *OR* michael@0: // into a destination address. If dest is nullptr (the default), then the michael@0: // instruction gets written into the instruction stream. If dest is not null michael@0: // it is interpreted as a pointer to the location that we want the michael@0: // instruction to be written. michael@0: BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr); michael@0: // A static variant for the cases where we don't want to have an assembler michael@0: // object at all. Normally, you would use the dummy (nullptr) object. michael@0: static void writeInstStatic(uint32_t x, uint32_t *dest); michael@0: michael@0: public: michael@0: void writeCodePointer(AbsoluteLabel *label); michael@0: michael@0: BufferOffset align(int alignment); michael@0: BufferOffset as_nop(); michael@0: BufferOffset as_alu(Register dest, Register src1, Operand2 op2, michael@0: ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr); michael@0: michael@0: BufferOffset as_mov(Register dest, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr); michael@0: BufferOffset as_mvn(Register dest, Operand2 op2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: // logical operations michael@0: BufferOffset as_and(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_bic(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_eor(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = 
Always); michael@0: BufferOffset as_orr(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: // mathematical operations michael@0: BufferOffset as_adc(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_add(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_sbc(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_sub(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_rsb(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_rsc(Register dest, Register src1, michael@0: Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always); michael@0: // test operations michael@0: BufferOffset as_cmn(Register src1, Operand2 op2, michael@0: Condition c = Always); michael@0: BufferOffset as_cmp(Register src1, Operand2 op2, michael@0: Condition c = Always); michael@0: BufferOffset as_teq(Register src1, Operand2 op2, michael@0: Condition c = Always); michael@0: BufferOffset as_tst(Register src1, Operand2 op2, michael@0: Condition c = Always); michael@0: michael@0: // Not quite ALU worthy, but useful none the less: michael@0: // These also have the isue of these being formatted michael@0: // completly differently from the standard ALU operations. 
michael@0: BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr); michael@0: BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr); michael@0: michael@0: BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn, michael@0: MULOp op, SetCond_ sc, Condition c = Always); michael@0: BufferOffset as_mul(Register dest, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_umaal(Register dest1, Register dest2, Register src1, Register src2, michael@0: Condition c = Always); michael@0: BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2, michael@0: Condition c = Always); michael@0: BufferOffset as_umull(Register dest1, Register dest2, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_umlal(Register dest1, Register dest2, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_smull(Register dest1, Register dest2, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2, michael@0: SetCond_ sc = NoSetCond, Condition c = Always); michael@0: michael@0: BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always); michael@0: BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always); michael@0: michael@0: // Data transfer instructions: ldr, str, ldrb, strb. 
michael@0: // Using an int to differentiate between 8 bits and 32 bits is michael@0: // overkill, but meh michael@0: BufferOffset as_dtr(LoadStore ls, int size, Index mode, michael@0: Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr); michael@0: // Handles all of the other integral data transferring functions: michael@0: // ldrsb, ldrsh, ldrd, etc. michael@0: // size is given in bits. michael@0: BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode, michael@0: Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr); michael@0: michael@0: BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask, michael@0: DTMMode mode, DTMWriteBack wb, Condition c = Always); michael@0: //overwrite a pool entry with new data. michael@0: void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data); michael@0: // load a 32 bit immediate from a pool into a register michael@0: BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always); michael@0: // make a patchable jump that can target the entire 32 bit address space. michael@0: BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always); michael@0: michael@0: // load a 64 bit floating point immediate from a pool into a register michael@0: BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always); michael@0: // load a 32 bit floating point immediate from a pool into a register michael@0: BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always); michael@0: michael@0: // Control flow stuff: michael@0: michael@0: // bx can *only* branch to a register michael@0: // never to an immediate. michael@0: BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false); michael@0: michael@0: // Branch can branch to an immediate *or* to a register. 
michael@0: // Branches to immediates are pc relative, branches to registers michael@0: // are absolute michael@0: BufferOffset as_b(BOffImm off, Condition c, bool isPatchable = false); michael@0: michael@0: BufferOffset as_b(Label *l, Condition c = Always, bool isPatchable = false); michael@0: BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst); michael@0: michael@0: // blx can go to either an immediate or a register. michael@0: // When blx'ing to a register, we change processor mode michael@0: // depending on the low bit of the register michael@0: // when blx'ing to an immediate, we *always* change processor state. michael@0: BufferOffset as_blx(Label *l); michael@0: michael@0: BufferOffset as_blx(Register r, Condition c = Always); michael@0: BufferOffset as_bl(BOffImm off, Condition c); michael@0: // bl can only branch+link to an immediate, never to a register michael@0: // it never changes processor state michael@0: BufferOffset as_bl(); michael@0: // bl #imm can have a condition code, blx #imm cannot. michael@0: // blx reg can be conditional. michael@0: BufferOffset as_bl(Label *l, Condition c); michael@0: BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst); michael@0: michael@0: BufferOffset as_mrs(Register r, Condition c = Always); michael@0: BufferOffset as_msr(Register r, Condition c = Always); michael@0: // VFP instructions! michael@0: private: michael@0: michael@0: enum vfp_size { michael@0: isDouble = 1 << 8, michael@0: isSingle = 0 << 8 michael@0: }; michael@0: michael@0: BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr); michael@0: // Unityped variants: all registers hold the same (ieee754 single/double) michael@0: // notably not included are vcvt; vmov vd, #imm; vmov rt, vn. 
michael@0: BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: VFPOp op, Condition c = Always); michael@0: michael@0: public: michael@0: BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always); michael@0: michael@0: BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always); michael@0: michael@0: BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always); michael@0: michael@0: BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, michael@0: Condition c = Always); michael@0: michael@0: BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, michael@0: Condition c = Always); michael@0: BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always); michael@0: michael@0: // specifically, a move between two same sized-registers michael@0: BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always); michael@0: /*xfer between Core and VFP*/ michael@0: enum FloatToCore_ { michael@0: FloatToCore = 1 << 20, michael@0: CoreToFloat = 0 << 20 michael@0: }; michael@0: michael@0: private: michael@0: enum VFPXferSize { michael@0: WordTransfer = 0x02000010, michael@0: DoubleTransfer = 0x00400010 michael@0: }; michael@0: michael@0: 
public: michael@0: // Unlike the next function, moving between the core registers and vfp michael@0: // registers can't be *that* properly typed. Namely, since I don't want to michael@0: // munge the type VFPRegister to also include core registers. Thus, the core michael@0: // and vfp registers are passed in based on their type, and src/dest is michael@0: // determined by the float2core. michael@0: michael@0: BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c, michael@0: Condition c = Always, int idx = 0); michael@0: michael@0: // our encoding actually allows just the src and the dest (and theiyr types) michael@0: // to uniquely specify the encoding that we are going to use. michael@0: BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false, michael@0: Condition c = Always); michael@0: // hard coded to a 32 bit fixed width result for now michael@0: BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always); michael@0: michael@0: /* xfer between VFP and memory*/ michael@0: BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr, michael@0: Condition c = Always /* vfp doesn't have a wb option*/, michael@0: uint32_t *dest = nullptr); michael@0: michael@0: // VFP's ldm/stm work differently from the standard arm ones. 
michael@0: // You can only transfer a range michael@0: michael@0: BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length, michael@0: /*also has update conditions*/Condition c = Always); michael@0: michael@0: BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always); michael@0: michael@0: BufferOffset as_vmrs(Register r, Condition c = Always); michael@0: BufferOffset as_vmsr(Register r, Condition c = Always); michael@0: // label operations michael@0: bool nextLink(BufferOffset b, BufferOffset *next); michael@0: void bind(Label *label, BufferOffset boff = BufferOffset()); michael@0: void bind(RepatchLabel *label); michael@0: uint32_t currentOffset() { michael@0: return nextOffset().getOffset(); michael@0: } michael@0: void retarget(Label *label, Label *target); michael@0: // I'm going to pretend this doesn't exist for now. michael@0: void retarget(Label *label, void *target, Relocation::Kind reloc); michael@0: michael@0: void Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address); michael@0: michael@0: // See Bind michael@0: size_t labelOffsetToPatchOffset(size_t offset) { michael@0: return actualOffset(offset); michael@0: } michael@0: michael@0: void call(Label *label); michael@0: void call(void *target); michael@0: michael@0: void as_bkpt(); michael@0: michael@0: public: michael@0: static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader); michael@0: static void TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader); michael@0: michael@0: protected: michael@0: void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) { michael@0: enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind)); michael@0: if (kind == Relocation::JITCODE) michael@0: writeRelocation(src); michael@0: } michael@0: michael@0: public: michael@0: // The buffer is about to be linked, make sure any constant pools or excess michael@0: // bookkeeping has been flushed to 
the instruction stream. michael@0: void flush() { michael@0: JS_ASSERT(!isFinished); michael@0: m_buffer.flushPool(); michael@0: return; michael@0: } michael@0: michael@0: // Copy the assembly code to the given buffer, and perform any pending michael@0: // relocations relying on the target address. michael@0: void executableCopy(uint8_t *buffer); michael@0: michael@0: // Actual assembly emitting functions. michael@0: michael@0: // Since I can't think of a reasonable default for the mode, I'm going to michael@0: // leave it as a required argument. michael@0: void startDataTransferM(LoadStore ls, Register rm, michael@0: DTMMode mode, DTMWriteBack update = NoWriteBack, michael@0: Condition c = Always) michael@0: { michael@0: JS_ASSERT(!dtmActive); michael@0: dtmUpdate = update; michael@0: dtmBase = rm; michael@0: dtmLoadStore = ls; michael@0: dtmLastReg = -1; michael@0: dtmRegBitField = 0; michael@0: dtmActive = 1; michael@0: dtmCond = c; michael@0: dtmMode = mode; michael@0: } michael@0: michael@0: void transferReg(Register rn) { michael@0: JS_ASSERT(dtmActive); michael@0: JS_ASSERT(rn.code() > dtmLastReg); michael@0: dtmRegBitField |= 1 << rn.code(); michael@0: if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) { michael@0: MOZ_ASSUME_UNREACHABLE("ARM Spec says this is invalid"); michael@0: } michael@0: } michael@0: void finishDataTransfer() { michael@0: dtmActive = false; michael@0: as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond); michael@0: } michael@0: michael@0: void startFloatTransferM(LoadStore ls, Register rm, michael@0: DTMMode mode, DTMWriteBack update = NoWriteBack, michael@0: Condition c = Always) michael@0: { michael@0: JS_ASSERT(!dtmActive); michael@0: dtmActive = true; michael@0: dtmUpdate = update; michael@0: dtmLoadStore = ls; michael@0: dtmBase = rm; michael@0: dtmCond = c; michael@0: dtmLastReg = -1; michael@0: dtmMode = mode; michael@0: dtmDelta = 0; michael@0: } michael@0: void 
transferFloatReg(VFPRegister rn) michael@0: { michael@0: if (dtmLastReg == -1) { michael@0: vdtmFirstReg = rn.code(); michael@0: } else { michael@0: if (dtmDelta == 0) { michael@0: dtmDelta = rn.code() - dtmLastReg; michael@0: JS_ASSERT(dtmDelta == 1 || dtmDelta == -1); michael@0: } michael@0: JS_ASSERT(dtmLastReg >= 0); michael@0: JS_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta); michael@0: } michael@0: dtmLastReg = rn.code(); michael@0: } michael@0: void finishFloatTransfer() { michael@0: JS_ASSERT(dtmActive); michael@0: dtmActive = false; michael@0: JS_ASSERT(dtmLastReg != -1); michael@0: dtmDelta = dtmDelta ? dtmDelta : 1; michael@0: // fencepost problem. michael@0: int len = dtmDelta * (dtmLastReg - vdtmFirstReg) + 1; michael@0: as_vdtm(dtmLoadStore, dtmBase, michael@0: VFPRegister(FloatRegister::FromCode(Min(vdtmFirstReg, dtmLastReg))), michael@0: len, dtmCond); michael@0: } michael@0: michael@0: private: michael@0: int dtmRegBitField; michael@0: int vdtmFirstReg; michael@0: int dtmLastReg; michael@0: int dtmDelta; michael@0: Register dtmBase; michael@0: DTMWriteBack dtmUpdate; michael@0: DTMMode dtmMode; michael@0: LoadStore dtmLoadStore; michael@0: bool dtmActive; michael@0: Condition dtmCond; michael@0: michael@0: public: michael@0: enum { michael@0: padForAlign8 = (int)0x00, michael@0: padForAlign16 = (int)0x0000, michael@0: padForAlign32 = (int)0xe12fff7f // 'bkpt 0xffff' michael@0: }; michael@0: michael@0: // API for speaking with the IonAssemblerBufferWithConstantPools michael@0: // generate an initial placeholder instruction that we want to later fix up michael@0: static void insertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token); michael@0: // take the stub value that was written in before, and write in an actual load michael@0: // using the index we'd computed previously as well as the address of the pool start. 
michael@0: static bool patchConstantPoolLoad(void* loadAddr, void* constPoolAddr); michael@0: // this is a callback for when we have filled a pool, and MUST flush it now. michael@0: // The pool requires the assembler to place a branch past the pool, and it michael@0: // calls this function. michael@0: static uint32_t placeConstantPoolBarrier(int offset); michael@0: // END API michael@0: michael@0: // move our entire pool into the instruction stream michael@0: // This is to force an opportunistic dump of the pool, prefferably when it michael@0: // is more convenient to do a dump. michael@0: void dumpPool(); michael@0: void flushBuffer(); michael@0: void enterNoPool(); michael@0: void leaveNoPool(); michael@0: // this should return a BOffImm, but I didn't want to require everyplace that used the michael@0: // AssemblerBuffer to make that class. michael@0: static ptrdiff_t getBranchOffset(const Instruction *i); michael@0: static void retargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true); michael@0: static void retargetNearBranch(Instruction *i, int offset, bool final = true); michael@0: static void retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond); michael@0: michael@0: static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural); michael@0: static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural); michael@0: static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest); michael@0: michael@0: michael@0: static uint32_t patchWrite_NearCallSize(); michael@0: static uint32_t nopSize() { return 4; } michael@0: static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall); michael@0: static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, michael@0: PatchedImmPtr expectedValue); michael@0: static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, michael@0: ImmPtr expectedValue); michael@0: static void 
patchWrite_Imm32(CodeLocationLabel label, Imm32 imm); michael@0: static uint32_t alignDoubleArg(uint32_t offset) { michael@0: return (offset+1)&~1; michael@0: } michael@0: static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr); michael@0: // Toggle a jmp or cmp emitted by toggledJump(). michael@0: michael@0: static void ToggleToJmp(CodeLocationLabel inst_); michael@0: static void ToggleToCmp(CodeLocationLabel inst_); michael@0: michael@0: static void ToggleCall(CodeLocationLabel inst_, bool enabled); michael@0: michael@0: static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst); michael@0: void processCodeLabels(uint8_t *rawCode); michael@0: bool bailed() { michael@0: return m_buffer.bail(); michael@0: } michael@0: }; // Assembler michael@0: michael@0: // An Instruction is a structure for both encoding and decoding any and all ARM instructions. michael@0: // many classes have not been implemented thusfar. michael@0: class Instruction michael@0: { michael@0: uint32_t data; michael@0: michael@0: protected: michael@0: // This is not for defaulting to always, this is for instructions that michael@0: // cannot be made conditional, and have the usually invalid 4b1111 cond field michael@0: Instruction (uint32_t data_, bool fake = false) : data(data_ | 0xf0000000) { michael@0: JS_ASSERT (fake || ((data_ & 0xf0000000) == 0)); michael@0: } michael@0: // Standard constructor michael@0: Instruction (uint32_t data_, Assembler::Condition c) : data(data_ | (uint32_t) c) { michael@0: JS_ASSERT ((data_ & 0xf0000000) == 0); michael@0: } michael@0: // You should never create an instruction directly. You should create a michael@0: // more specific instruction which will eventually call one of these michael@0: // constructors for you. 
michael@0: public: michael@0: uint32_t encode() const { michael@0: return data; michael@0: } michael@0: // Check if this instruction is really a particular case michael@0: template michael@0: bool is() const { return C::isTHIS(*this); } michael@0: michael@0: // safely get a more specific variant of this pointer michael@0: template michael@0: C *as() const { return C::asTHIS(*this); } michael@0: michael@0: const Instruction & operator=(const Instruction &src) { michael@0: data = src.data; michael@0: return *this; michael@0: } michael@0: // Since almost all instructions have condition codes, the condition michael@0: // code extractor resides in the base class. michael@0: void extractCond(Assembler::Condition *c) { michael@0: if (data >> 28 != 0xf ) michael@0: *c = (Assembler::Condition)(data & 0xf0000000); michael@0: } michael@0: // Get the next instruction in the instruction stream. michael@0: // This does neat things like ignoreconstant pools and their guards. michael@0: Instruction *next(); michael@0: michael@0: // Sometimes, an api wants a uint32_t (or a pointer to it) rather than michael@0: // an instruction. raw() just coerces this into a pointer to a uint32_t michael@0: const uint32_t *raw() const { return &data; } michael@0: uint32_t size() const { return 4; } michael@0: }; // Instruction michael@0: michael@0: // make sure that it is the right size michael@0: JS_STATIC_ASSERT(sizeof(Instruction) == 4); michael@0: michael@0: // Data Transfer Instructions michael@0: class InstDTR : public Instruction michael@0: { michael@0: public: michael@0: enum IsByte_ { michael@0: IsByte = 0x00400000, michael@0: IsWord = 0x00000000 michael@0: }; michael@0: static const int IsDTR = 0x04000000; michael@0: static const int IsDTRMask = 0x0c000000; michael@0: michael@0: // TODO: Replace the initialization with something that is safer. 
michael@0: InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr, Assembler::Condition c) michael@0: : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c) michael@0: { } michael@0: michael@0: static bool isTHIS(const Instruction &i); michael@0: static InstDTR *asTHIS(const Instruction &i); michael@0: michael@0: }; michael@0: JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction)); michael@0: michael@0: class InstLDR : public InstDTR michael@0: { michael@0: public: michael@0: InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c) michael@0: : InstDTR(IsLoad, IsWord, mode, rt, addr, c) michael@0: { } michael@0: static bool isTHIS(const Instruction &i); michael@0: static InstLDR *asTHIS(const Instruction &i); michael@0: michael@0: }; michael@0: JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR)); michael@0: michael@0: class InstNOP : public Instruction michael@0: { michael@0: static const uint32_t NopInst = 0x0320f000; michael@0: michael@0: public: michael@0: InstNOP() michael@0: : Instruction(NopInst, Assembler::Always) michael@0: { } michael@0: michael@0: static bool isTHIS(const Instruction &i); michael@0: static InstNOP *asTHIS(Instruction &i); michael@0: }; michael@0: michael@0: // Branching to a register, or calling a register michael@0: class InstBranchReg : public Instruction michael@0: { michael@0: protected: michael@0: // Don't use BranchTag yourself, use a derived instruction. 
michael@0: enum BranchTag { michael@0: IsBX = 0x012fff10, michael@0: IsBLX = 0x012fff30 michael@0: }; michael@0: static const uint32_t IsBRegMask = 0x0ffffff0; michael@0: InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c) michael@0: : Instruction(tag | rm.code(), c) michael@0: { } michael@0: public: michael@0: static bool isTHIS (const Instruction &i); michael@0: static InstBranchReg *asTHIS (const Instruction &i); michael@0: // Get the register that is being branched to michael@0: void extractDest(Register *dest); michael@0: // Make sure we are branching to a pre-known register michael@0: bool checkDest(Register dest); michael@0: }; michael@0: JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction)); michael@0: michael@0: // Branching to an immediate offset, or calling an immediate offset michael@0: class InstBranchImm : public Instruction michael@0: { michael@0: protected: michael@0: enum BranchTag { michael@0: IsB = 0x0a000000, michael@0: IsBL = 0x0b000000 michael@0: }; michael@0: static const uint32_t IsBImmMask = 0x0f000000; michael@0: michael@0: InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c) michael@0: : Instruction(tag | off.encode(), c) michael@0: { } michael@0: michael@0: public: michael@0: static bool isTHIS (const Instruction &i); michael@0: static InstBranchImm *asTHIS (const Instruction &i); michael@0: void extractImm(BOffImm *dest); michael@0: }; michael@0: JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction)); michael@0: michael@0: // Very specific branching instructions. 
// BX to a register: branch to the address held in a register, no link.
class InstBXReg : public InstBranchReg
{
  public:
    static bool isTHIS (const Instruction &i);
    static InstBXReg *asTHIS (const Instruction &i);
};

// BLX to a register: branch-with-link (call) to the address in a register.
class InstBLXReg : public InstBranchReg
{
  public:
    InstBLXReg(Register reg, Assembler::Condition c)
      : InstBranchReg(IsBLX, reg, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstBLXReg *asTHIS (const Instruction &i);
};

// B: branch to a pc-relative immediate offset, no link.
class InstBImm : public InstBranchImm
{
  public:
    InstBImm(BOffImm off, Assembler::Condition c)
      : InstBranchImm(IsB, off, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstBImm *asTHIS (const Instruction &i);
};

// BL: branch-with-link (call) to a pc-relative immediate offset.
class InstBLImm : public InstBranchImm
{
  public:
    InstBLImm(BOffImm off, Assembler::Condition c)
      : InstBranchImm(IsBL, off, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstBLImm *asTHIS (Instruction &i);
};

// Both movw and movt. The layout of both the immediate and the destination
// register is the same so the code is being shared.
// Shared base for movw/movt: identical layout for the 16-bit immediate and
// the destination register.
class InstMovWT : public Instruction
{
  protected:
    enum WT {
        IsW = 0x03000000,  // movw opcode bits
        IsT = 0x03400000   // movt opcode bits
    };
    static const uint32_t IsWTMask = 0x0ff00000;

    InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
      : Instruction (RD(rd) | imm.encode() | wt, c)
    { }

  public:
    // Extract or verify the 16-bit immediate and the destination register.
    void extractImm(Imm16 *dest);
    void extractDest(Register *dest);
    bool checkImm(Imm16 dest);
    bool checkDest(Register dest);

    static bool isTHIS (Instruction &i);
    static InstMovWT *asTHIS (Instruction &i);

};
JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));

// movw: load a 16-bit immediate into the low halfword of rd.
class InstMovW : public InstMovWT
{
  public:
    InstMovW (Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsW, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstMovW *asTHIS (const Instruction &i);
};

// movt: load a 16-bit immediate into the top halfword of rd.
class InstMovT : public InstMovWT
{
  public:
    InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsT, c)
    { }
    static bool isTHIS (const Instruction &i);
    static InstMovT *asTHIS (const Instruction &i);
};

// Data-processing (ALU) instructions.
class InstALU : public Instruction
{
    static const int32_t ALUMask = 0xc << 24;
  public:
    InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
    { }
    static bool isTHIS (const Instruction &i);
    static InstALU *asTHIS (const Instruction &i);
    // Extract or verify the opcode, destination, first operand register, and
    // the flexible second operand.
    void extractOp(ALUOp *ret);
    bool checkOp(ALUOp op);
    void extractDest(Register *ret);
    bool checkDest(Register rd);
    void extractOp1(Register *ret);
    bool checkOp1(Register rn);
    Operand2 extractOp2();
};

class InstCMP : public InstALU
{
  public:
    static bool isTHIS (const Instruction &i);
    static InstCMP *asTHIS (const Instruction &i);
};

class InstMOV : public InstALU
{
  public:
    static bool isTHIS (const Instruction &i);
    static InstMOV *asTHIS (const Instruction &i);
};


// Walks the instruction stream via Instruction::next(), which skips constant
// pools and their guards.
class InstructionIterator {
  private:
    Instruction *i;
  public:
    InstructionIterator(Instruction *i_);
    // Advance past the current instruction and return the new position.
    Instruction *next() {
        i = i->next();
        return cur();
    }
    Instruction *cur() const {
        return i;
    }
};

// Argument registers: four integer registers, and (in the hard-fp ABI, see
// GetFloatArgReg below) eight float registers.
static const uint32_t NumIntArgRegs = 4;
static const uint32_t NumFloatArgRegs = 8;

// Pick the register carrying integer argument number usedIntArgs, or fail
// when the argument registers are exhausted. usedFloatArgs is unused here;
// presumably kept for signature symmetry with GetFloatArgReg — confirm.
static inline bool
GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
{
    if (usedIntArgs >= NumIntArgRegs)
        return false;
    *out = Register::FromCode(usedIntArgs);
    return true;
}

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
michael@0: static inline bool michael@0: GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out) michael@0: { michael@0: if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) michael@0: return true; michael@0: // Unfortunately, we have to assume things about the point at which michael@0: // GetIntArgReg returns false, because we need to know how many registers it michael@0: // can allocate. michael@0: usedIntArgs -= NumIntArgRegs; michael@0: if (usedIntArgs >= NumCallTempNonArgRegs) michael@0: return false; michael@0: *out = CallTempNonArgRegs[usedIntArgs]; michael@0: return true; michael@0: } michael@0: michael@0: michael@0: #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) michael@0: michael@0: static inline uint32_t michael@0: GetArgStackDisp(uint32_t arg) michael@0: { michael@0: JS_ASSERT(!useHardFpABI()); michael@0: JS_ASSERT(arg >= NumIntArgRegs); michael@0: return (arg - NumIntArgRegs) * sizeof(intptr_t); michael@0: } michael@0: michael@0: #endif michael@0: michael@0: michael@0: #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) michael@0: michael@0: static inline bool michael@0: GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister *out) michael@0: { michael@0: JS_ASSERT(useHardFpABI()); michael@0: if (usedFloatArgs >= NumFloatArgRegs) michael@0: return false; michael@0: *out = FloatRegister::FromCode(usedFloatArgs); michael@0: return true; michael@0: } michael@0: michael@0: static inline uint32_t michael@0: GetIntArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding) michael@0: { michael@0: JS_ASSERT(useHardFpABI()); michael@0: JS_ASSERT(usedIntArgs >= NumIntArgRegs); michael@0: uint32_t doubleSlots = Max(0, (int32_t)usedFloatArgs - (int32_t)NumFloatArgRegs); michael@0: doubleSlots *= 2; michael@0: int intSlots = usedIntArgs - NumIntArgRegs; michael@0: return (intSlots + doubleSlots + *padding) * sizeof(intptr_t); michael@0: } michael@0: michael@0: static inline 
uint32_t michael@0: GetFloat32ArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding) michael@0: { michael@0: JS_ASSERT(useHardFpABI()); michael@0: JS_ASSERT(usedFloatArgs >= NumFloatArgRegs); michael@0: uint32_t intSlots = 0; michael@0: if (usedIntArgs > NumIntArgRegs) michael@0: intSlots = usedIntArgs - NumIntArgRegs; michael@0: uint32_t float32Slots = usedFloatArgs - NumFloatArgRegs; michael@0: return (intSlots + float32Slots + *padding) * sizeof(intptr_t); michael@0: } michael@0: michael@0: static inline uint32_t michael@0: GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding) michael@0: { michael@0: JS_ASSERT(useHardFpABI()); michael@0: JS_ASSERT(usedFloatArgs >= NumFloatArgRegs); michael@0: uint32_t intSlots = 0; michael@0: if (usedIntArgs > NumIntArgRegs) { michael@0: intSlots = usedIntArgs - NumIntArgRegs; michael@0: // update the amount of padding required. michael@0: *padding += (*padding + usedIntArgs) % 2; michael@0: } michael@0: uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs; michael@0: doubleSlots *= 2; michael@0: return (intSlots + doubleSlots + *padding) * sizeof(intptr_t); michael@0: } michael@0: michael@0: #endif michael@0: michael@0: michael@0: michael@0: class DoubleEncoder { michael@0: uint32_t rep(bool b, uint32_t count) { michael@0: uint32_t ret = 0; michael@0: for (uint32_t i = 0; i < count; i++) michael@0: ret = (ret << 1) | b; michael@0: return ret; michael@0: } michael@0: michael@0: uint32_t encode(uint8_t value) { michael@0: //ARM ARM "VFP modified immediate constants" michael@0: // aBbbbbbb bbcdefgh 000... michael@0: // we want to return the top 32 bits of the double michael@0: // the rest are 0. 
michael@0: bool a = value >> 7; michael@0: bool b = value >> 6 & 1; michael@0: bool B = !b; michael@0: uint32_t cdefgh = value & 0x3f; michael@0: return a << 31 | michael@0: B << 30 | michael@0: rep(b, 8) << 22 | michael@0: cdefgh << 16; michael@0: } michael@0: michael@0: struct DoubleEntry michael@0: { michael@0: uint32_t dblTop; michael@0: datastore::Imm8VFPImmData data; michael@0: michael@0: DoubleEntry() michael@0: : dblTop(-1) michael@0: { } michael@0: DoubleEntry(uint32_t dblTop_, datastore::Imm8VFPImmData data_) michael@0: : dblTop(dblTop_), data(data_) michael@0: { } michael@0: }; michael@0: michael@0: mozilla::Array table; michael@0: michael@0: public: michael@0: DoubleEncoder() michael@0: { michael@0: for (int i = 0; i < 256; i++) { michael@0: table[i] = DoubleEntry(encode(i), datastore::Imm8VFPImmData(i)); michael@0: } michael@0: } michael@0: michael@0: bool lookup(uint32_t top, datastore::Imm8VFPImmData *ret) { michael@0: for (int i = 0; i < 256; i++) { michael@0: if (table[i].dblTop == top) { michael@0: *ret = table[i].data; michael@0: return true; michael@0: } michael@0: } michael@0: return false; michael@0: } michael@0: }; michael@0: michael@0: class AutoForbidPools { michael@0: Assembler *masm_; michael@0: public: michael@0: AutoForbidPools(Assembler *masm) : masm_(masm) { michael@0: masm_->enterNoPool(); michael@0: } michael@0: ~AutoForbidPools() { michael@0: masm_->leaveNoPool(); michael@0: } michael@0: }; michael@0: michael@0: } // namespace jit michael@0: } // namespace js michael@0: michael@0: #endif /* jit_arm_Assembler_arm_h */