/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/Assembler-arm.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jscompartment.h"
#include "jsutil.h"

#include "assembler/jit/ExecutableAllocator.h"
#include "gc/Marking.h"
#include "jit/arm/MacroAssembler-arm.h"
#include "jit/JitCompartment.h"

using namespace js;
using namespace js::jit;

using mozilla::CountLeadingZeroes32;

// Note this is used for inter-AsmJS calls and may pass arguments and results
// in floating point registers even if the system ABI does not.
ABIArgGenerator::ABIArgGenerator() :
    intRegIndex_(0),
    floatRegIndex_(0),
    stackOffset_(0),
    current_()
{}

ABIArg
ABIArgGenerator::next(MIRType type)
{
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint32_t);
            break;
        }
        current_ = ABIArg(Register::FromCode(intRegIndex_));
        intRegIndex_++;
        break;
      case MIRType_Float32:
      case MIRType_Double:
        if (floatRegIndex_ == NumFloatArgRegs) {
            static const int align = sizeof(double) - 1;
            stackOffset_ = (stackOffset_ + align) & ~align;
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(FloatRegister::FromCode(floatRegIndex_));
        floatRegIndex_++;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }

    return current_;
}
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5;

// Encode a standard register when it is being used as src1, the dest, and
// an extra register. These should never be called with an InvalidReg.
uint32_t
js::jit::RT(Register r)
{
    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 12;
}

uint32_t
js::jit::RN(Register r)
{
    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 16;
}

uint32_t
js::jit::RD(Register r)
{
    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 12;
}

uint32_t
js::jit::RM(Register r)
{
    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 8;
}
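
// Illustrative example (not part of the encoder itself): the helpers above
// just place a 4-bit register code into the conventional ARM field position.
// For instance, with r5 (code 5) and r2 (code 2):
//   RN(r5) == 5 << 16 == 0x00050000   // Rn field, bits 19..16
//   RD(r5) == 5 << 12 == 0x00005000   // Rd field, bits 15..12
//   RM(r2) == 2 << 8  == 0x00000200   // Rs-style field, bits 11..8
// These values are OR'ed together with an opcode and a condition to form a
// complete 32-bit instruction word.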

// Encode a standard register when it is being used as src1, the dest, and
// an extra register. For these, an InvalidReg is used to indicate an optional
// register that has been omitted.
uint32_t
js::jit::maybeRT(Register r)
{
    if (r == InvalidReg)
        return 0;

    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 12;
}

uint32_t
js::jit::maybeRN(Register r)
{
    if (r == InvalidReg)
        return 0;

    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 16;
}

uint32_t
js::jit::maybeRD(Register r)
{
    if (r == InvalidReg)
        return 0;

    JS_ASSERT((r.code() & ~0xf) == 0);
    return r.code() << 12;
}

Register
js::jit::toRD(Instruction &i)
{
    return Register::FromCode((i.encode() >> 12) & 0xf);
}
Register
js::jit::toR(Instruction &i)
{
    return Register::FromCode(i.encode() & 0xf);
}

Register
js::jit::toRM(Instruction &i)
{
    return Register::FromCode((i.encode() >> 8) & 0xf);
}

Register
js::jit::toRN(Instruction &i)
{
    return Register::FromCode((i.encode() >> 16) & 0xf);
}

uint32_t
js::jit::VD(VFPRegister vr)
{
    if (vr.isMissing())
        return 0;

    // bits 15,14,13,12, 22
    VFPRegister::VFPRegIndexSplit s = vr.encode();
    return s.bit << 22 | s.block << 12;
}
uint32_t
js::jit::VN(VFPRegister vr)
{
    if (vr.isMissing())
        return 0;

    // bits 19,18,17,16, 7
    VFPRegister::VFPRegIndexSplit s = vr.encode();
    return s.bit << 7 | s.block << 16;
}
uint32_t
js::jit::VM(VFPRegister vr)
{
    if (vr.isMissing())
        return 0;

    // bits 5, 3,2,1,0
    VFPRegister::VFPRegIndexSplit s = vr.encode();
    return s.bit << 5 | s.block;
}

VFPRegister::VFPRegIndexSplit
jit::VFPRegister::encode()
{
    JS_ASSERT(!_isInvalid);

    switch (kind) {
      case Double:
        return VFPRegIndexSplit(_code & 0xf, _code >> 4);
      case Single:
        return VFPRegIndexSplit(_code >> 1, _code & 1);
      default:
        // vfp register treated as an integer, NOT a gpr
        return VFPRegIndexSplit(_code >> 1, _code & 1);
    }
}

VFPRegister js::jit::NoVFPRegister(true);
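
// Worked example (illustrative only): a VFP register number is split into a
// 4-bit "block" and a 1-bit "bit" so that it can straddle the instruction
// word. For the double register d3 (code 3): block == 3, bit == 0, so
//   VD(d3) == (0 << 22) | (3 << 12) == 0x00003000
// For the single register s5 (code 5): block == 5 >> 1 == 2, bit == 5 & 1 == 1,
// so
//   VD(s5) == (1 << 22) | (2 << 12) == 0x00402000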

bool
InstDTR::isTHIS(const Instruction &i)
{
    return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstDTR *
InstDTR::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstDTR*)&i;
    return nullptr;
}

bool
InstLDR::isTHIS(const Instruction &i)
{
    return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstLDR *
InstLDR::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstLDR*)&i;
    return nullptr;
}

InstNOP *
InstNOP::asTHIS(Instruction &i)
{
    if (isTHIS(i))
        return (InstNOP*)&i;
    return nullptr;
}

bool
InstNOP::isTHIS(const Instruction &i)
{
    return (i.encode() & 0x0fffffff) == NopInst;
}

bool
InstBranchReg::isTHIS(const Instruction &i)
{
    return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
}

InstBranchReg *
InstBranchReg::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstBranchReg*)&i;
    return nullptr;
}
void
InstBranchReg::extractDest(Register *dest)
{
    *dest = toR(*this);
}
bool
InstBranchReg::checkDest(Register dest)
{
    return dest == toR(*this);
}

bool
InstBranchImm::isTHIS(const Instruction &i)
{
    return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
}

InstBranchImm *
InstBranchImm::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstBranchImm*)&i;
    return nullptr;
}

void
InstBranchImm::extractImm(BOffImm *dest)
{
    *dest = BOffImm(*this);
}

bool
InstBXReg::isTHIS(const Instruction &i)
{
    return (i.encode() & IsBRegMask) == IsBX;
}

InstBXReg *
InstBXReg::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstBXReg*)&i;
    return nullptr;
}

bool
InstBLXReg::isTHIS(const Instruction &i)
{
    return (i.encode() & IsBRegMask) == IsBLX;
}
InstBLXReg *
InstBLXReg::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstBLXReg*)&i;
    return nullptr;
}

bool
InstBImm::isTHIS(const Instruction &i)
{
    return (i.encode() & IsBImmMask) == IsB;
}
InstBImm *
InstBImm::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstBImm*)&i;
    return nullptr;
}

bool
InstBLImm::isTHIS(const Instruction &i)
{
    return (i.encode() & IsBImmMask) == IsBL;
}
InstBLImm *
InstBLImm::asTHIS(Instruction &i)
{
    if (isTHIS(i))
        return (InstBLImm*)&i;
    return nullptr;
}
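
// Usage sketch (illustrative): the isTHIS/asTHIS pairs above act as checked
// downcasts over raw instruction words. Given some Instruction *inst:
//
//   if (InstBLImm *bl = InstBLImm::asTHIS(*inst)) {
//       BOffImm off;
//       bl->extractImm(&off);   // only reached when the bit pattern matched
//   }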

bool
InstMovWT::isTHIS(Instruction &i)
{
    return InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
}
InstMovWT *
InstMovWT::asTHIS(Instruction &i)
{
    if (isTHIS(i))
        return (InstMovWT*)&i;
    return nullptr;
}

void
InstMovWT::extractImm(Imm16 *imm)
{
    *imm = Imm16(*this);
}
bool
InstMovWT::checkImm(Imm16 imm)
{
    return imm.decode() == Imm16(*this).decode();
}

void
InstMovWT::extractDest(Register *dest)
{
    *dest = toRD(*this);
}
bool
InstMovWT::checkDest(Register dest)
{
    return dest == toRD(*this);
}

bool
InstMovW::isTHIS(const Instruction &i)
{
    return (i.encode() & IsWTMask) == IsW;
}

InstMovW *
InstMovW::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstMovW*)&i;
    return nullptr;
}
InstMovT *
InstMovT::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstMovT*)&i;
    return nullptr;
}

bool
InstMovT::isTHIS(const Instruction &i)
{
    return (i.encode() & IsWTMask) == IsT;
}

InstALU *
InstALU::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstALU*)&i;
    return nullptr;
}
bool
InstALU::isTHIS(const Instruction &i)
{
    return (i.encode() & ALUMask) == 0;
}
void
InstALU::extractOp(ALUOp *ret)
{
    *ret = ALUOp(encode() & (0xf << 21));
}
bool
InstALU::checkOp(ALUOp op)
{
    ALUOp mine;
    extractOp(&mine);
    return mine == op;
}
void
InstALU::extractDest(Register *ret)
{
    *ret = toRD(*this);
}
bool
InstALU::checkDest(Register rd)
{
    return rd == toRD(*this);
}
void
InstALU::extractOp1(Register *ret)
{
    *ret = toRN(*this);
}
bool
InstALU::checkOp1(Register rn)
{
    return rn == toRN(*this);
}
Operand2
InstALU::extractOp2()
{
    return Operand2(encode());
}

InstCMP *
InstCMP::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstCMP*)&i;
    return nullptr;
}

bool
InstCMP::isTHIS(const Instruction &i)
{
    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
}

InstMOV *
InstMOV::asTHIS(const Instruction &i)
{
    if (isTHIS(i))
        return (InstMOV*)&i;
    return nullptr;
}

bool
InstMOV::isTHIS(const Instruction &i)
{
    return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
}

Op2Reg
Operand2::toOp2Reg() {
    return *(Op2Reg*)this;
}
O2RegImmShift
Op2Reg::toO2RegImmShift() {
    return *(O2RegImmShift*)this;
}
O2RegRegShift
Op2Reg::toO2RegRegShift() {
    return *(O2RegRegShift*)this;
}

Imm16::Imm16(Instruction &inst)
  : lower(inst.encode() & 0xfff),
    upper(inst.encode() >> 16),
    invalid(0xfff)
{ }

Imm16::Imm16(uint32_t imm)
  : lower(imm & 0xfff), pad(0),
    upper((imm >> 12) & 0xf),
    invalid(0)
{
    JS_ASSERT(decode() == imm);
}

Imm16::Imm16()
  : invalid(0xfff)
{ }
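
// Worked example (illustrative): movw/movt split their 16-bit immediate into
// a 12-bit "lower" field (instruction bits 11..0) and a 4-bit "upper" field
// (instruction bits 19..16). For imm == 0xABCD:
//   lower == 0xBCD, upper == 0xA
// and decode() reassembles (upper << 12) | lower == 0xABCD.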

void
jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
{
    // We need to determine if this jump can fit into the standard 24+2 bit
    // address, or if we need a larger branch (or just need to use our pool
    // entry).
    Instruction *jump = (Instruction*)jump_.raw();
    Assembler::Condition c;
    jump->extractCond(&c);
    JS_ASSERT(jump->is<InstBImm>() || jump->is<InstBLImm>());

    int jumpOffset = label.raw() - jump_.raw();
    if (BOffImm::isInRange(jumpOffset)) {
        // This instruction started off as a branch, and will remain one.
        Assembler::retargetNearBranch(jump, jumpOffset, c);
    } else {
        // This instruction started off as a branch, but now needs to be demoted to an ldr.
        uint8_t **slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
        Assembler::retargetFarBranch(jump, slot, label.raw(), c);
    }
}

void
Assembler::finish()
{
    flush();
    JS_ASSERT(!isFinished);
    isFinished = true;

    for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++) {
        int offset = tmpDataRelocations_[i].getOffset();
        int real_offset = offset + m_buffer.poolSizeBefore(offset);
        dataRelocations_.writeUnsigned(real_offset);
    }

    for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) {
        int offset = tmpJumpRelocations_[i].getOffset();
        int real_offset = offset + m_buffer.poolSizeBefore(offset);
        jumpRelocations_.writeUnsigned(real_offset);
    }

    for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++) {
        int offset = tmpPreBarriers_[i].getOffset();
        int real_offset = offset + m_buffer.poolSizeBefore(offset);
        preBarriers_.writeUnsigned(real_offset);
    }
}

void
Assembler::executableCopy(uint8_t *buffer)
{
    JS_ASSERT(isFinished);
    m_buffer.executableCopy(buffer);
    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}

void
Assembler::resetCounter()
{
    m_buffer.resetCounter();
}

uint32_t
Assembler::actualOffset(uint32_t off_) const
{
    return off_ + m_buffer.poolSizeBefore(off_);
}

uint32_t
Assembler::actualIndex(uint32_t idx_) const
{
    ARMBuffer::PoolEntry pe(idx_);
    return m_buffer.poolEntryOffset(pe);
}

uint8_t *
Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_)
{
    return code->raw() + pe_;
}

BufferOffset
Assembler::actualOffset(BufferOffset off_) const
{
    return BufferOffset(off_.getOffset() + m_buffer.poolSizeBefore(off_.getOffset()));
}
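
// Example (illustrative numbers): offsets recorded while assembling do not
// account for constant pools that get interleaved into the final stream.
// If a relocation was recorded at pre-pool offset 0x40 and an 8-byte pool
// was emitted somewhere before it, then poolSizeBefore(0x40) == 8 and
// actualOffset(0x40) == 0x48, the position in the final, pool-inclusive
// instruction stream.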

class RelocationIterator
{
    CompactBufferReader reader_;
    // offset in bytes
    uint32_t offset_;

  public:
    RelocationIterator(CompactBufferReader &reader)
      : reader_(reader)
    { }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
};

template<class Iter>
const uint32_t *
Assembler::getCF32Target(Iter *iter)
{
    Instruction *inst1 = iter->cur();
    Instruction *inst2 = iter->next();
    Instruction *inst3 = iter->next();
    Instruction *inst4 = iter->next();

    if (inst1->is<InstBranchImm>()) {
        // see if we have a simple case, b #offset
        BOffImm imm;
        InstBranchImm *jumpB = inst1->as<InstBranchImm>();
        jumpB->extractImm(&imm);
        return imm.getDest(inst1)->raw();
    }

    if (inst1->is<InstMovW>() && inst2->is<InstMovT>() &&
        (inst3->is<InstNOP>() || inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>()))
    {
        // see if we have the complex case,
        //   movw r_temp, #imm1
        //   movt r_temp, #imm2
        //   bx r_temp
        // OR
        //   movw r_temp, #imm1
        //   movt r_temp, #imm2
        //   str pc, [sp]
        //   bx r_temp

        Imm16 targ_bot;
        Imm16 targ_top;
        Register temp;

        // Extract both the temp register and the bottom immediate.
        InstMovW *bottom = inst1->as<InstMovW>();
        bottom->extractImm(&targ_bot);
        bottom->extractDest(&temp);

        // Extract the top part of the immediate.
        InstMovT *top = inst2->as<InstMovT>();
        top->extractImm(&targ_top);

        // Make sure they are being loaded into the same register.
        JS_ASSERT(top->checkDest(temp));

        // Make sure we're branching to the same register.
#ifdef DEBUG
        // A toggled call sometimes has a NOP instead of a branch for the third instruction.
        // No way to assert that it's valid in that situation.
        if (!inst3->is<InstNOP>()) {
            InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>()
                                                                   : inst4->as<InstBranchReg>();
            JS_ASSERT(realBranch->checkDest(temp));
        }
#endif

        uint32_t *dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
        return dest;
    }

    if (inst1->is<InstLDR>()) {
        InstLDR *load = inst1->as<InstLDR>();
        uint32_t inst = load->encode();
        // get the address of the instruction as a raw pointer
        char *dataInst = reinterpret_cast<char*>(load);
        IsUp_ iu = IsUp_(inst & IsUp);
        int32_t offset = inst & 0xfff;
        if (iu != IsUp)
            offset = -offset;
        uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
        return *ptr;
    }

    MOZ_ASSUME_UNREACHABLE("unsupported branch relocation");
}

uintptr_t
Assembler::getPointer(uint8_t *instPtr)
{
    InstructionIterator iter((Instruction*)instPtr);
    uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
    return ret;
}

template<class Iter>
const uint32_t *
Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
{
    Instruction *load1 = start->cur();
    Instruction *load2 = start->next();

    if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
        // see if we have the complex case,
        //   movw r_temp, #imm1
        //   movt r_temp, #imm2

        Imm16 targ_bot;
        Imm16 targ_top;
        Register temp;

        // Extract both the temp register and the bottom immediate.
        InstMovW *bottom = load1->as<InstMovW>();
        bottom->extractImm(&targ_bot);
        bottom->extractDest(&temp);

        // Extract the top part of the immediate.
        InstMovT *top = load2->as<InstMovT>();
        top->extractImm(&targ_top);

        // Make sure they are being loaded into the same register.
        JS_ASSERT(top->checkDest(temp));

        if (dest)
            *dest = temp;
        if (style)
            *style = L_MOVWT;

        uint32_t *value = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
        return value;
    }
    if (load1->is<InstLDR>()) {
        InstLDR *load = load1->as<InstLDR>();
        uint32_t inst = load->encode();
        // get the address of the instruction as a raw pointer
        char *dataInst = reinterpret_cast<char*>(load);
        IsUp_ iu = IsUp_(inst & IsUp);
        int32_t offset = inst & 0xfff;
        if (iu == IsDown)
            offset = -offset;
        if (dest)
            *dest = toRD(*load);
        if (style)
            *style = L_LDR;
        uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
        return *ptr;
    }
    MOZ_ASSUME_UNREACHABLE("unsupported relocation");
}

static JitCode *
CodeFromJump(InstructionIterator *jump)
{
    uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump);
    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        InstructionIterator institer((Instruction *)(code->raw() + iter.offset()));
        JitCode *child = CodeFromJump(&institer);
        MarkJitCodeUnbarriered(trc, &child, "rel32");
    }
}

static void
TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
{
    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        InstructionIterator iter((Instruction*)(buffer + offset));
        void *ptr = const_cast<uint32_t *>(js::jit::Assembler::getPtr32Target(&iter));
        // No barrier needed since these are constants.
        gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
    }
}
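
// Note on the pc-relative loads decoded above (illustrative): an ARM
// "ldr rt, [pc, #imm]" reads from the address of the instruction plus 8
// (the pipeline offset), plus or minus the 12-bit immediate. So a load at
// address A with an offset of 0x10 and the U (up) bit set reads the word at
// A + 0x18, which is exactly the &dataInst[offset + 8] computation.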

static void
TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer,
                     js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
{
    for (unsigned int idx = 0; idx < locs->length(); idx++) {
        BufferOffset bo = (*locs)[idx];
        ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer);
        void *ptr = const_cast<uint32_t *>(jit::Assembler::getPtr32Target(&iter));

        // No barrier needed since these are constants.
        gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
    }
}
void
Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    ::TraceDataRelocations(trc, code->raw(), reader);
}

void
Assembler::copyJumpRelocationTable(uint8_t *dest)
{
    if (jumpRelocations_.length())
        memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
}

void
Assembler::copyDataRelocationTable(uint8_t *dest)
{
    if (dataRelocations_.length())
        memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
}

void
Assembler::copyPreBarrierTable(uint8_t *dest)
{
    if (preBarriers_.length())
        memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
}

void
Assembler::trace(JSTracer *trc)
{
    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch &rp = jumps_[i];
        if (rp.kind == Relocation::JITCODE) {
            JitCode *code = JitCode::FromExecutable((uint8_t*)rp.target);
            MarkJitCodeUnbarriered(trc, &code, "masmrel32");
            JS_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
        }
    }

    if (tmpDataRelocations_.length())
        ::TraceDataRelocations(trc, &m_buffer, &tmpDataRelocations_);
}

void
Assembler::processCodeLabels(uint8_t *rawCode)
{
    for (size_t i = 0; i < codeLabels_.length(); i++) {
        CodeLabel label = codeLabels_[i];
        Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset()));
    }
}

void
Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) {
    JS_ASSERT(!absoluteLabel->bound());
    BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);

    // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of
    // uses of an AbsoluteLabel through the assembly. ARM only uses labels
    // for the case statements of switch jump tables. Thus, for simplicity, we
    // simply treat the AbsoluteLabel as a label and bind it to the offset of
    // the jump table entry that needs to be patched.
    LabelBase *label = absoluteLabel;
    label->bind(off.getOffset());
}

void
Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address)
{
    // See writeCodePointer comment.
    uint32_t off = actualOffset(label->offset());
    *reinterpret_cast<const void **>(rawCode + off) = address;
}

Assembler::Condition
Assembler::InvertCondition(Condition cond)
{
    const uint32_t ConditionInversionBit = 0x10000000;
    return Condition(ConditionInversionBit ^ cond);
}
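
// Worked example (illustrative): ARM condition codes are laid out so that
// complementary conditions differ only in bit 28, the low bit of the 4-bit
// condition field. XOR-ing with 0x10000000 therefore flips a condition to
// its inverse:
//   Equal (0x0 << 28) ^ 0x10000000 == NotEqual (0x1 << 28)
//   GreaterThanOrEqual (0xa << 28) ^ 0x10000000 == LessThan (0xb << 28)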

Imm8::TwoImm8mData
Imm8::encodeTwoImms(uint32_t imm)
{
    // In the ideal case, we are looking for a number that (in binary) looks like:
    //   0b((00)*)n_1((00)*)n_2((00)*)
    //      left  n1   mid  n2
    // where both n_1 and n_2 fit into 8 bits.
    // Since this is being done with rotates, we also need to handle the case
    // that one of these numbers is in fact split between the left and right
    // sides, in which case the constant will look like:
    //   0bn_1a((00)*)n_2((00)*)n_1b
    //     n1a   mid  n2  rgh n1b
    // Also remember: values are rotated by multiples of two, and left, mid or
    // right can have length zero.
    uint32_t imm1, imm2;
    int left = CountLeadingZeroes32(imm) & 0x1E;
    uint32_t no_n1 = imm & ~(0xff << (24 - left));

    // Not technically needed: this case only happens if we can encode as a
    // single imm8m. There is a perfectly reasonable encoding in this case,
    // but we shouldn't encourage people to do things like this.
    if (no_n1 == 0)
        return TwoImm8mData();

    int mid = CountLeadingZeroes32(no_n1) & 0x1E;
    uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));

    if (no_n2 == 0) {
        // We hit the easy case, no wraparound.
        // Note: a single constant *may* look like this.
        int imm1shift = left + 8;
        int imm2shift = mid + 8;
        imm1 = (imm >> (32 - imm1shift)) & 0xff;
        if (imm2shift >= 32) {
            imm2shift = 0;
            // This assert does not always hold:
            //   assert((imm & 0xff) == no_n1);
            // in fact, relying on it would lead to some incredibly subtle bugs.
            imm2 = no_n1;
        } else {
            imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
            JS_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
        }
        JS_ASSERT((imm1shift & 0x1) == 0);
        JS_ASSERT((imm2shift & 0x1) == 0);
        return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                            datastore::Imm8mData(imm2, imm2shift >> 1));
    }

    // Either it wraps, or it does not fit. If we initially chopped off more
    // than 8 bits, then it won't fit.
    if (left >= 8)
        return TwoImm8mData();

    int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
    // All remaining set bits *must* fit into the lower 8 bits.
    // The right == 8 case should be handled by the previous case.
    if (right > 8)
        return TwoImm8mData();

    // Make sure the initial bits that we removed for no_n1 fit into the
    // 8-(32-right) leftmost bits.
    if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
        // BUT we may have removed more bits than we needed to for no_n1.
        // Take 0x04104001, for example: we can encode 0x104 with a single op,
        // then 0x04000001 with a second, but we try to encode 0x04100000
        // and find that we need a second op for 0x4000, and 0x1 cannot
        // be included in the encoding of 0x04100000.
        no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
        mid = CountLeadingZeroes32(no_n1) & 30;
        no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
        if (no_n2 != 0)
            return TwoImm8mData();
    }

    // Now assemble all of this information into two coherent constants. The
    // first is a rotate right from the lower 8 bits.
michael@0: int imm1shift = 8 - right; michael@0: imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift))); michael@0: JS_ASSERT ((imm1shift&~0x1e) == 0); michael@0: // left + 8 + mid is the position of the leftmost bit of n_2. michael@0: // we needed to rotate 0x000000ab right by 8 in order to get michael@0: // 0xab000000, then shift again by the leftmost bit in order to michael@0: // get the constant that we care about. michael@0: int imm2shift = mid + 8; michael@0: imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff; michael@0: JS_ASSERT((imm1shift & 0x1) == 0); michael@0: JS_ASSERT((imm2shift & 0x1) == 0); michael@0: return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1), michael@0: datastore::Imm8mData(imm2, imm2shift >> 1)); michael@0: } michael@0: michael@0: ALUOp michael@0: jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest) michael@0: { michael@0: // find an alternate ALUOp to get the job done, and use a different imm. michael@0: *negDest = dest; michael@0: switch (op) { michael@0: case op_mov: michael@0: *imm = Imm32(~imm->value); michael@0: return op_mvn; michael@0: case op_mvn: michael@0: *imm = Imm32(~imm->value); michael@0: return op_mov; michael@0: case op_and: michael@0: *imm = Imm32(~imm->value); michael@0: return op_bic; michael@0: case op_bic: michael@0: *imm = Imm32(~imm->value); michael@0: return op_and; michael@0: case op_add: michael@0: *imm = Imm32(-imm->value); michael@0: return op_sub; michael@0: case op_sub: michael@0: *imm = Imm32(-imm->value); michael@0: return op_add; michael@0: case op_cmp: michael@0: *imm = Imm32(-imm->value); michael@0: return op_cmn; michael@0: case op_cmn: michael@0: *imm = Imm32(-imm->value); michael@0: return op_cmp; michael@0: case op_tst: michael@0: JS_ASSERT(dest == InvalidReg); michael@0: *imm = Imm32(~imm->value); michael@0: *negDest = ScratchRegister; michael@0: return op_bic; michael@0: // orr has orn on thumb2 only. michael@0: default: michael@0: return op_invalid; michael@0: } michael@0: } michael@0: michael@0: bool michael@0: jit::can_dbl(ALUOp op) michael@0: { michael@0: // some instructions can't be processed as two separate instructions michael@0: // such as and, and possibly add (when we're setting ccodes). michael@0: // there is also some hilarity with *reading* condition codes. michael@0: // for example, adc dest, src1, 0xfff; (add with carry) can be split up michael@0: // into adc dest, src1, 0xf00; add dest, dest, 0xff, since "reading" the michael@0: // condition code increments the result by one conditionally, that only needs michael@0: // to be done on one of the two instructions. michael@0: switch (op) { michael@0: case op_bic: michael@0: case op_add: michael@0: case op_sub: michael@0: case op_eor: michael@0: case op_orr: michael@0: return true; michael@0: default: michael@0: return false; michael@0: } michael@0: } michael@0: michael@0: bool michael@0: jit::condsAreSafe(ALUOp op) { michael@0: // Even when we are setting condition codes, sometimes we can michael@0: // get away with splitting an operation into two. michael@0: // for example, if our immediate is 0x00ff00ff, and the operation is eors michael@0: // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should michael@0: // set all of its condition codes exactly the same as x ^ 0x00ff00ff. michael@0: // However, if the operation were adds, michael@0: // we cannot split this in half. 

bool
jit::condsAreSafe(ALUOp op) {
    // Even when we are setting condition codes, sometimes we can get away
    // with splitting an operation into two. For example, if our immediate is
    // 0x00ff00ff and the operation is eors, we can split this in half, since
    // x ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes
    // exactly the same as x ^ 0x00ff00ff. However, if the operation were
    // adds, we cannot split this in half. If the source on the add is
    // 0xfff00ff0, the result should be 0xef10ef, but do we set the overflow
    // bit or not? Depending on which half is performed first (0x00ff0000 or
    // 0x000000ff), the V bit will be set differently, and *not* updating the
    // V bit would be wrong. Theoretically, the following should work:
    //   adds r0, r1, 0x00ff0000;
    //   addsvs r0, r1, 0x000000ff;
    //   addvc r0, r1, 0x000000ff;
    // but this is 3 instructions, and at that point, we might as well use
    // something else.
    switch (op) {
      case op_bic:
      case op_orr:
      case op_eor:
        return true;
      default:
        return false;
    }
}

ALUOp
jit::getDestVariant(ALUOp op)
{
    // All of the compare operations are dest-less variants of a standard
    // operation. Given the dest-less variant, return the dest-ful variant.
    switch (op) {
      case op_cmp:
        return op_sub;
      case op_cmn:
        return op_add;
      case op_tst:
        return op_and;
      case op_teq:
        return op_eor;
      default:
        return op;
    }
}

O2RegImmShift
jit::O2Reg(Register r) {
    return O2RegImmShift(r, LSL, 0);
}

O2RegImmShift
jit::lsl(Register r, int amt)
{
    JS_ASSERT(0 <= amt && amt <= 31);
    return O2RegImmShift(r, LSL, amt);
}

O2RegImmShift
jit::lsr(Register r, int amt)
{
    JS_ASSERT(1 <= amt && amt <= 32);
    return O2RegImmShift(r, LSR, amt);
}

O2RegImmShift
jit::ror(Register r, int amt)
{
    JS_ASSERT(1 <= amt && amt <= 31);
    return O2RegImmShift(r, ROR, amt);
}
O2RegImmShift
jit::rol(Register r, int amt)
{
    JS_ASSERT(1 <= amt && amt <= 31);
    return O2RegImmShift(r, ROR, 32 - amt);
}

O2RegImmShift
jit::asr(Register r, int amt)
{
    JS_ASSERT(1 <= amt && amt <= 32);
    return O2RegImmShift(r, ASR, amt);
}


O2RegRegShift
jit::lsl(Register r, Register amt)
{
    return O2RegRegShift(r, LSL, amt);
}

O2RegRegShift
jit::lsr(Register r, Register amt)
{
    return O2RegRegShift(r, LSR, amt);
}

O2RegRegShift
jit::ror(Register r, Register amt)
{
    return O2RegRegShift(r, ROR, amt);
}

O2RegRegShift
jit::asr(Register r, Register amt)
{
    return O2RegRegShift(r, ASR, amt);
}

static js::jit::DoubleEncoder doubleEncoder;

/* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000);

js::jit::VFPImm::VFPImm(uint32_t top)
{
    data = -1;
    datastore::Imm8VFPImmData tmp;
    if (doubleEncoder.lookup(top, &tmp))
        data = tmp.encode();
}
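
// Note on rol above (illustrative): ARM has no rotate-left operand encoding,
// so a rotate left by n is emitted as a rotate right by 32 - n. For example,
// rol(r0, 4) produces the same operand as ror(r0, 28), since rotating a
// 32-bit value left by 4 and right by 28 are the same permutation of bits.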

BOffImm::BOffImm(Instruction &inst)
  : data(inst.encode() & 0x00ffffff)
{
}

Instruction *
BOffImm::getDest(Instruction *src)
{
    // TODO: It is probably worthwhile to verify that src is actually a branch.
    // NOTE: This does not explicitly shift the offset of the destination left by 2,
    // since it is indexing into an array of instruction sized objects.
    return &src[(((int32_t)data << 8) >> 8) + 2];
}

// VFPRegister implementation
VFPRegister
VFPRegister::doubleOverlay() const
{
    JS_ASSERT(!_isInvalid);
    if (kind != Double) {
        JS_ASSERT(_code % 2 == 0);
        return VFPRegister(_code >> 1, Double);
    }
    return *this;
}
VFPRegister
VFPRegister::singleOverlay() const
{
    JS_ASSERT(!_isInvalid);
    if (kind == Double) {
        // There are no corresponding float registers for d16-d31.
        JS_ASSERT(_code < 16);
        return VFPRegister(_code << 1, Single);
    }

    JS_ASSERT(_code % 2 == 0);
    return VFPRegister(_code, Single);
}

VFPRegister
VFPRegister::sintOverlay() const
{
    JS_ASSERT(!_isInvalid);
    if (kind == Double) {
        // There are no corresponding float registers for d16-d31.
        JS_ASSERT(_code < 16);
        return VFPRegister(_code << 1, Int);
    }

    JS_ASSERT(_code % 2 == 0);
    return VFPRegister(_code, Int);
}
VFPRegister
VFPRegister::uintOverlay() const
{
    JS_ASSERT(!_isInvalid);
    if (kind == Double) {
        // There are no corresponding float registers for d16-d31.
        JS_ASSERT(_code < 16);
        return VFPRegister(_code << 1, UInt);
    }

    JS_ASSERT(_code % 2 == 0);
    return VFPRegister(_code, UInt);
}

bool
VFPRegister::isInvalid()
{
    return _isInvalid;
}

bool
VFPRegister::isMissing()
{
    JS_ASSERT(!_isInvalid);
    return _isMissing;
}
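
// Worked example (illustrative): VFP singles alias doubles, with d(n)
// overlapping s(2n) and s(2n+1). So doubleOverlay() of s6 (code 6) yields
// d3 (6 >> 1), and singleOverlay() of d3 yields s6 (3 << 1). Only d0-d15
// have single-precision aliases, hence the _code < 16 assertions above.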


bool
Assembler::oom() const
{
    return m_buffer.oom() ||
           !enoughMemory_ ||
           jumpRelocations_.oom() ||
           dataRelocations_.oom() ||
           preBarriers_.oom();
}

bool
Assembler::addCodeLabel(CodeLabel label)
{
    return codeLabels_.append(label);
}

// Size of the instruction stream, in bytes. Including pools. This function
// expects all pools that need to be placed have been placed. If they haven't
// been, then we need to go and flush the pools :(
size_t
Assembler::size() const
{
    return m_buffer.size();
}
// Size of the relocation table, in bytes.
size_t
Assembler::jumpRelocationTableBytes() const
{
    return jumpRelocations_.length();
}
size_t
Assembler::dataRelocationTableBytes() const
{
    return dataRelocations_.length();
}

size_t
Assembler::preBarrierTableBytes() const
{
    return preBarriers_.length();
}

// Size of the data table, in bytes.
size_t
Assembler::bytesNeeded() const
{
    return size() +
           jumpRelocationTableBytes() +
           dataRelocationTableBytes() +
           preBarrierTableBytes();
}

// Write a blob of binary into the instruction stream.
BufferOffset
Assembler::writeInst(uint32_t x, uint32_t *dest)
{
    if (dest == nullptr)
        return m_buffer.putInt(x);

    writeInstStatic(x, dest);
    return BufferOffset();
}
void
Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
{
    JS_ASSERT(dest != nullptr);
    *dest = x;
}

BufferOffset
Assembler::align(int alignment)
{
    BufferOffset ret;
    if (alignment == 8) {
        while (!m_buffer.isAligned(alignment)) {
            BufferOffset tmp = as_nop();
            if (!ret.assigned())
                ret = tmp;
        }
    } else {
        flush();
        JS_ASSERT((alignment & (alignment - 1)) == 0);
        while (size() & (alignment - 1)) {
            BufferOffset tmp = as_nop();
            if (!ret.assigned())
                ret = tmp;
        }
    }
    return ret;
}
BufferOffset
Assembler::as_nop()
{
    return writeInst(0xe320f000);
}
BufferOffset
Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                  ALUOp op, SetCond_ sc, Condition c, Instruction *instdest)
{
    return writeInst((int)op | (int)sc | (int)c | op2.encode() |
                     ((dest == InvalidReg) ? 0 : RD(dest)) |
                     ((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest);
}

BufferOffset
Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest)
{
    return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest);
}

BufferOffset
Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
}
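
// Worked example (illustrative): composing an ALU instruction from the
// fields above. For as_mov(r0, O2Reg(r1)) with the default Always condition
// and NoSetCond, the pieces are the mov opcode bits, the condition (0xe in
// bits 31..28), RD(r0) == 0, and the register operand r1, which add up to
// 0xe1a00001, the canonical encoding of "mov r0, r1".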

// Logical operations.
BufferOffset
Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_and, sc, c);
}
BufferOffset
Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_bic, sc, c);
}
BufferOffset
Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_eor, sc, c);
}
BufferOffset
Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_orr, sc, c);
}

// Mathematical operations.
BufferOffset
Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_adc, sc, c);
}
BufferOffset
Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_add, sc, c);
}
BufferOffset
Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_sbc, sc, c);
}
BufferOffset
Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_sub, sc, c);
}
BufferOffset
Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_rsb, sc, c);
}
BufferOffset
Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
{
    return as_alu(dest, src1, op2, op_rsc, sc, c);
}

// Test operations.
BufferOffset
Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
{
    return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
}
BufferOffset
Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
{
    return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
}
BufferOffset
Assembler::as_teq(Register src1, Operand2 op2, Condition c)
{
    return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
}
BufferOffset
Assembler::as_tst(Register src1, Operand2 op2, Condition c)
{
    return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
}
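
// Usage example (illustrative): the test operations have no destination and
// always set the condition flags, so
//   as_cmp(r0, Imm8(0));
// emits "cmp r0, #0" (0xe3500000), after which a conditional instruction or
// branch can key off the resulting flags.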

// Not quite ALU worthy, but useful nonetheless. These also have the issue of
// being formatted completely differently from the standard ALU operations.
BufferOffset
Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos)
{
    JS_ASSERT(hasMOVWT());
    return writeInst(0x03000000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
}
BufferOffset
Assembler::as_movt(Register dest, Imm16 imm, Condition c, Instruction *pos)
{
    JS_ASSERT(hasMOVWT());
    return writeInst(0x03400000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
}

static const int mull_tag = 0x90;

BufferOffset
Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
                     MULOp op, SetCond_ sc, Condition c)
{
    return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
}
BufferOffset
Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
{
    return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c);
}
BufferOffset
Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
                  SetCond_ sc, Condition c)
{
    return as_genmul(dest, acc, src1, src2, opm_mla, sc, c);
}
BufferOffset
Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
{
    return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c);
}
BufferOffset
Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
{
    return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c);
}

BufferOffset
Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
                    SetCond_ sc, Condition c)
{
    return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c);
}

BufferOffset
Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
                    SetCond_ sc, Condition c)
{
    return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c);
}

BufferOffset
Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
                    SetCond_ sc, Condition c)
{
    return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c);
}

BufferOffset
Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
                    SetCond_ sc, Condition c)
{
    return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c);
}

BufferOffset
Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
{
    return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
}

BufferOffset
Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c)
{
    return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
}
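
// Worked example (illustrative): as_sdiv(r0, r1, r2) with the Always
// condition produces
//   0x0710f010 | 0xe0000000 | RN(r0) | RM(r2) | r1.code() == 0xe710f211
// which is "sdiv r0, r1, r2": Rd in bits 19..16, Rm in bits 11..8, and
// Rn in bits 3..0.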

// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
// differentiate between 8 bits and 32 bits is overkill, but meh.
BufferOffset
Assembler::as_dtr(LoadStore ls, int size, Index mode,
                  Register rt, DTRAddr addr, Condition c, uint32_t *dest)
{
    JS_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
    JS_ASSERT(size == 32 || size == 8);
    return writeInst(0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
                     RT(rt) | addr.encode(), dest);
}
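
// Usage example (illustrative): a word load from the stack,
//   as_dtr(IsLoad, 32, Offset, r0, DTRAddr(sp, DtrOffImm(8)));
// assembles to "ldr r0, [sp, #8]" (0xe59d0008): the base register, the
// positive 12-bit offset, and the load bit are all OR'ed into the 0x04000000
// data-transfer template above.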

class PoolHintData {
  public:
    enum LoadType {
        // Set 0 to bogus, since that is the value most likely to be
        // accidentally left somewhere.
        poolBOGUS  = 0,
        poolDTR    = 1,
        poolBranch = 2,
        poolVDTR   = 3
    };

  private:
    uint32_t   index : 16;
    uint32_t   cond : 4;
    LoadType   loadType : 2;
    uint32_t   destReg : 5;
    uint32_t   destType : 1;
    uint32_t   ONES : 4;

    static const uint32_t expectedOnes = 0xfu;

  public:
    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const Register &destReg_) {
        index = index_;
        JS_ASSERT(index == index_);
        cond = cond_ >> 28;
        JS_ASSERT(cond == cond_ >> 28);
        loadType = lt;
        ONES = expectedOnes;
        destReg = destReg_.code();
        destType = 0;
    }
    void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) {
        JS_ASSERT(destReg_.isFloat());
        index = index_;
        JS_ASSERT(index == index_);
        cond = cond_ >> 28;
        JS_ASSERT(cond == cond_ >> 28);
        loadType = lt;
        ONES = expectedOnes;
        destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code();
        destType = destReg_.isDouble();
    }
    Assembler::Condition getCond() {
        return Assembler::Condition(cond << 28);
    }

    Register getReg() {
        return Register::FromCode(destReg);
    }
    VFPRegister getVFPReg() {
        VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg));
        return destType ? r : r.singleOverlay();
    }

    int32_t getIndex() {
        return index;
    }
    void setIndex(uint32_t index_) {
        JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS);
        index = index_;
        JS_ASSERT(index == index_);
    }

    LoadType getLoadType() {
        // If this *was* a poolBranch, but the branch has already been bound,
        // then this isn't going to look like a real poolhintdata, but we
        // still want to lie about it so everyone knows it *used* to be a
        // branch.
        if (ONES != expectedOnes)
            return PoolHintData::poolBranch;
        return loadType;
    }

    bool isValidPoolHint() {
        // Most instructions cannot have a condition that is 0xf. Notable
        // exceptions are blx and the entire NEON instruction set. For the
        // purposes of pool loads, and possibly patched branches, the possible
        // instructions are ldr and b, neither of which can have a condition
        // code of 0xf.
        return ONES == expectedOnes;
    }
};

union PoolHintPun {
    PoolHintData phd;
    uint32_t raw;
};

// Handles all of the other integral data transferring functions:
// ldrsb, ldrsh, ldrd, etc.
// size is given in bits.
BufferOffset
Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
                     Register rt, EDtrAddr addr, Condition c, uint32_t *dest)
{
    int extra_bits2 = 0;
    int extra_bits1 = 0;
    switch (size) {
      case 8:
        JS_ASSERT(IsSigned);
        JS_ASSERT(ls != IsStore);
        extra_bits1 = 0x1;
        extra_bits2 = 0x2;
        break;
      case 16:
        // The 32-bit case doesn't need to be handled here: it is handled by
        // the default ldr/str.
        extra_bits2 = 0x01;
        extra_bits1 = (ls == IsStore) ? 0 : 1;
        if (IsSigned) {
            JS_ASSERT(ls != IsStore);
            extra_bits2 |= 0x2;
        }
        break;
      case 64:
        extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
        extra_bits1 = 0;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("SAY WHAT?");
    }
    return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
                     addr.encode() | RT(rt) | mode | c, dest);
}

BufferOffset
Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                  DTMMode mode, DTMWriteBack wb, Condition c)
{
    return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}

BufferOffset
Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
{
    PoolHintPun php;
    php.phd.init(0, c, PoolHintData::poolDTR, dest);
    return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value);
}

void
Assembler::as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data)
{
    JS_ASSERT(addr->is<InstLDR>());
    int32_t offset = addr->encode() & 0xfff;
    if ((addr->encode() & IsUp) != IsUp)
        offset = -offset;
    char *rawAddr = reinterpret_cast<char*>(addr);
    uint32_t *dest = reinterpret_cast<uint32_t*>(&rawAddr[offset + 8]);
    *dest = data;
    Condition orig_cond;
    addr->extractCond(&orig_cond);
    JS_ASSERT(orig_cond == c);
}
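
// Packing sketch (illustrative, assuming the usual low-to-high bitfield
// allocation): a pool hint word stores the pool index in bits 15..0, the
// condition in bits 19..16, the load type in bits 21..20, the destination
// register in bits 26..22, the register type in bit 27, and all-ones in
// bits 31..28 as the "this cannot be a real ldr/b" marker. For example,
// init(5, Assembler::Always, poolDTR, r0) yields the raw word 0xf01e0005.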
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c)
michael@0: {
michael@0:     PoolHintPun php;
michael@0:     php.phd.init(0, c, PoolHintData::poolBranch, pc);
michael@0:     m_buffer.markNextAsBranch();
michael@0:     BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe);
michael@0:     // If this label is already bound, then immediately replace the stub load
michael@0:     // with a correct branch.
michael@0:     if (label->bound()) {
michael@0:         BufferOffset dest(label);
michael@0:         as_b(dest.diffB<BOffImm>(ret), c, ret);
michael@0:     } else {
michael@0:         label->use(ret.getOffset());
michael@0:     }
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c)
michael@0: {
michael@0:     JS_ASSERT(dest.isDouble());
michael@0:     PoolHintPun php;
michael@0:     php.phd.init(0, c, PoolHintData::poolVDTR, dest);
michael@0:     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value);
michael@0: }
michael@0: 
michael@0: struct PaddedFloat32
michael@0: {
michael@0:     float value;
michael@0:     uint32_t padding;
michael@0: };
michael@0: JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double));
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c)
michael@0: {
michael@0:     /*
michael@0:      * Insert floats into the double pool as they have the same limitations on
michael@0:      * immediate offset. This wastes 4 bytes padding per float. An alternative
michael@0:      * would be to have a separate pool for floats.
michael@0:      */
michael@0:     JS_ASSERT(dest.isSingle());
michael@0:     PoolHintPun php;
michael@0:     php.phd.init(0, c, PoolHintData::poolVDTR, dest);
michael@0:     PaddedFloat32 pf = { value, 0 };
michael@0:     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf);
michael@0: }
michael@0: 
michael@0: // Pool callbacks stuff:
michael@0: void
michael@0: Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
michael@0: {
michael@0:     uint32_t *load = (uint32_t*) load_;
michael@0:     PoolHintPun php;
michael@0:     php.raw = *load;
michael@0:     php.phd.setIndex(token);
michael@0:     *load = php.raw;
michael@0: }
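michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of why PaddedFloat32 above is sized
michael@0: // like a double: double-pool entries are addressed with an 8-byte stride
michael@0: // (the 8 * index term in patchConstantPoolLoad() below), so a float must
michael@0: // occupy a full slot. The numbers are illustrative.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: struct PaddedFloat32Sketch {
michael@0:     float value;
michael@0:     uint32_t padding;
michael@0: };
michael@0: 
michael@0: int main() {
michael@0:     static_assert(sizeof(PaddedFloat32Sketch) == sizeof(double),
michael@0:                   "a float entry must fill an entire double-pool slot");
michael@0:     // Mirroring the poolVDTR case below: the vldr immediate is the distance
michael@0:     // from pc (load address + 8) to the slot.
michael@0:     int offsetToPool = 64;                 // load-to-pool distance, in bytes
michael@0:     int index = 2;                         // third 8-byte slot in the pool
michael@0:     int32_t imm = offsetToPool + 8 * index - 8;
michael@0:     assert(imm >= -1023 && imm <= 1023);   // vldr's reach, as checked below
michael@0:     return 0;
michael@0: }
michael@0: #endif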
michael@0: 
michael@0: // patchConstantPoolLoad takes the address of the instruction that wants to be
michael@0: // patched and the address of the start of the constant pool, and figures
michael@0: // things out from there.
michael@0: bool
michael@0: Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
michael@0: {
michael@0:     PoolHintData data = *(PoolHintData*)loadAddr;
michael@0:     uint32_t *instAddr = (uint32_t*) loadAddr;
michael@0:     int offset = (char *)constPoolAddr - (char *)loadAddr;
michael@0:     switch (data.getLoadType()) {
michael@0:       case PoolHintData::poolBOGUS:
michael@0:         MOZ_ASSUME_UNREACHABLE("bogus load type!");
michael@0:       case PoolHintData::poolDTR:
michael@0:         dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
michael@0:                       DTRAddr(pc, DtrOffImm(offset + 4*data.getIndex() - 8)), data.getCond(), instAddr);
michael@0:         break;
michael@0:       case PoolHintData::poolBranch:
michael@0:         // Either this used to be a poolBranch and the label was already
michael@0:         // bound, so it was replaced with a real branch, or the binding may
michael@0:         // happen in the future. If it is going to happen in the future, then
michael@0:         // the actual bits that are written here don't matter (except the
michael@0:         // condition code, since that is always preserved across patchings),
michael@0:         // but if the label does not get bound later, then we want to make
michael@0:         // sure this is a load from the pool entry (and the pool entry should
michael@0:         // be nullptr so it will crash).
michael@0:         if (data.isValidPoolHint()) {
michael@0:             dummy->as_dtr(IsLoad, 32, Offset, pc,
michael@0:                           DTRAddr(pc, DtrOffImm(offset + 4*data.getIndex() - 8)),
michael@0:                           data.getCond(), instAddr);
michael@0:         }
michael@0:         break;
michael@0:       case PoolHintData::poolVDTR: {
michael@0:         VFPRegister dest = data.getVFPReg();
michael@0:         int32_t imm = offset + (8 * data.getIndex()) - 8;
michael@0:         if (imm < -1023 || imm > 1023)
michael@0:             return false;
michael@0:         dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
michael@0:         break;
michael@0:       }
michael@0:     }
michael@0:     return true;
michael@0: }
michael@0: 
michael@0: uint32_t
michael@0: Assembler::placeConstantPoolBarrier(int offset)
michael@0: {
michael@0:     // BUG: 700526
michael@0:     // This is still an active path; however, we do not hit it in the test
michael@0:     // suite at all.
michael@0:     MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover");
michael@0: }
michael@0: 
michael@0: // Control flow stuff:
michael@0: 
michael@0: // bx can *only* branch to a register, never to an immediate.
michael@0: BufferOffset
michael@0: Assembler::as_bx(Register r, Condition c, bool isPatchable)
michael@0: {
michael@0:     BufferOffset ret = writeInst(((int) c) | op_bx | r.code());
michael@0:     if (c == Always && !isPatchable)
michael@0:         m_buffer.markGuard();
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
michael@0: {
michael@0:     BOffImm off = afterPool.diffB<BOffImm>(branch);
michael@0:     *dest = InstBImm(off, Always);
michael@0: }
michael@0: 
michael@0: // Branch can branch to an immediate *or* to a register.
michael@0: // Branches to immediates are pc-relative; branches to registers are absolute.
michael@0: BufferOffset
michael@0: Assembler::as_b(BOffImm off, Condition c, bool isPatchable)
michael@0: {
michael@0:     m_buffer.markNextAsBranch();
michael@0:     BufferOffset ret = writeInst(((int)c) | op_b | off.encode());
michael@0:     if (c == Always && !isPatchable)
michael@0:         m_buffer.markGuard();
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_b(Label *l, Condition c, bool isPatchable)
michael@0: {
michael@0:     if (m_buffer.oom()) {
michael@0:         BufferOffset ret;
michael@0:         return ret;
michael@0:     }
michael@0:     m_buffer.markNextAsBranch();
michael@0:     if (l->bound()) {
michael@0:         BufferOffset ret = as_nop();
michael@0:         as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
michael@0:         return ret;
michael@0:     }
michael@0: 
michael@0:     int32_t old;
michael@0:     BufferOffset ret;
michael@0:     if (l->used()) {
michael@0:         old = l->offset();
michael@0:         // Fail the buffer if we couldn't actually encode the offset of the
michael@0:         // branch.
michael@0:         if (!BOffImm::isInRange(old)) {
michael@0:             m_buffer.fail_bail();
michael@0:             return ret;
michael@0:         }
michael@0:         ret = as_b(BOffImm(old), c, isPatchable);
michael@0:     } else {
michael@0:         old = LabelBase::INVALID_OFFSET;
michael@0:         BOffImm inv;
michael@0:         ret = as_b(inv, c, isPatchable);
michael@0:     }
michael@0:     DebugOnly<int32_t> check = l->use(ret.getOffset());
michael@0:     JS_ASSERT(check == old);
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
michael@0: {
michael@0:     *editSrc(inst) = InstBImm(off, c);
michael@0:     return inst;
michael@0: }
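michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the branch-offset bias that BOffImm
michael@0: // hides, assuming the usual ARM rule: a b/bl at address A targeting T encodes
michael@0: // imm24 = (T - A - 8) >> 2, because pc reads as A + 8. encodeBranch and
michael@0: // decodeBranch are hypothetical helpers, not the real BOffImm API.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: static uint32_t encodeBranch(int32_t byteOffset) {
michael@0:     assert((byteOffset & 3) == 0);             // branches are word aligned
michael@0:     return ((byteOffset - 8) >> 2) & 0x00ffffff;
michael@0: }
michael@0: 
michael@0: static int32_t decodeBranch(uint32_t imm24) {
michael@0:     int32_t signExtended = (int32_t)(imm24 << 8) >> 8;  // restore the sign
michael@0:     return (signExtended << 2) + 8;
michael@0: }
michael@0: 
michael@0: int main() {
michael@0:     // A branch to the very next instruction (offset 4) is the "bnop" that
michael@0:     // InstIsBNop() recognizes further down: it encodes as imm24 = -1.
michael@0:     assert(encodeBranch(4) == 0x00ffffff);
michael@0:     assert(decodeBranch(0x00ffffff) == 4);
michael@0:     // Round-trip a backward branch as well.
michael@0:     assert(decodeBranch(encodeBranch(-64)) == -64);
michael@0:     return 0;
michael@0: }
michael@0: #endif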
michael@0: 
michael@0: // blx can go to either an immediate or a register.
michael@0: // When blx'ing to a register, we change processor state depending on the low
michael@0: // bit of the register. When blx'ing to an immediate, we *always* change
michael@0: // processor state.
michael@0: BufferOffset
michael@0: Assembler::as_blx(Register r, Condition c)
michael@0: {
michael@0:     return writeInst(((int) c) | op_blx | r.code());
michael@0: }
michael@0: 
michael@0: // bl can only branch to a pc-relative immediate offset.
michael@0: // It cannot change the processor state.
michael@0: BufferOffset
michael@0: Assembler::as_bl(BOffImm off, Condition c)
michael@0: {
michael@0:     m_buffer.markNextAsBranch();
michael@0:     return writeInst(((int)c) | op_bl | off.encode());
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_bl(Label *l, Condition c)
michael@0: {
michael@0:     if (m_buffer.oom()) {
michael@0:         BufferOffset ret;
michael@0:         return ret;
michael@0:     }
michael@0:     m_buffer.markNextAsBranch();
michael@0:     if (l->bound()) {
michael@0:         BufferOffset ret = as_nop();
michael@0:         as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
michael@0:         return ret;
michael@0:     }
michael@0: 
michael@0:     int32_t old;
michael@0:     BufferOffset ret;
michael@0:     // See if the list was empty :(
michael@0:     if (l->used()) {
michael@0:         // Fail the buffer if we couldn't actually encode the offset of the
michael@0:         // branch.
michael@0:         old = l->offset();
michael@0:         if (!BOffImm::isInRange(old)) {
michael@0:             m_buffer.fail_bail();
michael@0:             return ret;
michael@0:         }
michael@0:         ret = as_bl(BOffImm(old), c);
michael@0:     } else {
michael@0:         old = LabelBase::INVALID_OFFSET;
michael@0:         BOffImm inv;
michael@0:         ret = as_bl(inv, c);
michael@0:     }
michael@0:     DebugOnly<int32_t> check = l->use(ret.getOffset());
michael@0:     JS_ASSERT(check == old);
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
michael@0: {
michael@0:     *editSrc(inst) = InstBLImm(off, c);
michael@0:     return inst;
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_mrs(Register r, Condition c)
michael@0: {
michael@0:     return writeInst(0x010f0000 | int(c) | RD(r));
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_msr(Register r, Condition c)
michael@0: {
michael@0:     // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
michael@0:     // are the two high bits of the 'c' in this constant.
michael@0:     JS_ASSERT((r.code() & ~0xf) == 0);
michael@0:     return writeInst(0x012cf000 | int(c) | r.code());
michael@0: }
michael@0: 
michael@0: // VFP instructions!
michael@0: enum vfp_tags {
michael@0:     vfp_tag   = 0x0C000A00,
michael@0:     vfp_arith = 0x02000000
michael@0: };
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest)
michael@0: {
michael@0:     JS_ASSERT((sz & blob) == 0);
michael@0:     JS_ASSERT((vfp_tag & blob) == 0);
michael@0:     return writeInst(vfp_tag | sz | blob, dest);
michael@0: }
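michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the disjoint-field composition
michael@0: // writeVFPInst() asserts above: the fixed vfp_tag bits, the size bit, and
michael@0: // the per-instruction blob must not overlap, so OR-ing them composes
michael@0: // independent fields. The sz and blob values are stand-ins.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: int main() {
michael@0:     const uint32_t tag  = 0x0C000A00;  // vfp_tag above
michael@0:     const uint32_t sz   = 0x00000100;  // stand-in for the size bit
michael@0:     const uint32_t blob = 0x02B00040;  // stand-in instruction body
michael@0:     assert((sz & blob) == 0);          // the same checks writeVFPInst makes
michael@0:     assert((tag & blob) == 0);
michael@0:     uint32_t inst = tag | sz | blob;
michael@0:     // Because the fields are disjoint, each can be recovered by masking.
michael@0:     assert((inst & tag) == tag && (inst & sz) == sz && (inst & blob) == blob);
michael@0:     return 0;
michael@0: }
michael@0: #endif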
michael@0: 
michael@0: // Unityped variants: all registers hold the same (ieee754 single/double).
michael@0: // Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
michael@0: BufferOffset
michael@0: Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                         VFPOp op, Condition c)
michael@0: {
michael@0:     // Make sure we believe that all of our operands are the same kind.
michael@0:     JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
michael@0:     JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
michael@0:     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
michael@0:     return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                    Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, vn, vm, opv_add, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                    Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, vn, vm, opv_div, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                    Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, vn, vm, opv_mul, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                     Condition c)
michael@0: {
michael@0:     // Not yet implemented: this currently emits a plain vmul, and the
michael@0:     // assertion below is unreachable.
michael@0:     return as_vfp_float(vd, vn, vm, opv_mul, c);
michael@0:     MOZ_ASSUME_UNREACHABLE("Feature NYI");
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                     Condition c)
michael@0: {
michael@0:     MOZ_ASSUME_UNREACHABLE("Feature NYI");
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                     Condition c)
michael@0: {
michael@0:     MOZ_ASSUME_UNREACHABLE("Feature NYI");
michael@0:     return BufferOffset();
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
michael@0:                    Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, vn, vm, opv_sub, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
michael@0:                    Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vcmpz(VFPRegister vd, Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c);
michael@0: }
michael@0: 
michael@0: // Specifically, a move between two same-sized registers.
michael@0: BufferOffset
michael@0: Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
michael@0: {
michael@0:     return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
michael@0: }
michael@0: 
michael@0: // Transfer between Core and VFP.
michael@0: 
michael@0: // Unlike the next function, moving between the core registers and vfp
michael@0: // registers can't be *that* properly typed, since I don't want to munge the
michael@0: // type VFPRegister to also include core registers. Thus, the core and vfp
michael@0: // registers are passed in based on their type, and src/dest is determined by
michael@0: // the float2core.
michael@0: BufferOffset
michael@0: Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
michael@0:                     Condition c, int idx)
michael@0: {
michael@0:     vfp_size sz = isSingle;
michael@0:     if (vm.isDouble()) {
michael@0:         // Technically, this can be done with a vmov à la ARM ARM under vmov.
michael@0:         // However, that requires at least an extra bit saying if the
michael@0:         // operation should be performed on the lower or upper half of the
michael@0:         // double. Moving a single to/from 2N/2N+1 isn't equivalent, since
michael@0:         // there are 32 single registers and 32 double registers, so there is
michael@0:         // no way to encode the last 16 double registers.
michael@0:         sz = isDouble;
michael@0:         JS_ASSERT(idx == 0 || idx == 1);
michael@0:         // If we are transferring a single half of the double, then it must be
michael@0:         // moving a VFP reg to a core reg.
michael@0:         if (vt2 == InvalidReg)
michael@0:             JS_ASSERT(f2c == FloatToCore);
michael@0:         idx = idx << 21;
michael@0:     } else {
michael@0:         JS_ASSERT(idx == 0);
michael@0:     }
michael@0: 
michael@0:     VFPXferSize xfersz = WordTransfer;
michael@0:     uint32_t (*encodeVFP)(VFPRegister) = VN;
michael@0:     if (vt2 != InvalidReg) {
michael@0:         // We are doing a 64 bit transfer.
michael@0:         xfersz = DoubleTransfer;
michael@0:         encodeVFP = VM;
michael@0:     }
michael@0: 
michael@0:     return writeVFPInst(sz, xfersz | f2c | c |
michael@0:                         RT(vt1) | maybeRN(vt2) | encodeVFP(vm) | idx);
michael@0: }
michael@0: 
michael@0: enum vcvt_destFloatness {
michael@0:     toInteger = 1 << 18,
michael@0:     toFloat   = 0 << 18
michael@0: };
michael@0: enum vcvt_toZero {
michael@0:     toZero  = 1 << 7, // use the default rounding mode, which truncates (rounds toward zero)
michael@0:     toFPSCR = 0 << 7  // use whatever rounding mode the fpscr specifies
michael@0: };
michael@0: enum vcvt_Signedness {
michael@0:     toSigned     = 1 << 16,
michael@0:     toUnsigned   = 0 << 16,
michael@0:     fromSigned   = 1 << 7,
michael@0:     fromUnsigned = 0 << 7
michael@0: };
michael@0: 
michael@0: // Our encoding actually allows just the src and the dest (and their types)
michael@0: // to uniquely specify the encoding that we are going to use.
michael@0: BufferOffset
michael@0: Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
michael@0:                    Condition c)
michael@0: {
michael@0:     // Unlike other cases, the source and dest types cannot be the same.
michael@0:     JS_ASSERT(!vd.equiv(vm));
michael@0:     vfp_size sz = isDouble;
michael@0:     if (vd.isFloat() && vm.isFloat()) {
michael@0:         // Doing a float -> float conversion.
michael@0:         if (vm.isSingle())
michael@0:             sz = isSingle;
michael@0:         return writeVFPInst(sz, c | 0x02B700C0 |
michael@0:                             VM(vm) | VD(vd));
michael@0:     }
michael@0: 
michael@0:     // At least one of the registers should be a float.
michael@0:     vcvt_destFloatness destFloat;
michael@0:     vcvt_Signedness opSign;
michael@0:     vcvt_toZero doToZero = toFPSCR;
michael@0:     JS_ASSERT(vd.isFloat() || vm.isFloat());
michael@0:     if (vd.isSingle() || vm.isSingle()) {
michael@0:         sz = isSingle;
michael@0:     }
michael@0:     if (vd.isFloat()) {
michael@0:         destFloat = toFloat;
michael@0:         opSign = (vm.isSInt()) ? fromSigned : fromUnsigned;
michael@0:     } else {
michael@0:         destFloat = toInteger;
michael@0:         opSign = (vd.isSInt()) ? toSigned : toUnsigned;
michael@0:         doToZero = useFPSCR ? toFPSCR : toZero;
michael@0:     }
michael@0:     return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c)
michael@0: {
michael@0:     JS_ASSERT(vd.isFloat());
michael@0:     uint32_t sx = 0x1;
michael@0:     vfp_size sf = vd.isDouble() ? isDouble : isSingle;
michael@0:     int32_t imm5 = fixedPoint;
michael@0:     imm5 = (sx ? 32 : 16) - imm5;
michael@0:     JS_ASSERT(imm5 >= 0);
michael@0:     imm5 = imm5 >> 1 | (imm5 & 1) << 5;
michael@0:     return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
michael@0:                         (!isSigned) << 16 | imm5 | c);
michael@0: }
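michael@0: 
michael@0: #if 0
michael@0: // A worked example (not part of the assembler) of the immediate split in
michael@0: // as_vcvtFixed() above for the 32-bit (sx == 1) case: the field stores
michael@0: // 32 - fixedPoint with its low bit relocated to bit 5 and the remaining
michael@0: // bits in bits 0-3.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: static uint32_t splitImm5(uint32_t fixedPoint) {
michael@0:     int32_t imm5 = 32 - (int32_t)fixedPoint;
michael@0:     assert(imm5 >= 0);
michael@0:     return (uint32_t)(imm5 >> 1 | (imm5 & 1) << 5);
michael@0: }
michael@0: 
michael@0: int main() {
michael@0:     // fixedPoint = 16 (a Q16 value): imm5 = 16 = 0b10000, low bit 0, so the
michael@0:     // field is 0b10000 >> 1 = 8.
michael@0:     assert(splitImm5(16) == 8);
michael@0:     // fixedPoint = 31: imm5 = 1, and the low bit lands in bit 5: field = 32.
michael@0:     assert(splitImm5(31) == 32);
michael@0:     return 0;
michael@0: }
michael@0: #endif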
michael@0: 
michael@0: // Transfer between VFP and memory.
michael@0: BufferOffset
michael@0: Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
michael@0:                    Condition c /* vfp doesn't have a wb option */,
michael@0:                    uint32_t *dest)
michael@0: {
michael@0:     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
michael@0:     return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest);
michael@0: }
michael@0: 
michael@0: // VFP's ldm/stm work differently from the standard arm ones.
michael@0: // You can only transfer a range.
michael@0: BufferOffset
michael@0: Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
michael@0:                    /* also has update conditions */ Condition c)
michael@0: {
michael@0:     JS_ASSERT(length <= 16 && length >= 0);
michael@0:     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
michael@0: 
michael@0:     if (vd.isDouble())
michael@0:         length *= 2;
michael@0: 
michael@0:     return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) |
michael@0:                         length |
michael@0:                         dtmMode | dtmUpdate | dtmCond);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
michael@0: {
michael@0:     JS_ASSERT(imm.isValid());
michael@0:     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
michael@0:     return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vmrs(Register r, Condition c)
michael@0: {
michael@0:     return writeInst(c | 0x0ef10a10 | RT(r));
michael@0: }
michael@0: 
michael@0: BufferOffset
michael@0: Assembler::as_vmsr(Register r, Condition c)
michael@0: {
michael@0:     return writeInst(c | 0x0ee10a10 | RT(r));
michael@0: }
michael@0: 
michael@0: bool
michael@0: Assembler::nextLink(BufferOffset b, BufferOffset *next)
michael@0: {
michael@0:     Instruction branch = *editSrc(b);
michael@0:     JS_ASSERT(branch.is<InstBranchImm>());
michael@0: 
michael@0:     BOffImm destOff;
michael@0:     branch.as<InstBranchImm>()->extractImm(&destOff);
michael@0:     if (destOff.isInvalid())
michael@0:         return false;
michael@0: 
michael@0:     // Propagate the next link back to the caller, by constructing a new
michael@0:     // BufferOffset into the space they provided.
michael@0:     new (next) BufferOffset(destOff.decode());
michael@0:     return true;
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::bind(Label *label, BufferOffset boff)
michael@0: {
michael@0:     if (label->used()) {
michael@0:         bool more;
michael@0:         // If our caller didn't give us an explicit target to bind to, then we
michael@0:         // want to bind to the location of the next instruction.
michael@0:         BufferOffset dest = boff.assigned() ? boff : nextOffset();
michael@0:         BufferOffset b(label);
michael@0:         do {
michael@0:             BufferOffset next;
michael@0:             more = nextLink(b, &next);
michael@0:             Instruction branch = *editSrc(b);
michael@0:             Condition c;
michael@0:             branch.extractCond(&c);
michael@0:             if (branch.is<InstBImm>())
michael@0:                 as_b(dest.diffB<BOffImm>(b), c, b);
michael@0:             else if (branch.is<InstBLImm>())
michael@0:                 as_bl(dest.diffB<BOffImm>(b), c, b);
michael@0:             else
michael@0:                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
michael@0:             b = next;
michael@0:         } while (more);
michael@0:     }
michael@0:     label->bind(nextOffset().getOffset());
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::bind(RepatchLabel *label)
michael@0: {
michael@0:     BufferOffset dest = nextOffset();
michael@0:     if (label->used()) {
michael@0:         // If the label has a use, then change this use to refer to the bound
michael@0:         // label.
michael@0:         BufferOffset branchOff(label->offset());
michael@0:         // Since this was created with a RepatchLabel, the value written in
michael@0:         // the instruction stream is not branch shaped, it is PoolHintData
michael@0:         // shaped.
michael@0:         Instruction *branch = editSrc(branchOff);
michael@0:         PoolHintPun p;
michael@0:         p.raw = branch->encode();
michael@0:         Condition cond;
michael@0:         if (p.phd.isValidPoolHint())
michael@0:             cond = p.phd.getCond();
michael@0:         else
michael@0:             branch->extractCond(&cond);
michael@0:         as_b(dest.diffB<BOffImm>(branchOff), cond, branchOff);
michael@0:     }
michael@0:     label->bind(dest.getOffset());
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::retarget(Label *label, Label *target)
michael@0: {
michael@0:     if (label->used()) {
michael@0:         if (target->bound()) {
michael@0:             bind(label, BufferOffset(target));
michael@0:         } else if (target->used()) {
michael@0:             // The target is not bound but used. Prepend label's branch list
michael@0:             // onto target's.
michael@0:             BufferOffset labelBranchOffset(label);
michael@0:             BufferOffset next;
michael@0: 
michael@0:             // Find the head of the use chain for label.
michael@0:             while (nextLink(labelBranchOffset, &next))
michael@0:                 labelBranchOffset = next;
michael@0: 
michael@0:             // Then patch the head of label's use chain to the tail of
michael@0:             // target's use chain, prepending the entire use chain of target.
michael@0:             Instruction branch = *editSrc(labelBranchOffset);
michael@0:             Condition c;
michael@0:             branch.extractCond(&c);
michael@0:             int32_t prev = target->use(label->offset());
michael@0:             if (branch.is<InstBImm>())
michael@0:                 as_b(BOffImm(prev), c, labelBranchOffset);
michael@0:             else if (branch.is<InstBLImm>())
michael@0:                 as_bl(BOffImm(prev), c, labelBranchOffset);
michael@0:             else
michael@0:                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
michael@0:         } else {
michael@0:             // The target is unbound and unused. We can just take the head of
michael@0:             // the list hanging off of label, and dump that into target.
michael@0:             DebugOnly<int32_t> prev = target->use(label->offset());
michael@0:             JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
michael@0:         }
michael@0:     }
michael@0:     label->reset();
michael@0: }
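michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the use-chain that bind() and
michael@0: // retarget() walk above: until a label is bound, each branch's offset field
michael@0: // holds the buffer offset of the previous branch on the same label, forming
michael@0: // a singly linked list through the instruction stream. The array-based
michael@0: // buffer is hypothetical.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: #include <initializer_list>
michael@0: 
michael@0: int main() {
michael@0:     const int32_t INVALID = -1;
michael@0:     int32_t buffer[8] = { 0 };
michael@0:     // Three branches at offsets 1, 3 and 6 all target one unbound label.
michael@0:     int32_t head = INVALID;
michael@0:     for (int32_t off : {1, 3, 6}) {
michael@0:         buffer[off] = head;  // link to the previous use, as label->use() does
michael@0:         head = off;
michael@0:     }
michael@0:     // Binding walks the chain from the most recent use back to the first,
michael@0:     // patching each branch to the real target (here we just count them).
michael@0:     int patched = 0;
michael@0:     for (int32_t b = head; b != INVALID; b = buffer[b])
michael@0:         patched++;
michael@0:     assert(patched == 3);
michael@0:     return 0;
michael@0: }
michael@0: #endif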
michael@0: 
michael@0: void dbg_break() {}
michael@0: static int stopBKPT = -1;
michael@0: void
michael@0: Assembler::as_bkpt()
michael@0: {
michael@0:     // This is a count of how many times a breakpoint instruction has been
michael@0:     // generated. It is embedded into the instruction for debugging purposes.
michael@0:     // Gdb will print "bkpt xxx" when you attempt to disassemble a breakpoint
michael@0:     // with the number xxx embedded into it.
michael@0:     //
michael@0:     // If this breakpoint is being hit, then you can run (in gdb):
michael@0:     //  >b dbg_break
michael@0:     //  >b main
michael@0:     //  >commands
michael@0:     //  >set stopBKPT = xxx
michael@0:     //  >c
michael@0:     //  >end
michael@0:     // which will set a breakpoint on the function dbg_break above, and set a
michael@0:     // scripted breakpoint on main that will set the (otherwise unmodified)
michael@0:     // value to the number of the breakpoint, so dbg_break will actually be
michael@0:     // called. Finally, when you run the executable, execution will halt when
michael@0:     // that breakpoint is generated.
michael@0:     static int hit = 0;
michael@0:     if (stopBKPT == hit)
michael@0:         dbg_break();
michael@0:     writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
michael@0:     hit++;
michael@0: }
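michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the bkpt encoding emitted by
michael@0: // as_bkpt() above: the 16-bit counter splits into the instruction's imm12
michael@0: // field (bits 8-19) and imm4 field (bits 0-3), around the fixed 0111 in
michael@0: // bits 4-7.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: static uint32_t encodeBkpt(uint32_t n) {
michael@0:     return 0xe1200070 | (n & 0xf) | ((n & 0xfff0) << 4);
michael@0: }
michael@0: 
michael@0: int main() {
michael@0:     // Counter 0x1234: imm4 = 4, imm12 = 0x123, so gdb disassembles the
michael@0:     // resulting word as "bkpt 0x1234".
michael@0:     assert(encodeBkpt(0x1234) == 0xe1212374);
michael@0:     return 0;
michael@0: }
michael@0: #endif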
michael@0: 
michael@0: void
michael@0: Assembler::dumpPool()
michael@0: {
michael@0:     m_buffer.flushPool();
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::flushBuffer()
michael@0: {
michael@0:     m_buffer.flushPool();
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::enterNoPool()
michael@0: {
michael@0:     m_buffer.enterNoPool();
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::leaveNoPool()
michael@0: {
michael@0:     m_buffer.leaveNoPool();
michael@0: }
michael@0: 
michael@0: ptrdiff_t
michael@0: Assembler::getBranchOffset(const Instruction *i_)
michael@0: {
michael@0:     if (!i_->is<InstBranchImm>())
michael@0:         return 0;
michael@0: 
michael@0:     InstBranchImm *i = i_->as<InstBranchImm>();
michael@0:     BOffImm dest;
michael@0:     i->extractImm(&dest);
michael@0:     return dest.decode();
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::retargetNearBranch(Instruction *i, int offset, bool final)
michael@0: {
michael@0:     Assembler::Condition c;
michael@0:     i->extractCond(&c);
michael@0:     retargetNearBranch(i, offset, c, final);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
michael@0: {
michael@0:     // Retargeting calls is totally unsupported!
michael@0:     JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
michael@0:     if (i->is<InstBLImm>())
michael@0:         new (i) InstBLImm(BOffImm(offset), cond);
michael@0:     else
michael@0:         new (i) InstBImm(BOffImm(offset), cond);
michael@0: 
michael@0:     // Flush the cache, since an instruction was overwritten.
michael@0:     if (final)
michael@0:         AutoFlushICache::flush(uintptr_t(i), 4);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
michael@0: {
michael@0:     int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
michael@0:     if (!i->is<InstLDR>()) {
michael@0:         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
michael@0:         AutoFlushICache::flush(uintptr_t(i), 4);
michael@0:     }
michael@0:     *slot = dest;
michael@0: }
michael@0: 
michael@0: struct PoolHeader : Instruction {
michael@0:     struct Header
michael@0:     {
michael@0:         // The size should take into account the pool header.
michael@0:         // The size is in units of Instruction (4 bytes), not bytes.
michael@0:         uint32_t size : 15;
michael@0:         bool isNatural : 1;
michael@0:         uint32_t ONES : 16;
michael@0: 
michael@0:         Header(int size_, bool isNatural_)
michael@0:           : size(size_),
michael@0:             isNatural(isNatural_),
michael@0:             ONES(0xffff)
michael@0:         { }
michael@0: 
michael@0:         Header(const Instruction *i) {
michael@0:             JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
michael@0:             memcpy(this, i, sizeof(Header));
michael@0:             JS_ASSERT(ONES == 0xffff);
michael@0:         }
michael@0: 
michael@0:         uint32_t raw() const {
michael@0:             JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
michael@0:             uint32_t dest;
michael@0:             memcpy(&dest, this, sizeof(Header));
michael@0:             return dest;
michael@0:         }
michael@0:     };
michael@0: 
michael@0:     PoolHeader(int size_, bool isNatural_)
michael@0:       : Instruction(Header(size_, isNatural_).raw(), true)
michael@0:     { }
michael@0: 
michael@0:     uint32_t size() const {
michael@0:         Header tmp(this);
michael@0:         return tmp.size;
michael@0:     }
michael@0:     uint32_t isNatural() const {
michael@0:         Header tmp(this);
michael@0:         return tmp.isNatural;
michael@0:     }
michael@0:     static bool isTHIS(const Instruction &i) {
michael@0:         return (*i.raw() & 0xffff0000) == 0xffff0000;
michael@0:     }
michael@0:     static const PoolHeader *asTHIS(const Instruction &i) {
michael@0:         if (!isTHIS(i))
michael@0:             return nullptr;
michael@0:         return static_cast<const PoolHeader*>(&i);
michael@0:     }
michael@0: };
michael@0: 
michael@0: void
michael@0: Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural)
michael@0: {
michael@0:     STATIC_ASSERT(sizeof(PoolHeader) == 4);
michael@0:     uint8_t *pool = start + 4;
michael@0:     // Go through the usual rigmarole to get the size of the pool.
michael@0:     pool = p[0].addPoolSize(pool);
michael@0:     pool = p[1].addPoolSize(pool);
michael@0:     pool = p[1].other->addPoolSize(pool);
michael@0:     pool = p[0].other->addPoolSize(pool);
michael@0:     uint32_t size = pool - start;
michael@0:     JS_ASSERT((size & 3) == 0);
michael@0:     size = size >> 2;
michael@0:     JS_ASSERT(size < (1 << 15));
michael@0:     PoolHeader header(size, isNatural);
michael@0:     *(PoolHeader*)start = header;
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural)
michael@0: {
michael@0:     return;
michael@0: }
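michael@0: 
michael@0: #if 0
michael@0: // A standalone round-trip (not part of the assembler) of the PoolHeader word
michael@0: // above: 15 bits of size in words, one isNatural bit, and 16 guard ones,
michael@0: // serialized via memcpy exactly as Header::raw() does. The mirror struct is
michael@0: // illustrative, and bitfield layout is ABI-dependent.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: #include <cstring>
michael@0: 
michael@0: struct MiniPoolHeader {
michael@0:     uint32_t size : 15;
michael@0:     uint32_t isNatural : 1;
michael@0:     uint32_t ONES : 16;
michael@0: };
michael@0: 
michael@0: int main() {
michael@0:     static_assert(sizeof(MiniPoolHeader) == sizeof(uint32_t), "one word");
michael@0:     MiniPoolHeader h = { 5, 1, 0xffff };   // a 5-word, natural pool
michael@0:     uint32_t raw;
michael@0:     memcpy(&raw, &h, sizeof(raw));
michael@0:     MiniPoolHeader back;
michael@0:     memcpy(&back, &raw, sizeof(back));
michael@0:     assert(back.size == 5 && back.isNatural && back.ONES == 0xffff);
michael@0:     // isTHIS() above keys on the top halfword being all ones; on common
michael@0:     // little-endian ABIs that is (raw & 0xffff0000) == 0xffff0000.
michael@0:     return 0;
michael@0: }
michael@0: #endif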
michael@0: 
michael@0: // The size of an arbitrary 32-bit call in the instruction stream.
michael@0: // On ARM this sequence is |pc = ldr pc - 4; imm32|, given that we
michael@0: // never reach the imm32.
michael@0: uint32_t
michael@0: Assembler::patchWrite_NearCallSize()
michael@0: {
michael@0:     return sizeof(uint32_t);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
michael@0: {
michael@0:     Instruction *inst = (Instruction *) start.raw();
michael@0:     // Overwrite whatever instruction used to be here with a call. Since the
michael@0:     // destination is in the same function, it will be within range of the
michael@0:     // bl instruction's 24-bit (word-scaled) immediate.
michael@0:     uint8_t *dest = toCall.raw();
michael@0:     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
michael@0:     // Ensure everyone sees the code that was just written into memory.
michael@0:     AutoFlushICache::flush(uintptr_t(inst), 4);
michael@0: }
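michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the reach of the bl written by
michael@0: // patchWrite_NearCall() above: the 24-bit immediate counts words and is
michael@0: // sign-extended, so after the pc+8 bias a bl reaches roughly +/-32MB of the
michael@0: // call site. fitsInBL is a hypothetical helper mirroring that range check.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: static bool fitsInBL(int64_t byteOffset) {
michael@0:     int64_t biased = byteOffset - 8;           // pc reads as inst + 8
michael@0:     if (biased & 3)                            // must be word aligned
michael@0:         return false;
michael@0:     int64_t words = biased >> 2;
michael@0:     return words >= -(1 << 23) && words <= (1 << 23) - 1;
michael@0: }
michael@0: 
michael@0: int main() {
michael@0:     assert(fitsInBL(0));                       // branch to self
michael@0:     assert(fitsInBL(4 + (1 << 25)));           // just inside the forward limit
michael@0:     assert(!fitsInBL(8 + (1 << 25)));          // one word beyond it
michael@0:     return 0;
michael@0: }
michael@0: #endif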
michael@0: 
michael@0: void
michael@0: Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
michael@0:                                    PatchedImmPtr expectedValue)
michael@0: {
michael@0:     Instruction *ptr = (Instruction *) label.raw();
michael@0:     InstructionIterator iter(ptr);
michael@0:     Register dest;
michael@0:     Assembler::RelocStyle rs;
michael@0:     DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
michael@0:     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
michael@0:     reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
michael@0:                                                                  dest, Always, rs, ptr);
michael@0:     // L_LDR won't cause any instructions to be updated.
michael@0:     if (rs != L_LDR) {
michael@0:         AutoFlushICache::flush(uintptr_t(ptr), 4);
michael@0:         AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
michael@0:     }
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
michael@0: {
michael@0:     patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
michael@0: }
michael@0: 
michael@0: // This just stomps over memory with 32 bits of raw data. Its purpose is to
michael@0: // overwrite the call of JITed code with 32 bits worth of an offset. It is
michael@0: // only meant to function on code that has been invalidated, so it should be
michael@0: // totally safe. Since that instruction will never be executed again, an
michael@0: // ICache flush should not be necessary.
michael@0: void
michael@0: Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
michael@0:     // Raw is going to be the return address.
michael@0:     uint32_t *raw = (uint32_t*)label.raw();
michael@0:     // Overwrite the 4 bytes before the return address, which will end up
michael@0:     // being the call instruction.
michael@0:     *(raw - 1) = imm.value;
michael@0: }
michael@0: 
michael@0: uint8_t *
michael@0: Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
michael@0: {
michael@0:     Instruction *inst = reinterpret_cast<Instruction*>(inst_);
michael@0:     if (count != nullptr)
michael@0:         *count += sizeof(Instruction);
michael@0:     return reinterpret_cast<uint8_t*>(inst->next());
michael@0: }
michael@0: 
michael@0: static bool
michael@0: InstIsGuard(Instruction *inst, const PoolHeader **ph)
michael@0: {
michael@0:     Assembler::Condition c;
michael@0:     inst->extractCond(&c);
michael@0:     if (c != Assembler::Always)
michael@0:         return false;
michael@0:     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
michael@0:         return false;
michael@0:     // See if the next instruction is a pool header.
michael@0:     *ph = (inst + 1)->as<const PoolHeader>();
michael@0:     return *ph != nullptr;
michael@0: }
michael@0: 
michael@0: static bool
michael@0: InstIsBNop(Instruction *inst) {
michael@0:     // In some special situations, it is necessary to insert a NOP into the
michael@0:     // instruction stream that nobody knows about. Since nobody should know
michael@0:     // about it, make sure it gets skipped when Instruction::next() is called.
michael@0:     // This checks for a very specific nop, namely a branch to the next
michael@0:     // instruction.
michael@0:     Assembler::Condition c;
michael@0:     inst->extractCond(&c);
michael@0:     if (c != Assembler::Always)
michael@0:         return false;
michael@0:     if (!inst->is<InstBImm>())
michael@0:         return false;
michael@0:     InstBImm *b = inst->as<InstBImm>();
michael@0:     BOffImm offset;
michael@0:     b->extractImm(&offset);
michael@0:     return offset.decode() == 4;
michael@0: }
michael@0: 
michael@0: static bool
michael@0: InstIsArtificialGuard(Instruction *inst, const PoolHeader **ph)
michael@0: {
michael@0:     if (!InstIsGuard(inst, ph))
michael@0:         return false;
michael@0:     return !(*ph)->isNatural();
michael@0: }
michael@0: 
michael@0: // Cases to be handled:
michael@0: // 1) no pools or branches in sight => return this+1
michael@0: // 2) branch to next instruction => return this+2, because a nop needed to be
michael@0: //    inserted into the stream
michael@0: // 3) this+1 is an artificial guard for a pool => return first instruction
michael@0: //    after the pool
michael@0: // 4) this+1 is a natural guard => return the branch
michael@0: // 5) this is a branch, right before a pool => return first instruction after
michael@0: //    the pool
michael@0: // In assembly form:
michael@0: // 1) add r0, r0, r0  <= this
michael@0: //    add r1, r1, r1  <= returned value
michael@0: //    add r2, r2, r2
michael@0: //
michael@0: // 2) add r0, r0, r0  <= this
michael@0: //    b foo
michael@0: //    foo:
michael@0: //    add r2, r2, r2  <= returned value
michael@0: //
michael@0: // 3) add r0, r0, r0  <= this
michael@0: //    b after_pool
michael@0: //    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not
michael@0: //                      # requested by the assembler
michael@0: //    0xdeadbeef        # the 2 indicates that there is 1 pool entry, plus the
michael@0: //                      # pool header
michael@0: //    add r4, r4, r4  <= returned value
michael@0: // 4) add r0, r0, r0  <= this
michael@0: //    b after_pool  <= returned value
michael@0: //    .word 0xffff8002  # bit 15 being 1 indicates that the branch was
michael@0: //                      # requested by the assembler
michael@0: //    0xdeadbeef
michael@0: //    add r4, r4, r4
michael@0: // 5) b after_pool  <= this
michael@0: //    .word 0xffff8002  # bit 15 has no bearing on the returned value
michael@0: //    0xdeadbeef
michael@0: //    add r4, r4, r4  <= returned value
michael@0: 
michael@0: Instruction *
michael@0: Instruction::next()
michael@0: {
michael@0:     Instruction *ret = this + 1;
michael@0:     const PoolHeader *ph;
michael@0:     // If this is a guard, and the next instruction is a header, always work
michael@0:     // around the pool. If it isn't a guard, then start looking ahead.
michael@0:     if (InstIsGuard(this, &ph))
michael@0:         return ret + ph->size();
michael@0:     if (InstIsArtificialGuard(ret, &ph))
michael@0:         return ret + 1 + ph->size();
michael@0:     if (InstIsBNop(ret))
michael@0:         return ret + 1;
michael@0:     return ret;
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::ToggleToJmp(CodeLocationLabel inst_)
michael@0: {
michael@0:     uint32_t *ptr = (uint32_t *)inst_.raw();
michael@0: 
michael@0:     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
michael@0:     JS_ASSERT(inst->is<InstCMP>());
michael@0: 
michael@0:     // Zero bits 20-27, then set 24-27 to be correct for a branch.
michael@0:     // 20-23 will be part of the B's immediate, and should be 0.
michael@0:     *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
michael@0:     AutoFlushICache::flush(uintptr_t(ptr), 4);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::ToggleToCmp(CodeLocationLabel inst_)
michael@0: {
michael@0:     uint32_t *ptr = (uint32_t *)inst_.raw();
michael@0: 
michael@0:     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
michael@0:     JS_ASSERT(inst->is<InstBImm>());
michael@0: 
michael@0:     // Ensure that this masking operation doesn't affect the offset of the
michael@0:     // branch instruction when it gets toggled back.
michael@0:     JS_ASSERT((*ptr & (0xf << 20)) == 0);
michael@0: 
michael@0:     // Also make sure that the CMP is valid. Part of having a valid CMP is
michael@0:     // that all of the bits describing the destination in most ALU
michael@0:     // instructions are all unset (looks like it is encoding r0).
michael@0:     JS_ASSERT(toRD(*inst) == r0);
michael@0: 
michael@0:     // Zero out bits 20-27, then set them to be correct for a compare.
michael@0:     *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
michael@0: 
michael@0:     AutoFlushICache::flush(uintptr_t(ptr), 4);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
michael@0: {
michael@0:     Instruction *inst = (Instruction *)inst_.raw();
michael@0:     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
michael@0: 
michael@0:     if (inst->is<InstMovW>()) {
michael@0:         // If it looks like the start of a movw/movt sequence, then make sure
michael@0:         // we have all of it (and advance the iterator past the full sequence).
michael@0:         inst = inst->next();
michael@0:         JS_ASSERT(inst->is<InstMovT>());
michael@0:     }
michael@0: 
michael@0:     inst = inst->next();
michael@0:     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
michael@0: 
michael@0:     if (enabled == inst->is<InstBLXReg>()) {
michael@0:         // Nothing to do.
michael@0:         return;
michael@0:     }
michael@0: 
michael@0:     if (enabled)
michael@0:         *inst = InstBLXReg(ScratchRegister, Always);
michael@0:     else
michael@0:         *inst = InstNOP();
michael@0: 
michael@0:     AutoFlushICache::flush(uintptr_t(inst), 4);
michael@0: }
michael@0: 
michael@0: void
michael@0: Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
michael@0: {
michael@0:     JS_ASSERT(inst->is<InstCMP>());
michael@0:     InstCMP *cmp = inst->as<InstCMP>();
michael@0: 
michael@0:     Register index;
michael@0:     cmp->extractOp1(&index);
michael@0: 
michael@0:     Operand2 op = cmp->extractOp2();
michael@0:     JS_ASSERT(op.isImm8());
michael@0: 
michael@0:     Imm8 imm8 = Imm8(heapSize);
michael@0:     JS_ASSERT(!imm8.invalid);
michael@0: 
michael@0:     *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always);
michael@0:     // NOTE: we don't flush the instruction cache here; this function is
michael@0:     // currently only called from within AsmJSModule::patchHeapAccesses,
michael@0:     // which does that for us. Don't call this!
michael@0: }
michael@0: 
michael@0: InstructionIterator::InstructionIterator(Instruction *i_) : i(i_)
michael@0: {
michael@0:     const PoolHeader *ph;
michael@0:     // If this is an artificial guard for a pool, skip straight past the pool.
michael@0:     if (InstIsArtificialGuard(i, &ph)) {
michael@0:         i = i->next();
michael@0:     }
michael@0: }
michael@0: 
michael@0: Assembler *Assembler::dummy = nullptr;
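michael@0: 
michael@0: #if 0
michael@0: // A sketch (not part of the assembler) of the opcode-field rewrite done by
michael@0: // ToggleToJmp() and ToggleToCmp() above: bits 20-27 select between
michael@0: // "cmp rn, #imm" (0x35) and "b <offset>" (0xa0), while the low 20 bits serve
michael@0: // as the cmp immediate or part of the branch offset and must be preserved.
michael@0: #include <cassert>
michael@0: #include <cstdint>
michael@0: 
michael@0: static uint32_t setOp(uint32_t inst, uint32_t op) {
michael@0:     return (inst & ~(0xffu << 20)) | (op << 20);
michael@0: }
michael@0: 
michael@0: int main() {
michael@0:     uint32_t cmp = 0xe3500123;         // cmp r0, #... with cond = Always
michael@0:     assert(((cmp >> 20) & 0xff) == 0x35);
michael@0:     uint32_t b = setOp(cmp, 0xa0);     // toggled to a branch
michael@0:     assert(((b >> 20) & 0xff) == 0xa0);
michael@0:     assert(setOp(b, 0x35) == cmp);     // and back, with the low bits intact
michael@0:     return 0;
michael@0: }
michael@0: #endif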