js/src/jit/arm/Assembler-arm.cpp

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset   0:6474c204b198
permissions -rw-r--r--

Cloned from the upstream tor-browser origin at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #include "jit/arm/Assembler-arm.h"
     9 #include "mozilla/DebugOnly.h"
    10 #include "mozilla/MathAlgorithms.h"
    12 #include "jscompartment.h"
    13 #include "jsutil.h"
    15 #include "assembler/jit/ExecutableAllocator.h"
    16 #include "gc/Marking.h"
    17 #include "jit/arm/MacroAssembler-arm.h"
    18 #include "jit/JitCompartment.h"
    20 using namespace js;
    21 using namespace js::jit;
    23 using mozilla::CountLeadingZeroes32;
    25 // Note this is used for inter-AsmJS calls and may pass arguments and results
    26 // in floating point registers even if the system ABI does not.
    27 ABIArgGenerator::ABIArgGenerator() :
    28     intRegIndex_(0),
    29     floatRegIndex_(0),
    30     stackOffset_(0),
    31     current_()
    32 {}
    34 ABIArg
    35 ABIArgGenerator::next(MIRType type)
    36 {
    37     switch (type) {
    38       case MIRType_Int32:
    39       case MIRType_Pointer:
    40         if (intRegIndex_ == NumIntArgRegs) {
    41             current_ = ABIArg(stackOffset_);
    42             stackOffset_ += sizeof(uint32_t);
    43             break;
    44         }
    45         current_ = ABIArg(Register::FromCode(intRegIndex_));
    46         intRegIndex_++;
    47         break;
    48       case MIRType_Float32:
    49       case MIRType_Double:
    50         if (floatRegIndex_ == NumFloatArgRegs) {
    51             static const int align = sizeof(double) - 1;
    52             stackOffset_ = (stackOffset_ + align) & ~align;
    53             current_ = ABIArg(stackOffset_);
    54             stackOffset_ += sizeof(uint64_t);
    55             break;
    56         }
    57         current_ = ABIArg(FloatRegister::FromCode(floatRegIndex_));
    58         floatRegIndex_++;
    59         break;
    60       default:
    61         MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    62     }
    64     return current_;
    65 }
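// Editor's note: a minimal sketch of how the generator above is typically
// driven, assuming NumIntArgRegs == 4 (r0-r3) as on ARM; per the note above,
// floating point arguments are assigned to VFP registers even when the
// system ABI is soft-float.
//
//   ABIArgGenerator gen;
//   ABIArg a0 = gen.next(MIRType_Int32);   // r0
//   ABIArg a1 = gen.next(MIRType_Double);  // first VFP argument register
//   ABIArg a2 = gen.next(MIRType_Int32);   // r1
//
// Once a register class is exhausted the argument falls through to
// stackOffset_, with doubles first rounded up to an 8-byte boundary.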
    66 const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4;
    67 const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5;
    69 // Encode a standard register when it is being used as src1, the dest, and
    70 // an extra register. These should never be called with an InvalidReg.
    71 uint32_t
    72 js::jit::RT(Register r)
    73 {
    74     JS_ASSERT((r.code() & ~0xf) == 0);
    75     return r.code() << 12;
    76 }
    78 uint32_t
    79 js::jit::RN(Register r)
    80 {
    81     JS_ASSERT((r.code() & ~0xf) == 0);
    82     return r.code() << 16;
    83 }
    85 uint32_t
    86 js::jit::RD(Register r)
    87 {
    88     JS_ASSERT((r.code() & ~0xf) == 0);
    89     return r.code() << 12;
    90 }
    92 uint32_t
    93 js::jit::RM(Register r)
    94 {
    95     JS_ASSERT((r.code() & ~0xf) == 0);
    96     return r.code() << 8;
    97 }
    99 // Encode a standard register when it is being used as src1, the dest, and
   100 // an extra register. For these, an InvalidReg is used to indicate an optional
   101 // register that has been omitted.
   102 uint32_t
   103 js::jit::maybeRT(Register r)
   104 {
   105     if (r == InvalidReg)
   106         return 0;
   108     JS_ASSERT((r.code() & ~0xf) == 0);
   109     return r.code() << 12;
   110 }
   112 uint32_t
   113 js::jit::maybeRN(Register r)
   114 {
   115     if (r == InvalidReg)
   116         return 0;
   118     JS_ASSERT((r.code() & ~0xf) == 0);
   119     return r.code() << 16;
   120 }
   122 uint32_t
   123 js::jit::maybeRD(Register r)
   124 {
   125     if (r == InvalidReg)
   126         return 0;
   128     JS_ASSERT((r.code() & ~0xf) == 0);
   129     return r.code() << 12;
   130 }
   132 Register
   133 js::jit::toRD(Instruction &i)
   134 {
   135     return Register::FromCode((i.encode()>>12) & 0xf);
   136 }
   137 Register
   138 js::jit::toR(Instruction &i)
   139 {
   140     return Register::FromCode(i.encode() & 0xf);
   141 }
   143 Register
   144 js::jit::toRM(Instruction &i)
   145 {
   146     return Register::FromCode((i.encode()>>8) & 0xf);
   147 }
   149 Register
   150 js::jit::toRN(Instruction &i)
   151 {
   152     return Register::FromCode((i.encode()>>16) & 0xf);
   153 }
   155 uint32_t
   156 js::jit::VD(VFPRegister vr)
   157 {
   158     if (vr.isMissing())
   159         return 0;
   161     // bits 15,14,13,12, 22
   162     VFPRegister::VFPRegIndexSplit s = vr.encode();
   163     return s.bit << 22 | s.block << 12;
   164 }
   165 uint32_t
   166 js::jit::VN(VFPRegister vr)
   167 {
   168     if (vr.isMissing())
   169         return 0;
   171     // bits 19,18,17,16, 7
   172     VFPRegister::VFPRegIndexSplit s = vr.encode();
   173     return s.bit << 7 | s.block << 16;
   174 }
   175 uint32_t
   176 js::jit::VM(VFPRegister vr)
   177 {
   178     if (vr.isMissing())
   179         return 0;
   181     // bits 5, 3,2,1,0
   182     VFPRegister::VFPRegIndexSplit s = vr.encode();
   183     return s.bit << 5 | s.block;
   184 }
   186 VFPRegister::VFPRegIndexSplit
   187 jit::VFPRegister::encode()
   188 {
   189     JS_ASSERT(!_isInvalid);
   191     switch (kind) {
   192       case Double:
   193         return VFPRegIndexSplit(_code & 0xf, _code >> 4);
   194       case Single:
   195         return VFPRegIndexSplit(_code >> 1, _code & 1);
   196       default:
   197         // vfp register treated as an integer, NOT a gpr
   198         return VFPRegIndexSplit(_code >> 1, _code & 1);
   199     }
   200 }
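// Editor's note: a worked example of the split encoding above.  For a
// double register the low four bits of the register number go in "block"
// and the top bit in "bit", so a double with _code == 17 splits into
// block == 1, bit == 1, and VD() emits (1 << 22) | (1 << 12).  For a single
// register the roles flip: _code == 5 splits into block == 2, bit == 1,
// matching the Vd/D, Vn/N and Vm/M field pairs of the VFP encodings.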
   202 VFPRegister js::jit::NoVFPRegister(true);
   204 bool
   205 InstDTR::isTHIS(const Instruction &i)
   206 {
   207     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
   208 }
   210 InstDTR *
   211 InstDTR::asTHIS(const Instruction &i)
   212 {
   213     if (isTHIS(i))
   214         return (InstDTR*)&i;
   215     return nullptr;
   216 }
   218 bool
   219 InstLDR::isTHIS(const Instruction &i)
   220 {
   221     return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
   222 }
   224 InstLDR *
   225 InstLDR::asTHIS(const Instruction &i)
   226 {
   227     if (isTHIS(i))
   228         return (InstLDR*)&i;
   229     return nullptr;
   230 }
   232 InstNOP *
   233 InstNOP::asTHIS(Instruction &i)
   234 {
   235     if (isTHIS(i))
   236         return (InstNOP*) (&i);
   237     return nullptr;
   238 }
   240 bool
   241 InstNOP::isTHIS(const Instruction &i)
   242 {
   243     return (i.encode() & 0x0fffffff) == NopInst;
   244 }
   246 bool
   247 InstBranchReg::isTHIS(const Instruction &i)
   248 {
   249     return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i);
   250 }
   252 InstBranchReg *
   253 InstBranchReg::asTHIS(const Instruction &i)
   254 {
   255     if (isTHIS(i))
   256         return (InstBranchReg*)&i;
   257     return nullptr;
   258 }
   259 void
   260 InstBranchReg::extractDest(Register *dest)
   261 {
   262     *dest = toR(*this);
   263 }
   264 bool
   265 InstBranchReg::checkDest(Register dest)
   266 {
   267     return dest == toR(*this);
   268 }
   270 bool
   271 InstBranchImm::isTHIS(const Instruction &i)
   272 {
   273     return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i);
   274 }
   276 InstBranchImm *
   277 InstBranchImm::asTHIS(const Instruction &i)
   278 {
   279     if (isTHIS(i))
   280         return (InstBranchImm*)&i;
   281     return nullptr;
   282 }
   284 void
   285 InstBranchImm::extractImm(BOffImm *dest)
   286 {
   287     *dest = BOffImm(*this);
   288 }
   290 bool
   291 InstBXReg::isTHIS(const Instruction &i)
   292 {
   293     return (i.encode() & IsBRegMask) == IsBX;
   294 }
   296 InstBXReg *
   297 InstBXReg::asTHIS(const Instruction &i)
   298 {
   299     if (isTHIS(i))
   300         return (InstBXReg*)&i;
   301     return nullptr;
   302 }
   304 bool
   305 InstBLXReg::isTHIS(const Instruction &i)
   306 {
   307     return (i.encode() & IsBRegMask) == IsBLX;
   309 }
   310 InstBLXReg *
   311 InstBLXReg::asTHIS(const Instruction &i)
   312 {
   313     if (isTHIS(i))
   314         return (InstBLXReg*)&i;
   315     return nullptr;
   316 }
   318 bool
   319 InstBImm::isTHIS(const Instruction &i)
   320 {
   321     return (i.encode () & IsBImmMask) == IsB;
   322 }
   323 InstBImm *
   324 InstBImm::asTHIS(const Instruction &i)
   325 {
   326     if (isTHIS(i))
   327         return (InstBImm*)&i;
   328     return nullptr;
   329 }
   331 bool
   332 InstBLImm::isTHIS(const Instruction &i)
   333 {
   334     return (i.encode () & IsBImmMask) == IsBL;
   336 }
   337 InstBLImm *
   338 InstBLImm::asTHIS(Instruction &i)
   339 {
   340     if (isTHIS(i))
   341         return (InstBLImm*)&i;
   342     return nullptr;
   343 }
   345 bool
   346 InstMovWT::isTHIS(Instruction &i)
   347 {
   348     return  InstMovW::isTHIS(i) || InstMovT::isTHIS(i);
   349 }
   350 InstMovWT *
   351 InstMovWT::asTHIS(Instruction &i)
   352 {
   353     if (isTHIS(i))
   354         return (InstMovWT*)&i;
   355     return nullptr;
   356 }
   358 void
   359 InstMovWT::extractImm(Imm16 *imm)
   360 {
   361     *imm = Imm16(*this);
   362 }
   363 bool
   364 InstMovWT::checkImm(Imm16 imm)
   365 {
   366     return imm.decode() == Imm16(*this).decode();
   367 }
   369 void
   370 InstMovWT::extractDest(Register *dest)
   371 {
   372     *dest = toRD(*this);
   373 }
   374 bool
   375 InstMovWT::checkDest(Register dest)
   376 {
   377     return dest == toRD(*this);
   378 }
   380 bool
   381 InstMovW::isTHIS(const Instruction &i)
   382 {
   383     return (i.encode() & IsWTMask) == IsW;
   384 }
   386 InstMovW *
   387 InstMovW::asTHIS(const Instruction &i)
   388 {
   389     if (isTHIS(i))
   390         return (InstMovW*) (&i);
   391     return nullptr;
   392 }
   393 InstMovT *
   394 InstMovT::asTHIS(const Instruction &i)
   395 {
   396     if (isTHIS(i))
   397         return (InstMovT*) (&i);
   398     return nullptr;
   399 }
   401 bool
   402 InstMovT::isTHIS(const Instruction &i)
   403 {
   404     return (i.encode() & IsWTMask) == IsT;
   405 }
   407 InstALU *
   408 InstALU::asTHIS(const Instruction &i)
   409 {
   410     if (isTHIS(i))
   411         return (InstALU*) (&i);
   412     return nullptr;
   413 }
   414 bool
   415 InstALU::isTHIS(const Instruction &i)
   416 {
   417     return (i.encode() & ALUMask) == 0;
   418 }
   419 void
   420 InstALU::extractOp(ALUOp *ret)
   421 {
   422     *ret = ALUOp(encode() & (0xf << 21));
   423 }
   424 bool
   425 InstALU::checkOp(ALUOp op)
   426 {
   427     ALUOp mine;
   428     extractOp(&mine);
   429     return mine == op;
   430 }
   431 void
   432 InstALU::extractDest(Register *ret)
   433 {
   434     *ret = toRD(*this);
   435 }
   436 bool
   437 InstALU::checkDest(Register rd)
   438 {
   439     return rd == toRD(*this);
   440 }
   441 void
   442 InstALU::extractOp1(Register *ret)
   443 {
   444     *ret = toRN(*this);
   445 }
   446 bool
   447 InstALU::checkOp1(Register rn)
   448 {
   449     return rn == toRN(*this);
   450 }
   451 Operand2
   452 InstALU::extractOp2()
   453 {
   454     return Operand2(encode());
   455 }
   457 InstCMP *
   458 InstCMP::asTHIS(const Instruction &i)
   459 {
   460     if (isTHIS(i))
   461         return (InstCMP*) (&i);
   462     return nullptr;
   463 }
   465 bool
   466 InstCMP::isTHIS(const Instruction &i)
   467 {
   468     return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp);
   469 }
   471 InstMOV *
   472 InstMOV::asTHIS(const Instruction &i)
   473 {
   474     if (isTHIS(i))
   475         return (InstMOV*) (&i);
   476     return nullptr;
   477 }
   479 bool
   480 InstMOV::isTHIS(const Instruction &i)
   481 {
   482     return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov);
   483 }
   485 Op2Reg
   486 Operand2::toOp2Reg() {
   487     return *(Op2Reg*)this;
   488 }
   489 O2RegImmShift
   490 Op2Reg::toO2RegImmShift() {
   491     return *(O2RegImmShift*)this;
   492 }
   493 O2RegRegShift
   494 Op2Reg::toO2RegRegShift() {
   495     return *(O2RegRegShift*)this;
   496 }
   498 Imm16::Imm16(Instruction &inst)
   499   : lower(inst.encode() & 0xfff),
   500     upper(inst.encode() >> 16),
   501     invalid(0xfff)
   502 { }
   504 Imm16::Imm16(uint32_t imm)
   505   : lower(imm & 0xfff), pad(0),
   506     upper((imm>>12) & 0xf),
   507     invalid(0)
   508 {
   509     JS_ASSERT(decode() == imm);
   510 }
   512 Imm16::Imm16()
   513   : invalid(0xfff)
   514 { }
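// Editor's note: Imm16 models the split imm4:imm12 immediate of movw/movt.
// A sketch of the layout, assuming encode() places lower at instruction bits
// 11:0 and upper at bits 19:16 as in the movw/movt encoding: for
// imm == 0xABCD, lower holds 0xBCD and upper holds 0xA, so
// Imm16(0xABCD).decode() reassembles 0xABCD (which is exactly what the
// assert in the uint32_t constructor checks).  The Instruction-based
// constructor reads those two fields back out of an existing encoding.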
   516 void
   517 jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
   518 {
   519     // We need to determine if this jump can fit into the standard 24+2 bit address
   520     // or if we need a larger branch (or just need to use our pool entry)
   521     Instruction *jump = (Instruction*)jump_.raw();
   522     Assembler::Condition c;
   523     jump->extractCond(&c);
   524     JS_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());
   526     int jumpOffset = label.raw() - jump_.raw();
   527     if (BOffImm::isInRange(jumpOffset)) {
   528         // This instruction started off as a branch, and will remain one
   529         Assembler::retargetNearBranch(jump, jumpOffset, c);
   530     } else {
   531         // This instruction started off as a branch, but now needs to be demoted to an ldr.
   532         uint8_t **slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());
   533         Assembler::retargetFarBranch(jump, slot, label.raw(), c);
   534     }
   535 }
   537 void
   538 Assembler::finish()
   539 {
   540     flush();
   541     JS_ASSERT(!isFinished);
   542     isFinished = true;
   544     for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++) {
   545         int offset = tmpDataRelocations_[i].getOffset();
   546         int real_offset = offset + m_buffer.poolSizeBefore(offset);
   547         dataRelocations_.writeUnsigned(real_offset);
   548     }
   550     for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) {
   551         int offset = tmpJumpRelocations_[i].getOffset();
   552         int real_offset = offset + m_buffer.poolSizeBefore(offset);
   553         jumpRelocations_.writeUnsigned(real_offset);
   554     }
   556     for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++) {
   557         int offset = tmpPreBarriers_[i].getOffset();
   558         int real_offset = offset + m_buffer.poolSizeBefore(offset);
   559         preBarriers_.writeUnsigned(real_offset);
   560     }
   561 }
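// Editor's note: the three loops above all perform the same fixup.  The
// relocation offsets were recorded against the instruction stream *before*
// constant pools were dumped into it, so each offset is shifted by however
// many pool bytes ended up ahead of it.  With made-up numbers: a relocation
// recorded at offset 0x40, with one 16-byte pool emitted at offset 0x20,
// is written to the final table as 0x40 + 0x10 == 0x50.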
   563 void
   564 Assembler::executableCopy(uint8_t *buffer)
   565 {
   566     JS_ASSERT(isFinished);
   567     m_buffer.executableCopy(buffer);
   568     AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
   569 }
   571 void
   572 Assembler::resetCounter()
   573 {
   574     m_buffer.resetCounter();
   575 }
   577 uint32_t
   578 Assembler::actualOffset(uint32_t off_) const
   579 {
   580     return off_ + m_buffer.poolSizeBefore(off_);
   581 }
   583 uint32_t
   584 Assembler::actualIndex(uint32_t idx_) const
   585 {
   586     ARMBuffer::PoolEntry pe(idx_);
   587     return m_buffer.poolEntryOffset(pe);
   588 }
   590 uint8_t *
   591 Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_)
   592 {
   593     return code->raw() + pe_;
   594 }
   596 BufferOffset
   597 Assembler::actualOffset(BufferOffset off_) const
   598 {
   599     return BufferOffset(off_.getOffset() + m_buffer.poolSizeBefore(off_.getOffset()));
   600 }
   602 class RelocationIterator
   603 {
   604     CompactBufferReader reader_;
   605     // offset in bytes
   606     uint32_t offset_;
   608   public:
   609     RelocationIterator(CompactBufferReader &reader)
   610       : reader_(reader)
   611     { }
   613     bool read() {
   614         if (!reader_.more())
   615             return false;
   616         offset_ = reader_.readUnsigned();
   617         return true;
   618     }
   620     uint32_t offset() const {
   621         return offset_;
   622     }
   623 };
   625 template<class Iter>
   626 const uint32_t *
   627 Assembler::getCF32Target(Iter *iter)
   628 {
   629     Instruction *inst1 = iter->cur();
   630     Instruction *inst2 = iter->next();
   631     Instruction *inst3 = iter->next();
   632     Instruction *inst4 = iter->next();
   634     if (inst1->is<InstBranchImm>()) {
   635         // see if we have a simple case, b #offset
   636         BOffImm imm;
   637         InstBranchImm *jumpB = inst1->as<InstBranchImm>();
   638         jumpB->extractImm(&imm);
   639         return imm.getDest(inst1)->raw();
   640     }
   642     if (inst1->is<InstMovW>() && inst2->is<InstMovT>() &&
   643         (inst3->is<InstNOP>() || inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>()))
   644     {
   645         // see if we have the complex case,
   646         // movw r_temp, #imm1
   647         // movt r_temp, #imm2
   648         // bx r_temp
   649         // OR
   650         // movw r_temp, #imm1
   651         // movt r_temp, #imm2
   652         // str pc, [sp]
   653         // bx r_temp
   655         Imm16 targ_bot;
   656         Imm16 targ_top;
   657         Register temp;
   659         // Extract both the temp register and the bottom immediate.
   660         InstMovW *bottom = inst1->as<InstMovW>();
   661         bottom->extractImm(&targ_bot);
   662         bottom->extractDest(&temp);
   664         // Extract the top part of the immediate.
   665         InstMovT *top = inst2->as<InstMovT>();
   666         top->extractImm(&targ_top);
   668         // Make sure they are being loaded into the same register.
   669         JS_ASSERT(top->checkDest(temp));
   671         // Make sure we're branching to the same register.
   672 #ifdef DEBUG
   673         // A toggled call sometimes has a NOP instead of a branch for the third instruction.
   674         // No way to assert that it's valid in that situation.
   675         if (!inst3->is<InstNOP>()) {
   676             InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>()
   677                                                                    : inst4->as<InstBranchReg>();
   678             JS_ASSERT(realBranch->checkDest(temp));
   679         }
   680 #endif
   682         uint32_t *dest = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
   683         return dest;
   684     }
   686     if (inst1->is<InstLDR>()) {
   687         InstLDR *load = inst1->as<InstLDR>();
   688         uint32_t inst = load->encode();
   689         // get the address of the instruction as a raw pointer
   690         char *dataInst = reinterpret_cast<char*>(load);
   691         IsUp_ iu = IsUp_(inst & IsUp);
   692         int32_t offset = inst & 0xfff;
   693         if (iu != IsUp) {
   694             offset = - offset;
   695         }
   696         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
   697         return *ptr;
   699     }
   701     MOZ_ASSUME_UNREACHABLE("unsupported branch relocation");
   702 }
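// Editor's note: in the InstLDR case above, the constant lives in a pool and
// the load is pc-relative.  On ARM the pc reads as the address of the current
// instruction plus 8, which is why the slot is located at
// dataInst[offset + 8].  As a made-up example, ldr pc, [pc, #4] sitting at
// address A reads the word stored at A + 4 + 8.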
   704 uintptr_t
   705 Assembler::getPointer(uint8_t *instPtr)
   706 {
   707     InstructionIterator iter((Instruction*)instPtr);
   708     uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr);
   709     return ret;
   710 }
   712 template<class Iter>
   713 const uint32_t *
   714 Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style)
   715 {
   716     Instruction *load1 = start->cur();
   717     Instruction *load2 = start->next();
   719     if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
   720         // see if we have the complex case,
   721         // movw r_temp, #imm1
   722         // movt r_temp, #imm2
   724         Imm16 targ_bot;
   725         Imm16 targ_top;
   726         Register temp;
   728         // Extract both the temp register and the bottom immediate.
   729         InstMovW *bottom = load1->as<InstMovW>();
   730         bottom->extractImm(&targ_bot);
   731         bottom->extractDest(&temp);
   733         // Extract the top part of the immediate.
   734         InstMovT *top = load2->as<InstMovT>();
   735         top->extractImm(&targ_top);
   737         // Make sure they are being loaded into the same register.
   738         JS_ASSERT(top->checkDest(temp));
   740         if (dest)
   741             *dest = temp;
   742         if (style)
   743             *style = L_MOVWT;
   745         uint32_t *value = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16));
   746         return value;
   747     }
   748     if (load1->is<InstLDR>()) {
   749         InstLDR *load = load1->as<InstLDR>();
   750         uint32_t inst = load->encode();
   751         // get the address of the instruction as a raw pointer
   752         char *dataInst = reinterpret_cast<char*>(load);
   753         IsUp_ iu = IsUp_(inst & IsUp);
   754         int32_t offset = inst & 0xfff;
   755         if (iu == IsDown)
   756             offset = - offset;
   757         if (dest)
   758             *dest = toRD(*load);
   759         if (style)
   760             *style = L_LDR;
   761         uint32_t **ptr = (uint32_t **)&dataInst[offset + 8];
   762         return *ptr;
   763     }
   764     MOZ_ASSUME_UNREACHABLE("unsupported relocation");
   765 }
   767 static JitCode *
   768 CodeFromJump(InstructionIterator *jump)
   769 {
   770     uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump);
   771     return JitCode::FromExecutable(target);
   772 }
   774 void
   775 Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
   776 {
   777     RelocationIterator iter(reader);
   778     while (iter.read()) {
   779         InstructionIterator institer((Instruction *) (code->raw() + iter.offset()));
   780         JitCode *child = CodeFromJump(&institer);
   781         MarkJitCodeUnbarriered(trc, &child, "rel32");
   782     }
   783 }
   785 static void
   786 TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
   787 {
   788     while (reader.more()) {
   789         size_t offset = reader.readUnsigned();
   790         InstructionIterator iter((Instruction*)(buffer+offset));
   791         void *ptr = const_cast<uint32_t *>(js::jit::Assembler::getPtr32Target(&iter));
   792         // No barrier needed since these are constants.
   793         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
   794     }
   796 }
   797 static void
   798 TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer,
   799                      js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs)
   800 {
   801     for (unsigned int idx = 0; idx < locs->length(); idx++) {
   802         BufferOffset bo = (*locs)[idx];
   803         ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer);
   804         void *ptr = const_cast<uint32_t *>(jit::Assembler::getPtr32Target(&iter));
   806         // No barrier needed since these are constants.
   807         gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
   808     }
   810 }
   811 void
   812 Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
   813 {
   814     ::TraceDataRelocations(trc, code->raw(), reader);
   815 }
   817 void
   818 Assembler::copyJumpRelocationTable(uint8_t *dest)
   819 {
   820     if (jumpRelocations_.length())
   821         memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
   822 }
   824 void
   825 Assembler::copyDataRelocationTable(uint8_t *dest)
   826 {
   827     if (dataRelocations_.length())
   828         memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
   829 }
   831 void
   832 Assembler::copyPreBarrierTable(uint8_t *dest)
   833 {
   834     if (preBarriers_.length())
   835         memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
   836 }
   838 void
   839 Assembler::trace(JSTracer *trc)
   840 {
   841     for (size_t i = 0; i < jumps_.length(); i++) {
   842         RelativePatch &rp = jumps_[i];
   843         if (rp.kind == Relocation::JITCODE) {
   844             JitCode *code = JitCode::FromExecutable((uint8_t*)rp.target);
   845             MarkJitCodeUnbarriered(trc, &code, "masmrel32");
   846             JS_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
   847         }
   848     }
   850     if (tmpDataRelocations_.length())
   851         ::TraceDataRelocations(trc, &m_buffer, &tmpDataRelocations_);
   852 }
   854 void
   855 Assembler::processCodeLabels(uint8_t *rawCode)
   856 {
   857     for (size_t i = 0; i < codeLabels_.length(); i++) {
   858         CodeLabel label = codeLabels_[i];
   859         Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset()));
   860     }
   861 }
   863 void
   864 Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) {
   865     JS_ASSERT(!absoluteLabel->bound());
   866     BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
   868     // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of
   869     // uses of an AbsoluteLabel through the assembly. ARM only uses labels
   870     // for the case statements of switch jump tables. Thus, for simplicity, we
   871     // simply treat the AbsoluteLabel as a label and bind it to the offset of
   872     // the jump table entry that needs to be patched.
   873     LabelBase *label = absoluteLabel;
   874     label->bind(off.getOffset());
   875 }
   877 void
   878 Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address)
   879 {
   880     // See writeCodePointer comment.
   881     uint32_t off = actualOffset(label->offset());
   882     *reinterpret_cast<const void **>(rawCode + off) = address;
   883 }
   885 Assembler::Condition
   886 Assembler::InvertCondition(Condition cond)
   887 {
   888     const uint32_t ConditionInversionBit = 0x10000000;
   889     return Condition(ConditionInversionBit ^ cond);
   890 }
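// Editor's note: the condition field occupies instruction bits 31:28, and ARM
// arranges the codes in complementary pairs that differ only in their low
// bit, so XORing bit 28 (0x10000000) flips a condition to its inverse: cond
// nibble 0x0 (equal) becomes 0x1 (not equal), 0xa (signed >=) becomes
// 0xb (signed <), and so on.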
   892 Imm8::TwoImm8mData
   893 Imm8::encodeTwoImms(uint32_t imm)
   894 {
   895     // In the ideal case, we are looking for a number that (in binary) looks like:
   896     // 0b((00)*)n_1((00)*)n_2((00)*)
   897     //    left  n1   mid  n2
   898     // where both n_1 and n_2 fit into 8 bits.
   899     // since this is being done with rotates, we also need to handle the case
   900     // that one of these numbers is in fact split between the left and right
   901     // sides, in which case the constant will look like:
   902     // 0bn_1a((00)*)n_2((00)*)n_1b
   903     //   n1a  mid  n2   rgh    n1b
   904     // also remember, values are rotated by multiples of two, and left,
   905     // mid or right can have length zero
   906     uint32_t imm1, imm2;
   907     int left = CountLeadingZeroes32(imm) & 0x1E;
   908     uint32_t no_n1 = imm & ~(0xff << (24 - left));
   910     // not technically needed: this case only happens if we can encode
   911     // as a single imm8m.  There is a perfectly reasonable encoding in this
   912     // case, but we shouldn't encourage people to do things like this.
   913     if (no_n1 == 0)
   914         return TwoImm8mData();
   916     int mid = CountLeadingZeroes32(no_n1) & 0x1E;
   917     uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
   919     if (no_n2 == 0) {
   920         // we hit the easy case, no wraparound.
   921         // note: a single constant *may* look like this.
   922         int imm1shift = left + 8;
   923         int imm2shift = mid + 8;
   924         imm1 = (imm >> (32 - imm1shift)) & 0xff;
   925         if (imm2shift >= 32) {
   926             imm2shift = 0;
   927             // this assert does not always hold
   928             //assert((imm & 0xff) == no_n1);
   929             // in fact, this would lead to some incredibly subtle bugs.
   930             imm2 = no_n1;
   931         } else {
   932             imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
   933             JS_ASSERT( ((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) ==
   934                        imm2);
   935         }
   936         JS_ASSERT((imm1shift & 0x1) == 0);
   937         JS_ASSERT((imm2shift & 0x1) == 0);
   938         return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
   939                             datastore::Imm8mData(imm2, imm2shift >> 1));
   940     }
   942     // either it wraps, or it does not fit.
   943     // if we initially chopped off more than 8 bits, then it won't fit.
   944     if (left >= 8)
   945         return TwoImm8mData();
   947     int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
    948     // All remaining set bits *must* fit into the lower 8 bits;
    949     // the right == 8 case should be handled by the previous case.
   950     if (right > 8)
   951         return TwoImm8mData();
   953     // make sure the initial bits that we removed for no_n1
   954     // fit into the 8-(32-right) leftmost bits
   955     if (((imm & (0xff << (24 - left))) << (8-right)) != 0) {
    956         // BUT we may have removed more bits than we needed to for no_n1.
    957         // E.g. for 0x04104001 we can encode 0x00104000 with a single op and
    958         // 0x04000001 (wrapping around the word) with a second, but above we
    959         // tried 0x04100000 first and found that 0x4000 needs a second op,
    960         // while 0x1 cannot be included in the encoding of 0x04100000.
   961         no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right)));
   962         mid = CountLeadingZeroes32(no_n1) & 30;
   963         no_n2 =
   964             no_n1  & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31));
   965         if (no_n2 != 0)
   966             return TwoImm8mData();
   967     }
    969     // Now assemble all of this information into two coherent constants.
    970     // It is a rotate right from the lower 8 bits.
   971     int imm1shift = 8 - right;
   972     imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
   973     JS_ASSERT ((imm1shift&~0x1e) == 0);
   974     // left + 8 + mid is the position of the leftmost bit of n_2.
   975     // we needed to rotate 0x000000ab right by 8 in order to get
   976     // 0xab000000, then shift again by the leftmost bit in order to
   977     // get the constant that we care about.
   978     int imm2shift =  mid + 8;
   979     imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
   980     JS_ASSERT((imm1shift & 0x1) == 0);
   981     JS_ASSERT((imm2shift & 0x1) == 0);
   982     return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
   983                         datastore::Imm8mData(imm2, imm2shift >> 1));
   984 }
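// Editor's note: a worked run of the easy (non-wrapping) path above.  For
// imm == 0x00ab00cd: left == 8, so no_n1 == 0x000000cd; mid == 24, so
// no_n2 == 0.  That gives imm1shift == 16 and imm2shift == 32 (folded to 0),
// hence imm1 == 0xab rotated right by 16 and imm2 == 0xcd with no rotation,
// and indeed (0xab ror 16) | 0xcd == 0x00ab00cd.  The rotations are stored
// halved (imm1shift >> 1) because the instruction encodes them in units of
// two bits.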
   986 ALUOp
   987 jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest)
   988 {
   989     // find an alternate ALUOp to get the job done, and use a different imm.
   990     *negDest = dest;
   991     switch (op) {
   992       case op_mov:
   993         *imm = Imm32(~imm->value);
   994         return op_mvn;
   995       case op_mvn:
   996         *imm = Imm32(~imm->value);
   997         return op_mov;
   998       case op_and:
   999         *imm = Imm32(~imm->value);
  1000         return op_bic;
  1001       case op_bic:
  1002         *imm = Imm32(~imm->value);
  1003         return op_and;
  1004       case op_add:
  1005         *imm = Imm32(-imm->value);
  1006         return op_sub;
  1007       case op_sub:
  1008         *imm = Imm32(-imm->value);
  1009         return op_add;
  1010       case op_cmp:
  1011         *imm = Imm32(-imm->value);
  1012         return op_cmn;
  1013       case op_cmn:
  1014         *imm = Imm32(-imm->value);
  1015         return op_cmp;
  1016       case op_tst:
  1017         JS_ASSERT(dest == InvalidReg);
  1018         *imm = Imm32(~imm->value);
  1019         *negDest = ScratchRegister;
  1020         return op_bic;
  1021         // orr has orn on thumb2 only.
  1022       default:
  1023         return op_invalid;
  1027 bool
  1028 jit::can_dbl(ALUOp op)
  1030     // Some instructions can't be processed as two separate instructions,
  1031     // such as and, and possibly add (when we're setting condition codes).
  1032     // There is also some hilarity with *reading* condition codes.
  1033     // For example, adc dest, src1, 0xfff (add with carry) can be split up
  1034     // into adc dest, src1, 0xf00; add dest, dest, 0xff.  Since "reading" the
  1035     // condition code increments the result by one conditionally, that only needs
  1036     // to be done on one of the two instructions.
  1037     switch (op) {
  1038       case op_bic:
  1039       case op_add:
  1040       case op_sub:
  1041       case op_eor:
  1042       case op_orr:
  1043         return true;
  1044       default:
  1045         return false;
  1049 bool
  1050 jit::condsAreSafe(ALUOp op) {
  1051     // Even when we are setting condition codes, sometimes we can
  1052     // get away with splitting an operation into two.
  1053     // for example, if our immediate is 0x00ff00ff, and the operation is eors
  1054     // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should
  1055     // set all of its condition codes exactly the same as x ^ 0x00ff00ff.
  1056     // However, if the operation were adds,
  1057     // we cannot split this in half.  If the source on the add is
  1058     // 0xfff00ff0, the result should be 0xef10ef, but do we set the overflow bit
  1059     // or not?  Depending on which half is performed first (0x00ff0000
  1060     // or 0x000000ff) the V bit will be set differently, and *not* updating
  1061     // the V bit would be wrong.  Theoretically, the following should work
  1062     // adds r0, r1, 0x00ff0000;
  1063     // addsvs r0, r1, 0x000000ff;
  1064     // addvc r0, r1, 0x000000ff;
  1065     // but this is 3 instructions, and at that point, we might as well use
  1066     // something else.
  1067     switch(op) {
  1068       case op_bic:
  1069       case op_orr:
  1070       case op_eor:
  1071         return true;
  1072       default:
  1073         return false;
  1077 ALUOp
  1078 jit::getDestVariant(ALUOp op)
  1080     // all of the compare operations are dest-less variants of a standard
  1081     // operation.  Given the dest-less variant, return the dest-ful variant.
  1082     switch (op) {
  1083       case op_cmp:
  1084         return op_sub;
  1085       case op_cmn:
  1086         return op_add;
  1087       case op_tst:
  1088         return op_and;
  1089       case op_teq:
  1090         return op_eor;
  1091       default:
  1092         return op;
  1096 O2RegImmShift
  1097 jit::O2Reg(Register r) {
  1098     return O2RegImmShift(r, LSL, 0);
  1101 O2RegImmShift
  1102 jit::lsl(Register r, int amt)
  1104     JS_ASSERT(0 <= amt && amt <= 31);
  1105     return O2RegImmShift(r, LSL, amt);
  1108 O2RegImmShift
  1109 jit::lsr(Register r, int amt)
  1111     JS_ASSERT(1 <= amt && amt <= 32);
  1112     return O2RegImmShift(r, LSR, amt);
  1115 O2RegImmShift
  1116 jit::ror(Register r, int amt)
  1118     JS_ASSERT(1 <= amt && amt <= 31);
  1119     return O2RegImmShift(r, ROR, amt);
  1121 O2RegImmShift
  1122 jit::rol(Register r, int amt)
  1124     JS_ASSERT(1 <= amt && amt <= 31);
  1125     return O2RegImmShift(r, ROR, 32 - amt);
  1128 O2RegImmShift
  1129 jit::asr (Register r, int amt)
  1131     JS_ASSERT(1 <= amt && amt <= 32);
  1132     return O2RegImmShift(r, ASR, amt);
  1136 O2RegRegShift
  1137 jit::lsl(Register r, Register amt)
  1139     return O2RegRegShift(r, LSL, amt);
  1142 O2RegRegShift
  1143 jit::lsr(Register r, Register amt)
  1145     return O2RegRegShift(r, LSR, amt);
  1148 O2RegRegShift
  1149 jit::ror(Register r, Register amt)
  1151     return O2RegRegShift(r, ROR, amt);
  1154 O2RegRegShift
  1155 jit::asr (Register r, Register amt)
  1157     return O2RegRegShift(r, ASR, amt);
  1160 static js::jit::DoubleEncoder doubleEncoder;
  1162 /* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000);
  1164 js::jit::VFPImm::VFPImm(uint32_t top)
  1166     data = -1;
  1167     datastore::Imm8VFPImmData tmp;
  1168     if (doubleEncoder.lookup(top, &tmp))
  1169         data = tmp.encode();
  1172 BOffImm::BOffImm(Instruction &inst)
  1173   : data(inst.encode() & 0x00ffffff)
  1177 Instruction *
  1178 BOffImm::getDest(Instruction *src)
  1180     // TODO: It is probably worthwhile to verify that src is actually a branch
  1181     // NOTE: This does not explicitly shift the offset of the destination left by 2,
  1182     // since it is indexing into an array of instruction sized objects.
  1183     return &src[(((int32_t)data<<8)>>8) + 2];
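// Editor's note: data holds a signed 24-bit word offset, so the
// (data << 8) >> 8 dance sign-extends it.  The +2 is the usual ARM branch
// bias expressed in Instruction units: a branch at address A targets
// A + 8 + 4*offset, and since src already points at the branch, stepping
// forward by offset + 2 instruction slots lands on the same address.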
  1186 // VFPRegister implementation
  1187 VFPRegister
  1188 VFPRegister::doubleOverlay() const
  1190     JS_ASSERT(!_isInvalid);
  1191     if (kind != Double) {
  1192         JS_ASSERT(_code % 2 == 0);
  1193         return VFPRegister(_code >> 1, Double);
  1195     return *this;
  1197 VFPRegister
  1198 VFPRegister::singleOverlay() const
  1200     JS_ASSERT(!_isInvalid);
  1201     if (kind == Double) {
  1202         // There are no corresponding float registers for d16-d31
  1203         JS_ASSERT(_code < 16);
  1204         return VFPRegister(_code << 1, Single);
  1207     JS_ASSERT(_code % 2 == 0);
  1208     return VFPRegister(_code, Single);
  1211 VFPRegister
  1212 VFPRegister::sintOverlay() const
  1214     JS_ASSERT(!_isInvalid);
  1215     if (kind == Double) {
  1216         // There are no corresponding float registers for d16-d31
  1217         ASSERT(_code < 16);
  1218         return VFPRegister(_code << 1, Int);
  1221     JS_ASSERT(_code % 2 == 0);
  1222     return VFPRegister(_code, Int);
  1224 VFPRegister
  1225 VFPRegister::uintOverlay() const
  1227     JS_ASSERT(!_isInvalid);
  1228     if (kind == Double) {
  1229         // There are no corresponding float registers for d16-d31
  1230         ASSERT(_code < 16);
  1231         return VFPRegister(_code << 1, UInt);
  1234     JS_ASSERT(_code % 2 == 0);
  1235     return VFPRegister(_code, UInt);
  1238 bool
  1239 VFPRegister::isInvalid()
  1241     return _isInvalid;
  1244 bool
  1245 VFPRegister::isMissing()
  1247     JS_ASSERT(!_isInvalid);
  1248     return _isMissing;
  1252 bool
  1253 Assembler::oom() const
  1255     return m_buffer.oom() ||
  1256         !enoughMemory_ ||
  1257         jumpRelocations_.oom() ||
  1258         dataRelocations_.oom() ||
  1259         preBarriers_.oom();
  1262 bool
  1263 Assembler::addCodeLabel(CodeLabel label)
  1265     return codeLabels_.append(label);
  1268 // Size of the instruction stream, in bytes, including pools. This function expects
  1269 // all pools that need to be placed have been placed.  If they haven't, then we
  1270 // need to go and flush the pools :(
  1271 size_t
  1272 Assembler::size() const
  1274     return m_buffer.size();
  1276 // Size of the relocation table, in bytes.
  1277 size_t
  1278 Assembler::jumpRelocationTableBytes() const
  1280     return jumpRelocations_.length();
  1282 size_t
  1283 Assembler::dataRelocationTableBytes() const
  1285     return dataRelocations_.length();
  1288 size_t
  1289 Assembler::preBarrierTableBytes() const
  1291     return preBarriers_.length();
  1294 // Size of the data table, in bytes.
  1295 size_t
  1296 Assembler::bytesNeeded() const
  1298     return size() +
  1299         jumpRelocationTableBytes() +
  1300         dataRelocationTableBytes() +
  1301         preBarrierTableBytes();
  1304 // write a blob of binary into the instruction stream
  1305 BufferOffset
  1306 Assembler::writeInst(uint32_t x, uint32_t *dest)
  1308     if (dest == nullptr)
  1309         return m_buffer.putInt(x);
  1311     writeInstStatic(x, dest);
  1312     return BufferOffset();
  1314 void
  1315 Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
  1317     JS_ASSERT(dest != nullptr);
  1318     *dest = x;
  1321 BufferOffset
  1322 Assembler::align(int alignment)
  1324     BufferOffset ret;
  1325     if (alignment == 8) {
  1326         while (!m_buffer.isAligned(alignment)) {
  1327             BufferOffset tmp = as_nop();
  1328             if (!ret.assigned())
  1329                 ret = tmp;
  1331     } else {
  1332         flush();
  1333         JS_ASSERT((alignment & (alignment - 1)) == 0);
  1334         while (size() & (alignment-1)) {
  1335             BufferOffset tmp = as_nop();
  1336             if (!ret.assigned())
  1337                 ret = tmp;
  1340     return ret;
  1343 BufferOffset
  1344 Assembler::as_nop()
  1346     return writeInst(0xe320f000);
  1348 BufferOffset
  1349 Assembler::as_alu(Register dest, Register src1, Operand2 op2,
  1350                   ALUOp op, SetCond_ sc, Condition c, Instruction *instdest)
  1352     return writeInst((int)op | (int)sc | (int) c | op2.encode() |
  1353                      ((dest == InvalidReg) ? 0 : RD(dest)) |
  1354                      ((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest);
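// Editor's note: a sketch of what the OR above produces, assuming the usual
// ALU layout (cond in bits 31:28, opcode and S bit around bits 25:20, Rn in
// 19:16, Rd in 15:12, shifter operand in the low 12 bits) and the Imm8
// Operand2 wrapper declared in the header.  Something like
//   as_add(r0, r1, Imm8(1), SetCond, Always)
// ORs together op_add, SetCond, Always, RN(r1), RD(r0) and the encoded
// immediate, yielding the word for "adds r0, r1, #1".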
  1357 BufferOffset
  1358 Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest)
  1360     return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest);
  1363 BufferOffset
  1364 Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c)
  1366     return as_alu(dest, InvalidReg, op2, op_mvn, sc, c);
  1369 // Logical operations.
  1370 BufferOffset
  1371 Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1373     return as_alu(dest, src1, op2, op_and, sc, c);
  1375 BufferOffset
  1376 Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1378     return as_alu(dest, src1, op2, op_bic, sc, c);
  1380 BufferOffset
  1381 Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1383     return as_alu(dest, src1, op2, op_eor, sc, c);
  1385 BufferOffset
  1386 Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1388     return as_alu(dest, src1, op2, op_orr, sc, c);
  1391 // Mathematical operations.
  1392 BufferOffset
  1393 Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1395     return as_alu(dest, src1, op2, op_adc, sc, c);
  1397 BufferOffset
  1398 Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1400     return as_alu(dest, src1, op2, op_add, sc, c);
  1402 BufferOffset
  1403 Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1405     return as_alu(dest, src1, op2, op_sbc, sc, c);
  1407 BufferOffset
  1408 Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1410     return as_alu(dest, src1, op2, op_sub, sc, c);
  1412 BufferOffset
  1413 Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1415     return as_alu(dest, src1, op2, op_rsb, sc, c);
  1417 BufferOffset
  1418 Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c)
  1420     return as_alu(dest, src1, op2, op_rsc, sc, c);
  1423 // Test operations.
  1424 BufferOffset
  1425 Assembler::as_cmn(Register src1, Operand2 op2, Condition c)
  1427     return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c);
  1429 BufferOffset
  1430 Assembler::as_cmp(Register src1, Operand2 op2, Condition c)
  1432     return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c);
  1434 BufferOffset
  1435 Assembler::as_teq(Register src1, Operand2 op2, Condition c)
  1437     return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c);
  1439 BufferOffset
  1440 Assembler::as_tst(Register src1, Operand2 op2, Condition c)
  1442     return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c);
  1445 // Not quite ALU worthy, but useful nonetheless:
  1446 // These also have the issue of being formatted
  1447 // completely differently from the standard ALU operations.
  1448 BufferOffset
  1449 Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos)
  1451     JS_ASSERT(hasMOVWT());
  1452     return writeInst(0x03000000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
  1454 BufferOffset
  1455 Assembler::as_movt(Register dest, Imm16 imm, Condition c, Instruction *pos)
  1457     JS_ASSERT(hasMOVWT());
  1458     return writeInst(0x03400000 | c | imm.encode() | RD(dest), (uint32_t*)pos);
  1461 static const int mull_tag = 0x90;
  1463 BufferOffset
  1464 Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn,
  1465                      MULOp op, SetCond_ sc, Condition c)
  1468     return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag);
  1470 BufferOffset
  1471 Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c)
  1473     return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c);
  1475 BufferOffset
  1476 Assembler::as_mla(Register dest, Register acc, Register src1, Register src2,
  1477                   SetCond_ sc, Condition c)
  1479     return as_genmul(dest, acc, src1, src2, opm_mla, sc, c);
  1481 BufferOffset
  1482 Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c)
  1484     return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c);
  1486 BufferOffset
  1487 Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c)
  1489     return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c);
  1492 BufferOffset
  1493 Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2,
  1494                     SetCond_ sc, Condition c)
  1496     return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c);
  1499 BufferOffset
  1500 Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2,
  1501                     SetCond_ sc, Condition c)
  1503     return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c);
  1506 BufferOffset
  1507 Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2,
  1508                     SetCond_ sc, Condition c)
  1510     return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c);
  1513 BufferOffset
  1514 Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2,
  1515                     SetCond_ sc, Condition c)
  1517     return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c);
  1520 BufferOffset
  1521 Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c)
  1523     return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
  1526 BufferOffset
  1527 Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c)
  1529     return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
  1532 // Data transfer instructions: ldr, str, ldrb, strb.
  1533 // Using an int to differentiate between 8 bits and 32 bits is
  1534 // overkill, but meh
  1535 BufferOffset
  1536 Assembler::as_dtr(LoadStore ls, int size, Index mode,
  1537                   Register rt, DTRAddr addr, Condition c, uint32_t *dest)
  1539     JS_ASSERT (mode == Offset ||  (rt != addr.getBase() && pc != addr.getBase()));
  1540     JS_ASSERT(size == 32 || size == 8);
  1541     return writeInst( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c |
  1542                       RT(rt) | addr.encode(), dest);
  1545 class PoolHintData {
  1546   public:
  1547     enum LoadType {
  1548         // set 0 to bogus, since that is the value most likely to be
  1549         // accidentally left somewhere.
  1550         poolBOGUS  = 0,
  1551         poolDTR    = 1,
  1552         poolBranch = 2,
  1553         poolVDTR   = 3
  1554     };
  1556   private:
  1557     uint32_t   index    : 16;
  1558     uint32_t   cond     : 4;
  1559     LoadType   loadType : 2;
  1560     uint32_t   destReg  : 5;
  1561     uint32_t   destType : 1;
  1562     uint32_t   ONES     : 4;
  1564     static const uint32_t expectedOnes = 0xfu;
  1566   public:
  1567     void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const Register &destReg_) {
  1568         index = index_;
  1569         JS_ASSERT(index == index_);
  1570         cond = cond_ >> 28;
  1571         JS_ASSERT(cond == cond_ >> 28);
  1572         loadType = lt;
  1573         ONES = expectedOnes;
  1574         destReg = destReg_.code();
  1575         destType = 0;
  1577     void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) {
  1578         JS_ASSERT(destReg_.isFloat());
  1579         index = index_;
  1580         JS_ASSERT(index == index_);
  1581         cond = cond_ >> 28;
  1582         JS_ASSERT(cond == cond_ >> 28);
  1583         loadType = lt;
  1584         ONES = expectedOnes;
  1585         destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code();
  1586         destType = destReg_.isDouble();
  1588     Assembler::Condition getCond() {
  1589         return Assembler::Condition(cond << 28);
  1592     Register getReg() {
  1593         return Register::FromCode(destReg);
  1595     VFPRegister getVFPReg() {
  1596         VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg));
  1597         return destType ? r : r.singleOverlay();
  1600     int32_t getIndex() {
  1601         return index;
  1603     void setIndex(uint32_t index_) {
  1604         JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS);
  1605         index = index_;
  1606         JS_ASSERT(index == index_);
  1609     LoadType getLoadType() {
  1610         // If this *was* a poolBranch, but the branch has already been bound
  1611         // then this isn't going to look like a real poolhintdata, but we still
  1612         // want to lie about it so everyone knows it *used* to be a branch.
  1613         if (ONES != expectedOnes)
  1614             return PoolHintData::poolBranch;
  1615         return loadType;
  1618     bool isValidPoolHint() {
  1619         // Most instructions cannot have a condition that is 0xf. Notable exceptions are
  1620         // blx and the entire NEON instruction set. For the purposes of pool loads, and
  1621         // possibly patched branches, the possible instructions are ldr and b, neither of
  1622         // which can have a condition code of 0xf.
  1623         return ONES == expectedOnes;
  1625 };
  1627 union PoolHintPun {
  1628     PoolHintData phd;
  1629     uint32_t raw;
  1630 };
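// Editor's note: the union above lets a PoolHintData be stamped directly over
// the 32-bit instruction slot that will later become the pool load.  Because
// ONES forces the top four bits (the condition field) to 0xf, the placeholder
// can never be mistaken for a real ldr or b, which is exactly what
// isValidPoolHint() relies on.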
  1632 // Handles all of the other integral data transferring functions:
  1633 // ldrsb, ldrsh, ldrd, etc.
  1634 // size is given in bits.
  1635 BufferOffset
  1636 Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
  1637                      Register rt, EDtrAddr addr, Condition c, uint32_t *dest)
  1639     int extra_bits2 = 0;
  1640     int extra_bits1 = 0;
  1641     switch(size) {
  1642       case 8:
  1643         JS_ASSERT(IsSigned);
  1644         JS_ASSERT(ls!=IsStore);
  1645         extra_bits1 = 0x1;
  1646         extra_bits2 = 0x2;
  1647         break;
  1648       case 16:
  1649         //case 32:
  1650         // doesn't need to be handled-- it is handled by the default ldr/str
  1651         extra_bits2 = 0x01;
  1652         extra_bits1 = (ls == IsStore) ? 0 : 1;
  1653         if (IsSigned) {
  1654             JS_ASSERT(ls != IsStore);
  1655             extra_bits2 |= 0x2;
  1657         break;
  1658       case 64:
  1659         extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
  1660         extra_bits1 = 0;
  1661         break;
  1662       default:
  1663         MOZ_ASSUME_UNREACHABLE("SAY WHAT?");
  1665     return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
  1666                      addr.encode() | RT(rt) | mode | c, dest);
  1669 BufferOffset
  1670 Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
  1671                 DTMMode mode, DTMWriteBack wb, Condition c)
  1673     return writeInst(0x08000000 | RN(rn) | ls |
  1674                      mode | mask | c | wb);
  1677 BufferOffset
  1678 Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c)
  1680     PoolHintPun php;
  1681     php.phd.init(0, c, PoolHintData::poolDTR, dest);
  1682     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value);
  1685 void
  1686 Assembler::as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data)
  1688     JS_ASSERT(addr->is<InstLDR>());
  1689     int32_t offset = addr->encode() & 0xfff;
  1690     if ((addr->encode() & IsUp) != IsUp)
  1691         offset = -offset;
  1692     char * rawAddr = reinterpret_cast<char*>(addr);
  1693     uint32_t * dest = reinterpret_cast<uint32_t*>(&rawAddr[offset + 8]);
  1694     *dest = data;
  1695     Condition orig_cond;
  1696     addr->extractCond(&orig_cond);
  1697     JS_ASSERT(orig_cond == c);
  1700 BufferOffset
  1701 Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c)
  1703     PoolHintPun php;
  1704     php.phd.init(0, c, PoolHintData::poolBranch, pc);
  1705     m_buffer.markNextAsBranch();
  1706     BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe);
  1707     // If this label is already bound, then immediately replace the stub load with
  1708     // a correct branch.
  1709     if (label->bound()) {
  1710         BufferOffset dest(label);
  1711         as_b(dest.diffB<BOffImm>(ret), c, ret);
  1712     } else {
  1713         label->use(ret.getOffset());
  1715     return ret;
  1718 BufferOffset
  1719 Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c)
  1721     JS_ASSERT(dest.isDouble());
  1722     PoolHintPun php;
  1723     php.phd.init(0, c, PoolHintData::poolVDTR, dest);
  1724     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value);
  1727 struct PaddedFloat32
  1729     float value;
  1730     uint32_t padding;
  1731 };
  1732 JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double));
  1734 BufferOffset
  1735 Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c)
  1737     /*
  1738      * Insert floats into the double pool as they have the same limitations on
  1739      * immediate offset.  This wastes 4 bytes padding per float.  An alternative
  1740      * would be to have a separate pool for floats.
  1741      */
  1742     JS_ASSERT(dest.isSingle());
  1743     PoolHintPun php;
  1744     php.phd.init(0, c, PoolHintData::poolVDTR, dest);
  1745     PaddedFloat32 pf = { value, 0 };
  1746     return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf);
  1749 // Pool callbacks stuff:
  1750 void
  1751 Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token)
  1753     uint32_t *load = (uint32_t*) load_;
  1754     PoolHintPun php;
  1755     php.raw = *load;
  1756     php.phd.setIndex(token);
  1757     *load = php.raw;
  1759 // patchConstantPoolLoad takes the address of the instruction that wants to be patched, and
  1760 // the address of the start of the constant pool, and figures things out from there.
  1761 bool
  1762 Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
  1764     PoolHintData data = *(PoolHintData*)loadAddr;
  1765     uint32_t *instAddr = (uint32_t*) loadAddr;
  1766     int offset = (char *)constPoolAddr - (char *)loadAddr;
  1767     switch(data.getLoadType()) {
  1768       case PoolHintData::poolBOGUS:
  1769         MOZ_ASSUME_UNREACHABLE("bogus load type!");
  1770       case PoolHintData::poolDTR:
  1771         dummy->as_dtr(IsLoad, 32, Offset, data.getReg(),
  1772                       DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr);
  1773         break;
  1774       case PoolHintData::poolBranch:
  1775         // Either this used to be a poolBranch whose label was already bound, so it
  1776         // was replaced with a real branch, or the label may still be bound in the
  1777         // future. If it is going to be bound later, then the actual bits written here
  1778         // don't matter (except the condition code, since that is always preserved
  1779         // across patchings), but if it never gets bound we want to make sure this
  1780         // is a load from the pool entry (and the pool entry
  1781         // should be nullptr so it will crash).
  1782         if (data.isValidPoolHint()) {
  1783             dummy->as_dtr(IsLoad, 32, Offset, pc,
  1784                           DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)),
  1785                           data.getCond(), instAddr);
  1786         }
  1787         break;
  1788       case PoolHintData::poolVDTR: {
  1789         VFPRegister dest = data.getVFPReg();
  1790         int32_t imm = offset + (8 * data.getIndex()) - 8;
  1791         if (imm < -1023 || imm > 1023)
  1792             return false;
  1793         dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr);
  1794         break;
  1795       }
  1796     }
  1797     return true;
  1798 }
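// Worked formula (illustrative): the DtrOffImm/VFPOffImm computed above is the
// pc-relative displacement from the load to its pool slot.  ARM reads pc as
// the load's own address plus 8, int32 pool entries are 4 bytes apart and
// double entries 8 bytes apart, hence
//     offset + 4 * index - 8   (poolDTR / poolBranch)
//     offset + 8 * index - 8   (poolVDTR)
// where offset is constPoolAddr - loadAddr.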
  1800 uint32_t
  1801 Assembler::placeConstantPoolBarrier(int offset)
  1802 {
  1803     // BUG: 700526
  1804     // This is still an active path; however, we do not hit it in the test
  1805     // suite at all.
  1806     MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover");
  1807 }
  1809 // Control flow stuff:
  1811 // bx can *only* branch to a register,
  1812 // never to an immediate.
  1813 BufferOffset
  1814 Assembler::as_bx(Register r, Condition c, bool isPatchable)
  1815 {
  1816     BufferOffset ret = writeInst(((int) c) | op_bx | r.code());
  1817     if (c == Always && !isPatchable)
  1818         m_buffer.markGuard();
  1819     return ret;
  1820 }
  1821 void
  1822 Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool)
  1823 {
  1824     BOffImm off = afterPool.diffB<BOffImm>(branch);
  1825     *dest = InstBImm(off, Always);
  1826 }
  1827 // Branch can branch to an immediate *or* to a register.
  1828 // Branches to immediates are pc-relative; branches to registers
  1829 // are absolute.
  1830 BufferOffset
  1831 Assembler::as_b(BOffImm off, Condition c, bool isPatchable)
  1832 {
  1833     m_buffer.markNextAsBranch();
  1834     BufferOffset ret = writeInst(((int)c) | op_b | off.encode());
  1835     if (c == Always && !isPatchable)
  1836         m_buffer.markGuard();
  1837     return ret;
  1838 }
  1840 BufferOffset
  1841 Assembler::as_b(Label *l, Condition c, bool isPatchable)
  1842 {
  1843     if (m_buffer.oom()) {
  1844         BufferOffset ret;
  1845         return ret;
  1846     }
  1847     m_buffer.markNextAsBranch();
  1848     if (l->bound()) {
  1849         BufferOffset ret = as_nop();
  1850         as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
  1851         return ret;
  1852     }
  1854     int32_t old;
  1855     BufferOffset ret;
  1856     if (l->used()) {
  1857         old = l->offset();
  1858         // This will currently throw an assertion if we couldn't actually
  1859         // encode the offset of the branch.
  1860         if (!BOffImm::isInRange(old)) {
  1861             m_buffer.fail_bail();
  1862             return ret;
  1863         }
  1864         ret = as_b(BOffImm(old), c, isPatchable);
  1865     } else {
  1866         old = LabelBase::INVALID_OFFSET;
  1867         BOffImm inv;
  1868         ret = as_b(inv, c, isPatchable);
  1869     }
  1870     DebugOnly<int32_t> check = l->use(ret.getOffset());
  1871     JS_ASSERT(check == old);
  1872     return ret;
  1873 }
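// Sketch of the unbound-label protocol used above (descriptive only): every
// branch against an unbound label encodes the offset of the previous use in
// its immediate field, so the label's uses form a singly linked list threaded
// through the instruction stream; l->use() returns the old head so the
// invariant can be asserted, and bind()/retarget() below walk the chain via
// nextLink() and rewrite each branch to the real target.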
  1874 BufferOffset
  1875 Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
  1876 {
  1877     *editSrc(inst) = InstBImm(off, c);
  1878     return inst;
  1879 }
  1881 // blx can go to either an immediate or a register.
  1882 // When blx'ing to a register, we change processor state
  1883 // depending on the low bit of the register;
  1884 // when blx'ing to an immediate, we *always* change processor state.
  1886 BufferOffset
  1887 Assembler::as_blx(Register r, Condition c)
  1888 {
  1889     return writeInst(((int) c) | op_blx | r.code());
  1890 }
  1892 // bl can only branch to a pc-relative immediate offset.
  1893 // It cannot change the processor state.
  1894 BufferOffset
  1895 Assembler::as_bl(BOffImm off, Condition c)
  1896 {
  1897     m_buffer.markNextAsBranch();
  1898     return writeInst(((int)c) | op_bl | off.encode());
  1899 }
  1901 BufferOffset
  1902 Assembler::as_bl(Label *l, Condition c)
  1903 {
  1904     if (m_buffer.oom()) {
  1905         BufferOffset ret;
  1906         return ret;
  1907     }
  1908     m_buffer.markNextAsBranch();
  1909     if (l->bound()) {
  1910         BufferOffset ret = as_nop();
  1911         as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret);
  1912         return ret;
  1913     }
  1915     int32_t old;
  1916     BufferOffset ret;
  1917     // See if the list was empty :(
  1918     if (l->used()) {
  1919         // This will currently throw an assertion if we couldn't actually
  1920         // encode the offset of the branch.
  1921         old = l->offset();
  1922         if (!BOffImm::isInRange(old)) {
  1923             m_buffer.fail_bail();
  1924             return ret;
  1925         }
  1926         ret = as_bl(BOffImm(old), c);
  1927     } else {
  1928         old = LabelBase::INVALID_OFFSET;
  1929         BOffImm inv;
  1930         ret = as_bl(inv, c);
  1931     }
  1932     DebugOnly<int32_t> check = l->use(ret.getOffset());
  1933     JS_ASSERT(check == old);
  1934     return ret;
  1935 }
  1936 BufferOffset
  1937 Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst)
  1938 {
  1939     *editSrc(inst) = InstBLImm(off, c);
  1940     return inst;
  1941 }
  1943 BufferOffset
  1944 Assembler::as_mrs(Register r, Condition c)
  1945 {
  1946     return writeInst(0x010f0000 | int(c) | RD(r));
  1947 }
  1949 BufferOffset
  1950 Assembler::as_msr(Register r, Condition c)
  1951 {
  1952     // Hardcode the 'mask' field to 0b11 for now; it is bits 18 and 19, which are the two high bits of the 'c' in this constant.
  1953     JS_ASSERT((r.code() & ~0xf) == 0);
  1954     return writeInst(0x012cf000 | int(c) | r.code());
  1955 }
  1957 // VFP instructions!
  1958 enum vfp_tags {
  1959     vfp_tag   = 0x0C000A00,
  1960     vfp_arith = 0x02000000
  1961 };
  1962 BufferOffset
  1963 Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest)
  1964 {
  1965     JS_ASSERT((sz & blob) == 0);
  1966     JS_ASSERT((vfp_tag & blob) == 0);
  1967     return writeInst(vfp_tag | sz | blob, dest);
  1968 }
  1970 // Unityped variants: all registers hold the same type (IEEE 754 single/double).
  1971 // Notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
  1972 BufferOffset
  1973 Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  1974                   VFPOp op, Condition c)
  1975 {
  1976     // Make sure we believe that all of our operands are the same kind
  1977     JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
  1978     JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
  1979     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
  1980     return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c);
  1981 }
  1983 BufferOffset
  1984 Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  1985                  Condition c)
  1986 {
  1987     return as_vfp_float(vd, vn, vm, opv_add, c);
  1988 }
  1990 BufferOffset
  1991 Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  1992                  Condition c)
  1993 {
  1994     return as_vfp_float(vd, vn, vm, opv_div, c);
  1995 }
  1997 BufferOffset
  1998 Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  1999                  Condition c)
  2000 {
  2001     return as_vfp_float(vd, vn, vm, opv_mul, c);
  2002 }
  2004 BufferOffset
  2005 Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  2006                   Condition c)
  2007 {
  2008     return as_vfp_float(vd, vn, vm, opv_mul, c);
  2009     MOZ_ASSUME_UNREACHABLE("Feature NYI");
  2010 }
  2012 BufferOffset
  2013 Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  2014                   Condition c)
  2015 {
  2016     MOZ_ASSUME_UNREACHABLE("Feature NYI");
  2017 }
  2019 BufferOffset
  2020 Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  2021                   Condition c)
  2022 {
  2023     MOZ_ASSUME_UNREACHABLE("Feature NYI");
  2024     return BufferOffset();
  2025 }
  2027 BufferOffset
  2028 Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c)
  2029 {
  2030     return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c);
  2031 }
  2033 BufferOffset
  2034 Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c)
  2035 {
  2036     return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c);
  2037 }
  2039 BufferOffset
  2040 Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c)
  2041 {
  2042     return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c);
  2043 }
  2045 BufferOffset
  2046 Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
  2047                  Condition c)
  2048 {
  2049     return as_vfp_float(vd, vn, vm, opv_sub, c);
  2050 }
  2052 BufferOffset
  2053 Assembler::as_vcmp(VFPRegister vd, VFPRegister vm,
  2054                  Condition c)
  2055 {
  2056     return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c);
  2057 }
  2058 BufferOffset
  2059 Assembler::as_vcmpz(VFPRegister vd, Condition c)
  2060 {
  2061     return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c);
  2062 }
  2064 // Specifically, a move between two same-sized registers.
  2065 BufferOffset
  2066 Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c)
  2067 {
  2068     return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c);
  2069 }
  2070 // xfer between core and VFP registers.
  2072 // Unlike the next function, moving between the core registers and vfp
  2073 // registers can't be *that* properly typed, since I don't want to
  2074 // munge the type VFPRegister to also include core registers.  Thus, the core
  2075 // and vfp registers are passed in based on their type, and src/dest is
  2076 // determined by the FloatToCore_ argument.
  2078 BufferOffset
  2079 Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
  2080                     Condition c, int idx)
  2081 {
  2082     vfp_size sz = isSingle;
  2083     if (vm.isDouble()) {
  2084         // Technically, this can be done with a vmov à la ARM ARM under vmov;
  2085         // however, that requires at least an extra bit saying if the
  2086         // operation should be performed on the lower or upper half of the
  2087         // double.  Moving a single to/from 2N/2N+1 isn't equivalent,
  2088         // since there are 32 single registers and 32 double registers,
  2089         // so there is no way to encode the last 16 double registers.
  2090         sz = isDouble;
  2091         JS_ASSERT(idx == 0 || idx == 1);
  2092         // If we are transferring a single half of the double
  2093         // then it must be moving a VFP reg to a core reg.
  2094         if (vt2 == InvalidReg)
  2095             JS_ASSERT(f2c == FloatToCore);
  2096         idx = idx << 21;
  2097     } else {
  2098         JS_ASSERT(idx == 0);
  2099     }
  2100     VFPXferSize xfersz = WordTransfer;
  2101     uint32_t (*encodeVFP)(VFPRegister) = VN;
  2102     if (vt2 != InvalidReg) {
  2103         // We are doing a 64 bit transfer.
  2104         xfersz = DoubleTransfer;
  2105         encodeVFP = VM;
  2106     }
  2108     return writeVFPInst(sz, xfersz | f2c | c |
  2109                         RT(vt1) | maybeRN(vt2) | encodeVFP(vm) | idx);
  2110 }
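// Usage sketch (illustrative, assuming the usual r0/r1/d0 register names from
// Assembler-arm.h): moving a double out to a core register pair uses the
// 64-bit transfer form,
//     as_vxfer(r0, r1, d0, FloatToCore, Always, 0);
// while a single <-> core move passes InvalidReg as vt2 and takes the 32-bit
// WordTransfer path.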
  2111 enum vcvt_destFloatness {
  2112     toInteger = 1 << 18,
  2113     toFloat  = 0 << 18
  2114 };
  2115 enum vcvt_toZero {
  2116     toZero = 1 << 7, // use the default rounding mode, which truncates (rounds toward zero)
  2117     toFPSCR = 0 << 7 // use whatever rounding mode the fpscr specifies
  2118 };
  2119 enum vcvt_Signedness {
  2120     toSigned   = 1 << 16,
  2121     toUnsigned = 0 << 16,
  2122     fromSigned   = 1 << 7,
  2123     fromUnsigned = 0 << 7
  2124 };
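// Illustrative combination (not emitted anywhere by itself): a double ->
// signed-int conversion with truncation selects toInteger | toSigned | toZero,
// which as_vcvt below ORs into its 0x02B80040 base encoding, whereas a
// conversion honouring the FPSCR rounding mode would use toFPSCR instead.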
  2126 // Our encoding actually allows just the src and the dest (and their types)
  2127 // to uniquely specify the encoding that we are going to use.
  2128 BufferOffset
  2129 Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
  2130                    Condition c)
  2131 {
  2132     // Unlike other cases, the source and dest types cannot be the same
  2133     JS_ASSERT(!vd.equiv(vm));
  2134     vfp_size sz = isDouble;
  2135     if (vd.isFloat() && vm.isFloat()) {
  2136         // Doing a float -> float conversion
  2137         if (vm.isSingle())
  2138             sz = isSingle;
  2139         return writeVFPInst(sz, c | 0x02B700C0 |
  2140                             VM(vm) | VD(vd));
  2141     }
  2143     // At least one of the registers should be a float.
  2144     vcvt_destFloatness destFloat;
  2145     vcvt_Signedness opSign;
  2146     vcvt_toZero doToZero = toFPSCR;
  2147     JS_ASSERT(vd.isFloat() || vm.isFloat());
  2148     if (vd.isSingle() || vm.isSingle()) {
  2149         sz = isSingle;
  2150     }
  2151     if (vd.isFloat()) {
  2152         destFloat = toFloat;
  2153         opSign = (vm.isSInt()) ? fromSigned : fromUnsigned;
  2154     } else {
  2155         destFloat = toInteger;
  2156         opSign = (vd.isSInt()) ? toSigned : toUnsigned;
  2157         doToZero = useFPSCR ? toFPSCR : toZero;
  2158     }
  2159     return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
  2160 }
  2162 BufferOffset
  2163 Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c)
  2164 {
  2165     JS_ASSERT(vd.isFloat());
  2166     uint32_t sx = 0x1;
  2167     vfp_size sf = vd.isDouble() ? isDouble : isSingle;
  2168     int32_t imm5 = fixedPoint;
  2169     imm5 = (sx ? 32 : 16) - imm5;
  2170     JS_ASSERT(imm5 >= 0);
  2171     imm5 = imm5 >> 1 | (imm5 & 1) << 5;
  2172     return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
  2173                         (!isSigned) << 16 | imm5 | c);
  2174 }
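// Worked example (illustrative): for a 16-bit fraction in a 32-bit container
// (sx == 1), imm5 = 32 - 16 = 16, which the line above encodes as
// (16 >> 1) | ((16 & 1) << 5) = 8, i.e. the split imm5 field of the
// fixed-point vcvt encoding.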
  2176 // xfer between VFP and memory
  2177 BufferOffset
  2178 Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
  2179                    Condition c /* vfp doesn't have a wb option*/,
  2180                    uint32_t *dest)
  2181 {
  2182     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
  2183     return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest);
  2184 }
  2186 // VFP's ldm/stm work differently from the standard arm ones.
  2187 // You can only transfer a range of registers.
  2189 BufferOffset
  2190 Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
  2191                  /*also has update conditions*/Condition c)
  2192 {
  2193     JS_ASSERT(length <= 16 && length >= 0);
  2194     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
  2196     if (vd.isDouble())
  2197         length *= 2;
  2199     return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) |
  2200                         length |
  2201                         dtmMode | dtmUpdate | dtmCond);
  2202 }
  2204 BufferOffset
  2205 Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c)
  2206 {
  2207     JS_ASSERT(imm.isValid());
  2208     vfp_size sz = vd.isDouble() ? isDouble : isSingle;
  2209     return writeVFPInst(sz,  c | imm.encode() | VD(vd) | 0x02B00000);
  2210 }
  2212 BufferOffset
  2213 Assembler::as_vmrs(Register r, Condition c)
  2214 {
  2215     return writeInst(c | 0x0ef10a10 | RT(r));
  2216 }
  2218 BufferOffset
  2219 Assembler::as_vmsr(Register r, Condition c)
  2220 {
  2221     return writeInst(c | 0x0ee10a10 | RT(r));
  2222 }
  2224 bool
  2225 Assembler::nextLink(BufferOffset b, BufferOffset *next)
  2226 {
  2227     Instruction branch = *editSrc(b);
  2228     JS_ASSERT(branch.is<InstBranchImm>());
  2230     BOffImm destOff;
  2231     branch.as<InstBranchImm>()->extractImm(&destOff);
  2232     if (destOff.isInvalid())
  2233         return false;
  2235     // Propagate the next link back to the caller, by
  2236     // constructing a new BufferOffset into the space they
  2237     // provided.
  2238     new (next) BufferOffset(destOff.decode());
  2239     return true;
  2240 }
  2242 void
  2243 Assembler::bind(Label *label, BufferOffset boff)
  2244 {
  2245     if (label->used()) {
  2246         bool more;
  2247         // If our caller didn't give us an explicit target to bind to,
  2248         // then we want to bind to the location of the next instruction.
  2249         BufferOffset dest = boff.assigned() ? boff : nextOffset();
  2250         BufferOffset b(label);
  2251         do {
  2252             BufferOffset next;
  2253             more = nextLink(b, &next);
  2254             Instruction branch = *editSrc(b);
  2255             Condition c;
  2256             branch.extractCond(&c);
  2257             if (branch.is<InstBImm>())
  2258                 as_b(dest.diffB<BOffImm>(b), c, b);
  2259             else if (branch.is<InstBLImm>())
  2260                 as_bl(dest.diffB<BOffImm>(b), c, b);
  2261             else
  2262                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
  2263             b = next;
  2264         } while (more);
  2265     }
  2266     label->bind(nextOffset().getOffset());
  2267 }
  2269 void
  2270 Assembler::bind(RepatchLabel *label)
  2271 {
  2272     BufferOffset dest = nextOffset();
  2273     if (label->used()) {
  2274         // If the label has a use, then change this use to refer to
  2275         // the bound label;
  2276         BufferOffset branchOff(label->offset());
  2277         // Since this was created with a RepatchLabel, the value written in the
  2278         // instruction stream is not branch shaped, it is PoolHintData shaped.
  2279         Instruction *branch = editSrc(branchOff);
  2280         PoolHintPun p;
  2281         p.raw = branch->encode();
  2282         Condition cond;
  2283         if (p.phd.isValidPoolHint())
  2284             cond = p.phd.getCond();
  2285         else
  2286             branch->extractCond(&cond);
  2287         as_b(dest.diffB<BOffImm>(branchOff), cond, branchOff);
  2288     }
  2289     label->bind(dest.getOffset());
  2290 }
  2292 void
  2293 Assembler::retarget(Label *label, Label *target)
  2294 {
  2295     if (label->used()) {
  2296         if (target->bound()) {
  2297             bind(label, BufferOffset(target));
  2298         } else if (target->used()) {
  2299             // The target is not bound but used. Prepend label's branch list
  2300             // onto target's.
  2301             BufferOffset labelBranchOffset(label);
  2302             BufferOffset next;
  2304             // Find the head of the use chain for label.
  2305             while (nextLink(labelBranchOffset, &next))
  2306                 labelBranchOffset = next;
  2308             // Then patch the head of label's use chain to the tail of
  2309             // target's use chain, prepending the entire use chain of target.
  2310             Instruction branch = *editSrc(labelBranchOffset);
  2311             Condition c;
  2312             branch.extractCond(&c);
  2313             int32_t prev = target->use(label->offset());
  2314             if (branch.is<InstBImm>())
  2315                 as_b(BOffImm(prev), c, labelBranchOffset);
  2316             else if (branch.is<InstBLImm>())
  2317                 as_bl(BOffImm(prev), c, labelBranchOffset);
  2318             else
  2319                 MOZ_ASSUME_UNREACHABLE("crazy fixup!");
  2320         } else {
  2321             // The target is unbound and unused.  We can just take the head of
  2322             // the list hanging off of label, and dump that into target.
  2323             DebugOnly<uint32_t> prev = target->use(label->offset());
  2324             JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
  2325         }
  2326     }
  2327     label->reset();
  2328 }
  2332 void dbg_break() {}
  2333 static int stopBKPT = -1;
  2334 void
  2335 Assembler::as_bkpt()
  2336 {
  2337     // This is a count of how many times a breakpoint instruction has been generated.
  2338     // It is embedded into the instruction for debugging purposes.  gdb will print "bkpt xxx"
  2339     // when you attempt to disassemble a breakpoint with the number xxx embedded into it.
  2340     // If this breakpoint is being hit, then you can run (in gdb)
  2341     // >b dbg_break
  2342     // >b main
  2343     // >commands
  2344     // >set stopBKPT = xxx
  2345     // >c
  2346     // >end
  2348     // which will set a breakpoint on the function dbg_break above, and
  2349     // set a scripted breakpoint on main that will set the (otherwise unmodified)
  2350     // value to the number of the breakpoint, so dbg_break will actually be called.
  2351     // Finally, when you run the executable, execution will halt when that
  2352     // breakpoint is generated.
  2353     static int hit = 0;
  2354     if (stopBKPT == hit)
  2355         dbg_break();
  2356     writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0)<<4));
  2357     hit++;
  2358 }
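// Worked example (illustrative): the bkpt count is split across the encoding,
// with bits 4-15 of the count placed in bits 8-19 and the low nibble in
// bits 0-3, so for hit == 0x1234 the writeInst() above produces
//     0xe1200070 | 0x4 | (0x123 << 8) == 0xe1212374.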
  2360 void
  2361 Assembler::dumpPool()
  2362 {
  2363     m_buffer.flushPool();
  2364 }
  2366 void
  2367 Assembler::flushBuffer()
  2368 {
  2369     m_buffer.flushPool();
  2370 }
  2372 void
  2373 Assembler::enterNoPool()
  2374 {
  2375     m_buffer.enterNoPool();
  2376 }
  2378 void
  2379 Assembler::leaveNoPool()
  2380 {
  2381     m_buffer.leaveNoPool();
  2382 }
  2384 ptrdiff_t
  2385 Assembler::getBranchOffset(const Instruction *i_)
  2386 {
  2387     if (!i_->is<InstBranchImm>())
  2388         return 0;
  2390     InstBranchImm *i = i_->as<InstBranchImm>();
  2391     BOffImm dest;
  2392     i->extractImm(&dest);
  2393     return dest.decode();
  2394 }
  2395 void
  2396 Assembler::retargetNearBranch(Instruction *i, int offset, bool final)
  2397 {
  2398     Assembler::Condition c;
  2399     i->extractCond(&c);
  2400     retargetNearBranch(i, offset, c, final);
  2401 }
  2403 void
  2404 Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final)
  2405 {
  2406     // Retargeting calls is totally unsupported!
  2407     JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>());
  2408     if (i->is<InstBLImm>())
  2409         new (i) InstBLImm(BOffImm(offset), cond);
  2410     else
  2411         new (i) InstBImm(BOffImm(offset), cond);
  2413     // Flush the cache, since an instruction was overwritten
  2414     if (final)
  2415         AutoFlushICache::flush(uintptr_t(i), 4);
  2416 }
  2418 void
  2419 Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond)
  2420 {
  2421     int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
  2422     if (!i->is<InstLDR>()) {
  2423         new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
  2424         AutoFlushICache::flush(uintptr_t(i), 4);
  2425     }
  2426     *slot = dest;
  2427 }
  2430 struct PoolHeader : Instruction {
  2431     struct Header
  2432     {
  2433         // size should take into account the pool header.
  2434         // size is in units of Instruction (4 bytes), not bytes.
  2435         uint32_t size : 15;
  2436         bool isNatural : 1;
  2437         uint32_t ONES : 16;
  2439         Header(int size_, bool isNatural_)
  2440           : size(size_),
  2441             isNatural(isNatural_),
  2442             ONES(0xffff)
  2443         { }
  2445         Header(const Instruction *i) {
  2446             JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
  2447             memcpy(this, i, sizeof(Header));
  2448             JS_ASSERT(ONES == 0xffff);
  2449         }
  2451         uint32_t raw() const {
  2452             JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
  2453             uint32_t dest;
  2454             memcpy(&dest, this, sizeof(Header));
  2455             return dest;
  2456         }
  2457     };
  2459     PoolHeader(int size_, bool isNatural_)
  2460       : Instruction(Header(size_, isNatural_).raw(), true)
  2461     { }
  2463     uint32_t size() const {
  2464         Header tmp(this);
  2465         return tmp.size;
  2466     }
  2467     uint32_t isNatural() const {
  2468         Header tmp(this);
  2469         return tmp.isNatural;
  2470     }
  2471     static bool isTHIS(const Instruction &i) {
  2472         return (*i.raw() & 0xffff0000) == 0xffff0000;
  2473     }
  2474     static const PoolHeader *asTHIS(const Instruction &i) {
  2475         if (!isTHIS(i))
  2476             return nullptr;
  2477         return static_cast<const PoolHeader*>(&i);
  2478     }
  2479 };
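// A minimal sketch (the helper is illustrative and not part of the original
// file) of the word layout produced by PoolHeader::Header above, assuming the
// usual little-endian bit-field allocation: the size in instructions sits in
// the low 15 bits, the isNatural flag in bit 15, and the 0xffff marker that
// isTHIS() tests occupies the high half.
static inline uint32_t
ExamplePoolHeaderWord(uint32_t sizeInInsts, bool isNatural)
{
    JS_ASSERT(sizeInInsts < (1u << 15));
    return (0xffffu << 16) | (uint32_t(isNatural) << 15) | sizeInInsts;
}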
  2482 void
  2483 Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural)
  2484 {
  2485     STATIC_ASSERT(sizeof(PoolHeader) == 4);
  2486     uint8_t *pool = start+4;
  2487     // Go through the usual rigmarole to get the size of the pool.
  2488     pool = p[0].addPoolSize(pool);
  2489     pool = p[1].addPoolSize(pool);
  2490     pool = p[1].other->addPoolSize(pool);
  2491     pool = p[0].other->addPoolSize(pool);
  2492     uint32_t size = pool - start;
  2493     JS_ASSERT((size & 3) == 0);
  2494     size = size >> 2;
  2495     JS_ASSERT(size < (1 << 15));
  2496     PoolHeader header(size, isNatural);
  2497     *(PoolHeader*)start = header;
  2498 }
  2501 void
  2502 Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural)
  2503 {
  2504     return;
  2505 }
  2507 // The size of an arbitrary 32-bit call in the instruction stream.
  2508 // On ARM this sequence is |pc = ldr pc - 4; imm32| given that we
  2509 // never reach the imm32.
  2510 uint32_t
  2511 Assembler::patchWrite_NearCallSize()
  2512 {
  2513     return sizeof(uint32_t);
  2514 }
  2515 void
  2516 Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
  2517 {
  2518     Instruction *inst = (Instruction *) start.raw();
  2519     // Overwrite whatever instruction used to be here with a call.
  2520     // Since the destination is in the same function, it will be within range of the
  2521     // bl instruction's 24-bit (<< 2) immediate.
  2522     uint8_t *dest = toCall.raw();
  2523     new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
  2524     // Ensure everyone sees the code that was just written into memory.
  2526     AutoFlushICache::flush(uintptr_t(inst), 4);
  2527 }
  2529 void
  2530 Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
  2531                                    PatchedImmPtr expectedValue)
  2532 {
  2533     Instruction *ptr = (Instruction *) label.raw();
  2534     InstructionIterator iter(ptr);
  2535     Register dest;
  2536     Assembler::RelocStyle rs;
  2537     DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs);
  2538     JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value));
  2539     reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)),
  2540                                                                  dest, Always, rs, ptr);
  2541     // L_LDR won't cause any instructions to be updated.
  2542     if (rs != L_LDR) {
  2543         AutoFlushICache::flush(uintptr_t(ptr), 4);
  2544         AutoFlushICache::flush(uintptr_t(ptr->next()), 4);
  2545     }
  2546 }
  2548 void
  2549 Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
  2550 {
  2551     patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value));
  2552 }
  2554 // This just stomps over memory with 32 bits of raw data. Its purpose is to
  2555 // overwrite the call of JITed code with 32 bits worth of an offset. This
  2556 // is only meant to function on code that has been invalidated, so it should
  2557 // be totally safe. Since that instruction will never be executed again, an
  2558 // ICache flush should not be necessary.
  2559 void
  2560 Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
  2561     // Raw is going to be the return address.
  2562     uint32_t *raw = (uint32_t*)label.raw();
  2563     // Overwrite the 4 bytes before the return address, which will
  2564     // end up being the call instruction.
  2565     *(raw-1) = imm.value;
  2566 }
  2569 uint8_t *
  2570 Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
  2571 {
  2572     Instruction *inst = reinterpret_cast<Instruction*>(inst_);
  2573     if (count != nullptr)
  2574         *count += sizeof(Instruction);
  2575     return reinterpret_cast<uint8_t*>(inst->next());
  2576 }
  2578 static bool
  2579 InstIsGuard(Instruction *inst, const PoolHeader **ph)
  2580 {
  2581     Assembler::Condition c;
  2582     inst->extractCond(&c);
  2583     if (c != Assembler::Always)
  2584         return false;
  2585     if (!(inst->is<InstBXReg>() || inst->is<InstBImm>()))
  2586         return false;
  2587     // See if the next instruction is a pool header.
  2588     *ph = (inst+1)->as<const PoolHeader>();
  2589     return *ph != nullptr;
  2590 }
  2592 static bool
  2593 InstIsBNop(Instruction *inst) {
  2594     // In some special situations, it is necessary to insert a NOP
  2595     // into the instruction stream that nobody knows about.  Since nobody should know about
  2596     // it, make sure it gets skipped when Instruction::next() is called.
  2597     // This generates a very specific nop, namely a branch to the next instruction.
  2598     Assembler::Condition c;
  2599     inst->extractCond(&c);
  2600     if (c != Assembler::Always)
  2601         return false;
  2602     if (!inst->is<InstBImm>())
  2603         return false;
  2604     InstBImm *b = inst->as<InstBImm>();
  2605     BOffImm offset;
  2606     b->extractImm(&offset);
  2607     return offset.decode() == 4;
  2608 }
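// Descriptive note (illustrative): such a nop is simply "b .+4".  The decoded
// offset of 4 is measured from the branch instruction itself, so it selects
// exactly the following instruction, which is what lets Instruction::next()
// below skip over it.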
  2610 static bool
  2611 InstIsArtificialGuard(Instruction *inst, const PoolHeader **ph)
  2612 {
  2613     if (!InstIsGuard(inst, ph))
  2614         return false;
  2615     return !(*ph)->isNatural();
  2616 }
  2618 // Cases to be handled:
  2619 // 1) no pools or branches in sight => return this+1
  2620 // 2) branch to next instruction => return this+2, because a nop needed to be inserted into the stream.
  2621 // 3) this+1 is an artificial guard for a pool => return first instruction after the pool
  2622 // 4) this+1 is a natural guard => return the branch
  2623 // 5) this is a branch, right before a pool => return first instruction after the pool
  2624 // in assembly form:
  2625 // 1) add r0, r0, r0 <= this
  2626 //    add r1, r1, r1 <= returned value
  2627 //    add r2, r2, r2
  2628 //
  2629 // 2) add r0, r0, r0 <= this
  2630 //    b foo
  2631 //    foo:
  2632 //    add r2, r2, r2 <= returned value
  2633 //
  2634 // 3) add r0, r0, r0 <= this
  2635 //    b after_pool;
  2636 //    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not requested by the assembler
  2637 //    0xdeadbeef        # the 2 indicates that there is 1 pool entry, and the pool header
  2638 //    add r4, r4, r4 <= returned value
  2639 // 4) add r0, r0, r0 <= this
  2640 //    b after_pool  <= returned value
  2641 //    .word 0xffff8002  # bit 15 being 1 indicates that the branch was requested by the assembler
  2642 //    0xdeadbeef
  2643 //    add r4, r4, r4
  2644 // 5) b after_pool  <= this
  2645 //    .word 0xffff8002  # bit 15 has no bearing on the returned value
  2646 //    0xdeadbeef
  2647 //    add r4, r4, r4  <= returned value
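// Usage sketch (illustrative): because next() implements the cases above, a
// scan of finished code steps with it rather than by sizeof(Instruction), e.g.
//     for (Instruction *cur = first; cur < last; cur = cur->next())
//         ...;
// and InstructionIterator below applies the same skipping when it is
// constructed on an artificial guard.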
  2649 Instruction *
  2650 Instruction::next()
  2651 {
  2652     Instruction *ret = this+1;
  2653     const PoolHeader *ph;
  2654     // If this is a guard, and the next instruction is a header, always work around the pool.
  2655     // If it isn't a guard, then start looking ahead.
  2656     if (InstIsGuard(this, &ph))
  2657         return ret + ph->size();
  2658     if (InstIsArtificialGuard(ret, &ph))
  2659         return ret + 1 + ph->size();
  2660     if (InstIsBNop(ret))
  2661         return ret + 1;
  2662     return ret;
  2663 }
  2665 void
  2666 Assembler::ToggleToJmp(CodeLocationLabel inst_)
  2667 {
  2668     uint32_t *ptr = (uint32_t *)inst_.raw();
  2670     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
  2671     JS_ASSERT(inst->is<InstCMP>());
  2673     // Zero bits 20-27, then set 24-27 to be correct for a branch.
  2674     // 20-23 will be part of the B's immediate, and should be 0.
  2675     *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
  2676     AutoFlushICache::flush(uintptr_t(ptr), 4);
  2677 }
  2679 void
  2680 Assembler::ToggleToCmp(CodeLocationLabel inst_)
  2681 {
  2682     uint32_t *ptr = (uint32_t *)inst_.raw();
  2684     DebugOnly<Instruction *> inst = (Instruction *)inst_.raw();
  2685     JS_ASSERT(inst->is<InstBImm>());
  2687     // Ensure that this masking operation doesn't affect the offset of the
  2688     // branch instruction when it gets toggled back.
  2689     JS_ASSERT((*ptr & (0xf << 20)) == 0);
  2691     // Also make sure that the CMP is valid. Part of having a valid CMP is that
  2692     // all of the bits describing the destination in most ALU instructions are
  2693     // all unset (looks like it is encoding r0).
  2694     JS_ASSERT(toRD(*inst) == r0);
  2696     // Zero out bits 20-27, then set them to be correct for a compare.
  2697     *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
  2699     AutoFlushICache::flush(uintptr_t(ptr), 4);
  2700 }
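// Illustrative summary of the toggle pair above: only bits 20-27 are rewritten.
// 0x35 (I=1, opcode 1010, S=1) selects the cmp form, while 0xa0 puts 1010 in
// bits 24-27 to select b and leaves bits 20-23 as the (zero) top of the branch
// immediate; the condition field in bits 28-31 is never touched, so the word
// can be flipped back and forth safely.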
  2702 void
  2703 Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
  2704 {
  2705     Instruction *inst = (Instruction *)inst_.raw();
  2706     JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>());
  2708     if (inst->is<InstMovW>()) {
  2709         // If it looks like the start of a movw/movt sequence,
  2710         // then make sure we have all of it (and advance the iterator
  2711         // past the full sequence)
  2712         inst = inst->next();
  2713         JS_ASSERT(inst->is<InstMovT>());
  2714     }
  2716     inst = inst->next();
  2717     JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>());
  2719     if (enabled == inst->is<InstBLXReg>()) {
  2720         // Nothing to do.
  2721         return;
  2722     }
  2724     if (enabled)
  2725         *inst = InstBLXReg(ScratchRegister, Always);
  2726     else
  2727         *inst = InstNOP();
  2729     AutoFlushICache::flush(uintptr_t(inst), 4);
  2730 }
  2732 void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
  2733 {
  2734     JS_ASSERT(inst->is<InstCMP>());
  2735     InstCMP *cmp = inst->as<InstCMP>();
  2737     Register index;
  2738     cmp->extractOp1(&index);
  2740     Operand2 op = cmp->extractOp2();
  2741     JS_ASSERT(op.isImm8());
  2743     Imm8 imm8 = Imm8(heapSize);
  2744     JS_ASSERT(!imm8.invalid);
  2746     *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always);
  2747     // NOTE: we don't flush the instruction cache here!  This function is currently only called
  2748     // from within AsmJSModule::patchHeapAccesses, which does that for us.  Don't call this!
  2749 }
  2751 InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) {
  2752     const PoolHeader *ph;
  2753     // If this is a guard, and the next instruction is a header, always work around the pool.
  2754     // If it isn't a guard, then start looking ahead.
  2755     if (InstIsArtificialGuard(i, &ph)) {
  2756         i = i->next();
  2757     }
  2758 }
  2759 Assembler *Assembler::dummy = nullptr;
