Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_arm_Assembler_arm_h
8 #define jit_arm_Assembler_arm_h
10 #include "mozilla/ArrayUtils.h"
11 #include "mozilla/Attributes.h"
12 #include "mozilla/MathAlgorithms.h"
14 #include "assembler/assembler/AssemblerBufferWithConstantPool.h"
15 #include "jit/arm/Architecture-arm.h"
16 #include "jit/CompactBuffer.h"
17 #include "jit/IonCode.h"
18 #include "jit/shared/Assembler-shared.h"
19 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
21 namespace js {
22 namespace jit {
// NOTE: there are duplicates in this list!
// Sometimes we want to specifically refer to the
// link register as a link register (bl lr is much
// clearer than bl r14). HOWEVER, this register can
// easily be a gpr when it is not busy holding the return
// address.
static MOZ_CONSTEXPR_VAR Register r0 = { Registers::r0 };
static MOZ_CONSTEXPR_VAR Register r1 = { Registers::r1 };
static MOZ_CONSTEXPR_VAR Register r2 = { Registers::r2 };
static MOZ_CONSTEXPR_VAR Register r3 = { Registers::r3 };
static MOZ_CONSTEXPR_VAR Register r4 = { Registers::r4 };
static MOZ_CONSTEXPR_VAR Register r5 = { Registers::r5 };
static MOZ_CONSTEXPR_VAR Register r6 = { Registers::r6 };
static MOZ_CONSTEXPR_VAR Register r7 = { Registers::r7 };
static MOZ_CONSTEXPR_VAR Register r8 = { Registers::r8 };
static MOZ_CONSTEXPR_VAR Register r9 = { Registers::r9 };
static MOZ_CONSTEXPR_VAR Register r10 = { Registers::r10 };
static MOZ_CONSTEXPR_VAR Register r11 = { Registers::r11 };
// r12 and ip are the same register under two names.
static MOZ_CONSTEXPR_VAR Register r12 = { Registers::ip };
static MOZ_CONSTEXPR_VAR Register ip = { Registers::ip };
static MOZ_CONSTEXPR_VAR Register sp = { Registers::sp };
// r14 and lr are the same register under two names.
static MOZ_CONSTEXPR_VAR Register r14 = { Registers::lr };
static MOZ_CONSTEXPR_VAR Register lr = { Registers::lr };
static MOZ_CONSTEXPR_VAR Register pc = { Registers::pc };

// ip (r12) doubles as the assembler's scratch register.
static MOZ_CONSTEXPR_VAR Register ScratchRegister = {Registers::ip};

static MOZ_CONSTEXPR_VAR Register OsrFrameReg = r3;
static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = r8;
static MOZ_CONSTEXPR_VAR Register CallTempReg0 = r5;
static MOZ_CONSTEXPR_VAR Register CallTempReg1 = r6;
static MOZ_CONSTEXPR_VAR Register CallTempReg2 = r7;
static MOZ_CONSTEXPR_VAR Register CallTempReg3 = r8;
static MOZ_CONSTEXPR_VAR Register CallTempReg4 = r0;
static MOZ_CONSTEXPR_VAR Register CallTempReg5 = r1;

// r0-r3 carry the integer arguments (AAPCS argument registers).
static MOZ_CONSTEXPR_VAR Register IntArgReg0 = r0;
static MOZ_CONSTEXPR_VAR Register IntArgReg1 = r1;
static MOZ_CONSTEXPR_VAR Register IntArgReg2 = r2;
static MOZ_CONSTEXPR_VAR Register IntArgReg3 = r3;
static MOZ_CONSTEXPR_VAR Register GlobalReg = r10;
static MOZ_CONSTEXPR_VAR Register HeapReg = r11;
// Call-clobbered registers that are not argument registers.
static MOZ_CONSTEXPR_VAR Register CallTempNonArgRegs[] = { r5, r6, r7, r8 };
static const uint32_t NumCallTempNonArgRegs =
    mozilla::ArrayLength(CallTempNonArgRegs);
69 class ABIArgGenerator
70 {
71 unsigned intRegIndex_;
72 unsigned floatRegIndex_;
73 uint32_t stackOffset_;
74 ABIArg current_;
76 public:
77 ABIArgGenerator();
78 ABIArg next(MIRType argType);
79 ABIArg ¤t() { return current_; }
80 uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
81 static const Register NonArgReturnVolatileReg0;
82 static const Register NonArgReturnVolatileReg1;
83 };
// Register holding the object that needs a pre-write barrier.
static MOZ_CONSTEXPR_VAR Register PreBarrierReg = r1;

static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg };
static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { FloatRegisters::invalid_freg };

// Boxed JS values are returned split across two core registers.
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = r3;
static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = r2;
static MOZ_CONSTEXPR_VAR Register StackPointer = sp;
// No dedicated frame pointer is used on ARM.
static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg;
static MOZ_CONSTEXPR_VAR Register ReturnReg = r0;
static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloatReg = { FloatRegisters::d0 };
static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloatReg = { FloatRegisters::d15 };

// Register pinned to hold the canonical NaN value.
static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::d14 };

// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = r4;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = r0;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = r1;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = r2;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = r3;

// Registers used in the GenerateFFIIonExit Disable Activation block.
// None of these may be the second scratch register (lr).
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = r2;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = r3;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = r0;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = r1;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = r4;
// The sixteen double-precision VFP registers.
static MOZ_CONSTEXPR_VAR FloatRegister d0 = {FloatRegisters::d0};
static MOZ_CONSTEXPR_VAR FloatRegister d1 = {FloatRegisters::d1};
static MOZ_CONSTEXPR_VAR FloatRegister d2 = {FloatRegisters::d2};
static MOZ_CONSTEXPR_VAR FloatRegister d3 = {FloatRegisters::d3};
static MOZ_CONSTEXPR_VAR FloatRegister d4 = {FloatRegisters::d4};
static MOZ_CONSTEXPR_VAR FloatRegister d5 = {FloatRegisters::d5};
static MOZ_CONSTEXPR_VAR FloatRegister d6 = {FloatRegisters::d6};
static MOZ_CONSTEXPR_VAR FloatRegister d7 = {FloatRegisters::d7};
static MOZ_CONSTEXPR_VAR FloatRegister d8 = {FloatRegisters::d8};
static MOZ_CONSTEXPR_VAR FloatRegister d9 = {FloatRegisters::d9};
static MOZ_CONSTEXPR_VAR FloatRegister d10 = {FloatRegisters::d10};
static MOZ_CONSTEXPR_VAR FloatRegister d11 = {FloatRegisters::d11};
static MOZ_CONSTEXPR_VAR FloatRegister d12 = {FloatRegisters::d12};
static MOZ_CONSTEXPR_VAR FloatRegister d13 = {FloatRegisters::d13};
static MOZ_CONSTEXPR_VAR FloatRegister d14 = {FloatRegisters::d14};
static MOZ_CONSTEXPR_VAR FloatRegister d15 = {FloatRegisters::d15};

// For maximal awesomeness, 8 should be sufficient.
// ldrd/strd (dual-register load/store) operate in a single cycle
// when the address they are dealing with is 8 byte aligned.
// Also, the ARM abi wants the stack to be 8 byte aligned at
// function boundaries. I'm trying to make sure this is always true.
static const uint32_t StackAlignment = 8;
static const uint32_t CodeAlignment = 8;
static const bool StackKeptAligned = true;
static const uint32_t NativeFrameSize = sizeof(void*);
static const uint32_t AlignmentAtPrologue = 0;
static const uint32_t AlignmentMidPrologue = 4;

// Pointers are scaled by 4 bytes on this 32-bit target.
static const Scale ScalePointer = TimesFour;
class Instruction;
class InstBranchImm;
// Shift a register number into the instruction field of the same name
// (Rm, Rs, Rd, Rt, Rn).
uint32_t RM(Register r);
uint32_t RS(Register r);
uint32_t RD(Register r);
uint32_t RT(Register r);
uint32_t RN(Register r);

// As above, but presumably tolerant of an invalid register —
// NOTE(review): behavior for invalid registers is defined out of view.
uint32_t maybeRD(Register r);
uint32_t maybeRT(Register r);
uint32_t maybeRN(Register r);

// Extract the register encoded in the corresponding field of instruction i.
Register toRN (Instruction &i);
Register toRM (Instruction &i);
Register toRD (Instruction &i);
Register toR (Instruction &i);

class VFPRegister;
// Shift a VFP register number into the Vd/Vn/Vm field of an instruction.
uint32_t VD(VFPRegister vr);
uint32_t VN(VFPRegister vr);
uint32_t VM(VFPRegister vr);
// A VFP register together with the type of data it is viewed as holding.
// The same physical register can be addressed as a double, a single, or
// (for vcvt) an integer.
class VFPRegister
{
  public:
    // What type of data is being stored in this register?
    // UInt / Int are specifically for vcvt, where we need
    // to know how the data is supposed to be converted.
    enum RegType {
        Double = 0x0,
        Single = 0x1,
        UInt = 0x2,
        Int = 0x3
    };

  protected:
    RegType kind : 2;
    // ARM doesn't have more than 32 registers...
    // don't take more bits than we'll need.
    // Presently, I don't have plans to address the upper
    // and lower halves of the double registers separately, so
    // 5 bits should suffice. If I do decide to address them separately
    // (vmov, I'm looking at you), I will likely specify it as a separate
    // field.
    uint32_t _code : 5;
    bool _isInvalid : 1;
    bool _isMissing : 1;

    VFPRegister(int r, RegType k)
      : kind(k), _code (r), _isInvalid(false), _isMissing(false)
    { }

  public:
    // Default-constructed registers are invalid.
    // NOTE(review): kind and _code are left uninitialized here; callers
    // are expected to check isInvalid() before using them.
    VFPRegister()
      : _isInvalid(true), _isMissing(false)
    { }

    // A "missing" register (b == true) is valid but absent.
    // NOTE(review): kind and _code are also left uninitialized here.
    VFPRegister(bool b)
      : _isInvalid(false), _isMissing(b)
    { }

    // View a core FloatRegister as a double-typed VFP register.
    VFPRegister(FloatRegister fr)
      : kind(Double), _code(fr.code()), _isInvalid(false), _isMissing(false)
    {
        // Verify the 5-bit field did not truncate the register number.
        JS_ASSERT(_code == (unsigned)fr.code());
    }

    VFPRegister(FloatRegister fr, RegType k)
      : kind(k), _code (fr.code()), _isInvalid(false), _isMissing(false)
    {
        JS_ASSERT(_code == (unsigned)fr.code());
    }
    bool isDouble() const { return kind == Double; }
    bool isSingle() const { return kind == Single; }
    bool isFloat() const { return (kind == Double) || (kind == Single); }
    bool isInt() const { return (kind == UInt) || (kind == Int); }
    bool isSInt() const { return kind == Int; }
    bool isUInt() const { return kind == UInt; }
    // Two registers are "equivalent" when they view data the same way.
    bool equiv(VFPRegister other) const { return other.kind == kind; }
    size_t size() const { return (kind == Double) ? 8 : 4; }
    bool isInvalid();
    bool isMissing();

    // Reinterpret this register under a different RegType view.
    VFPRegister doubleOverlay() const;
    VFPRegister singleOverlay() const;
    VFPRegister sintOverlay() const;
    VFPRegister uintOverlay() const;

    struct VFPRegIndexSplit;
    VFPRegIndexSplit encode();

    // for serializing values
    struct VFPRegIndexSplit {
        const uint32_t block : 4;  // the four high bits of the register index
        const uint32_t bit : 1;    // the low bit, encoded separately

      private:
        friend VFPRegIndexSplit js::jit::VFPRegister::encode();

        VFPRegIndexSplit (uint32_t block_, uint32_t bit_)
          : block(block_), bit(bit_)
        {
            // Verify the bitfields did not truncate their inputs.
            JS_ASSERT (block == block_);
            JS_ASSERT(bit == bit_);
        }
    };

    uint32_t code() const {
        return _code;
    }
};
// For being passed into the generic vfp instruction generator when
// there is an instruction that only takes two registers
extern VFPRegister NoVFPRegister;

// A JS value tag, wrapped so it can be used where an Imm32 is expected.
struct ImmTag : public Imm32
{
    ImmTag(JSValueTag mask)
      : Imm32(int32_t(mask))
    { }
};

// A JS value type, converted to its tag representation.
struct ImmType : public ImmTag
{
    ImmType(JSValueType type)
      : ImmTag(JSVAL_TYPE_TO_TAG(type))
    { }
};
// Addressing modes for loads/stores: the P (bit 24) and W (bit 21) bits.
enum Index {
    Offset = 0 << 21 | 1<<24,     // [rn, off] — no writeback
    PreIndex = 1<<21 | 1 << 24,   // [rn, off]! — update rn before access
    PostIndex = 0 << 21 | 0 << 24 // [rn], off — update rn after access
    // The docs were rather unclear on this. It sounds like
    // 1<<21 | 0 << 24 encodes dtrt (the unprivileged-access form).
};

// The bit that flags an immediate second operand sits at position 25,
// but its polarity differs between instruction families.
enum IsImmOp2_ {
    IsImmOp2 = 1 << 25,
    IsNotImmOp2 = 0 << 25
};
enum IsImmDTR_ {
    IsImmDTR = 0 << 25,
    IsNotImmDTR = 1 << 25
};
// For the extra memory operations, ldrd, ldrsb, ldrh
enum IsImmEDTR_ {
    IsImmEDTR = 1 << 22,
    IsNotImmEDTR = 0 << 22
};

// Shift kinds for the flexible second operand (encoded at bits 5-6).
enum ShiftType {
    LSL = 0, // << 5
    LSR = 1, // << 5
    ASR = 2, // << 5
    ROR = 3, // << 5
    RRX = ROR // RRX is encoded as ROR with a 0 offset.
};
// The actual codes that get set by instructions
// and the codes that are checked by the conditions below.
struct ConditionCodes
{
    bool Zero : 1;
    bool Overflow : 1;
    bool Carry : 1;
    bool Minus : 1;
};
// Modes for STM/LDM.
// Names are the suffixes applied to
// the instruction.
enum DTMMode {
    A = 0 << 24, // empty / after
    B = 1 << 24, // full / before
    D = 0 << 23, // decrement
    I = 1 << 23, // increment
    DA = D | A,
    DB = D | B,
    IA = I | A,
    IB = I | B
};

// The writeback (W) bit for STM/LDM.
enum DTMWriteBack {
    WriteBack = 1 << 21,
    NoWriteBack = 0 << 21
};

// The S bit: whether an ALU instruction updates the condition flags.
enum SetCond_ {
    SetCond = 1 << 20,
    NoSetCond = 0 << 20
};
// The L bit: load vs. store.
enum LoadStore {
    IsLoad = 1 << 20,
    IsStore = 0 << 20
};
// You almost never want to use this directly.
// Instead, you want to pass in a signed constant,
// and let this bit be implicitly set for you.
// This is, however, necessary if we want a negative index.
enum IsUp_ {
    IsUp = 1 << 23,
    IsDown = 0 << 23
};
// Data-processing opcodes, pre-shifted into their field (bits 21-24).
enum ALUOp {
    op_mov = 0xd << 21,
    op_mvn = 0xf << 21,
    op_and = 0x0 << 21,
    op_bic = 0xe << 21,
    op_eor = 0x1 << 21,
    op_orr = 0xc << 21,
    op_adc = 0x5 << 21,
    op_add = 0x4 << 21,
    op_sbc = 0x6 << 21,
    op_sub = 0x2 << 21,
    op_rsb = 0x3 << 21,
    op_rsc = 0x7 << 21,
    op_cmn = 0xb << 21,
    op_cmp = 0xa << 21,
    op_teq = 0x9 << 21,
    op_tst = 0x8 << 21,
    op_invalid = -1
};

// Multiply-family opcodes, pre-shifted into bits 21-23.
enum MULOp {
    opm_mul = 0 << 21,
    opm_mla = 1 << 21,
    opm_umaal = 2 << 21,
    opm_mls = 3 << 21,
    opm_umull = 4 << 21,
    opm_umlal = 5 << 21,
    opm_smull = 6 << 21,
    opm_smlal = 7 << 21
};
// Branch-instruction bit patterns and the masks used to recognize them.
enum BranchTag {
    op_b = 0x0a000000,
    op_b_mask = 0x0f000000,
    op_b_dest_mask = 0x00ffffff,
    op_bl = 0x0b000000,
    op_blx = 0x012fff30,
    op_bx = 0x012fff10
};
// Just like ALUOp, but for the vfp instruction set.
enum VFPOp {
    opv_mul = 0x2 << 20,
    opv_add = 0x3 << 20,
    opv_sub = 0x3 << 20 | 0x1 << 6,
    opv_div = 0x8 << 20,
    opv_mov = 0xB << 20 | 0x1 << 6,
    opv_abs = 0xB << 20 | 0x3 << 6,
    opv_neg = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
    opv_sqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
    opv_cmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
    opv_cmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16
};
// Negate the operation, AND negate the immediate that we were passed in.
ALUOp ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest);
// Can op be built from two sequential instructions?
bool can_dbl(ALUOp op);
// Is it safe to set condition codes while performing op in pieces?
bool condsAreSafe(ALUOp op);
// If there is a variant of op that has a dest (think cmp/sub)
// return that variant of it.
ALUOp getDestVariant(ALUOp op);
// Boxed JS return value: type tag in r3, payload in r2.
static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
// Soft-float doubles are returned in the r1:r0 pair.
static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);

// All of these classes exist solely to shuffle data into the various operands.
// For example Operand2 can be an imm8, a register-shifted-by-a-constant or
// a register-shifted-by-a-register. I represent this in C++ by having a
// base class Operand2, which just stores the 32 bits of data as they will be
// encoded in the instruction. You cannot directly create an Operand2
// since it is tricky, and not entirely sane to do so. Instead, you create
// one of its child classes, e.g. Imm8. Imm8's constructor takes a single
// integer argument. Imm8 will verify that its argument can be encoded
// as an ARM 12 bit imm8, encode it using an Imm8data, and finally call
// its parent's (Operand2) constructor with the Imm8data. The Operand2
// constructor will then call the Imm8data's encode() function to extract
// the raw bits from it. In the future, we should be able to extract
// data from the Operand2 by asking it for its component Imm8data
// structures. The reason this is so horribly round-about is I wanted
// to have Imm8 and RegisterShiftedRegister inherit directly from Operand2
// but have all of them take up only a single word of storage.
// I also wanted to avoid passing around raw integers at all
// since they are error prone.
class Op2Reg;
class O2RegImmShift;
class O2RegRegShift;
439 namespace datastore {
// The register form of a flexible operand: base register plus optional
// shift, with fields laid out exactly as the instruction encodes them.
struct Reg
{
    // the "second register"
    uint32_t RM : 4;
    // do we get another register for shifting
    uint32_t RRS : 1;
    ShiftType Type : 2;
    // I'd like this to be a more sensible encoding, but that would
    // need to be a struct and that would not pack :(
    uint32_t ShiftAmount : 5;
    uint32_t pad : 20;

    Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftamount)
      : RM(rm), RRS(rsr), Type(type), ShiftAmount(shiftamount), pad(0)
    { }

    // Pack into instruction position: RM at bits 0-3, RRS at bit 4,
    // shift type at bits 5-6, shift amount at bits 7-11.
    uint32_t encode() {
        return RM | RRS << 4 | Type << 5 | ShiftAmount << 7;
    }
    // Reinterpret an already-encoded Op2Reg as its field breakdown.
    // NOTE(review): memcpy type punning — relies on Reg and Op2Reg having
    // the same size and bit layout.
    explicit Reg(const Op2Reg &op) {
        memcpy(this, &op, sizeof(*this));
    }
};
// Op2 has a mode labelled "<imm8m>", which is arm's magical
// immediate encoding: an 8-bit payload rotated right by an even amount.
// Some instructions actually get 8 bits of data, which is called
// Imm8Data below. These should have edit distance > 1, but this is
// how it is for now.
struct Imm8mData
{
  private:
    uint32_t data : 8;  // 8-bit payload
    uint32_t rot : 4;   // rotate-right amount, in units of 2 bits
    // Throw in an extra bit that will be 1 if we can't encode this
    // properly. If we can encode it properly, a simple "|" will still
    // suffice to meld it into the instruction.
    // NOTE(review): buff itself is never initialized; encode() only reads
    // data, rot and invalid.
    uint32_t buff : 19;
  public:
    uint32_t invalid : 1;

    // Pack into instruction position: payload at bits 0-7, rotate at 8-11.
    uint32_t encode() {
        JS_ASSERT(!invalid);
        return data | rot << 8;
    };

    // Default constructor makes an invalid immediate.
    Imm8mData()
      : data(0xff), rot(0xf), invalid(1)
    { }

    Imm8mData(uint32_t data_, uint32_t rot_)
      : data(data_), rot(rot_), invalid(0)
    {
        // Verify the bitfields did not truncate their inputs.
        JS_ASSERT(data == data_);
        JS_ASSERT(rot == rot_);
    }
};
// An 8-bit immediate for the extended transfers (ldrh/ldrsb/ldrd...),
// which split it into two nibbles within the instruction.
struct Imm8Data
{
  private:
    uint32_t imm4L : 4;  // low nibble — instruction bits 0-3
    uint32_t pad : 4;
    uint32_t imm4H : 4;  // high nibble — instruction bits 8-11

  public:
    uint32_t encode() {
        return imm4L | (imm4H << 8);
    };
    Imm8Data(uint32_t imm) : imm4L(imm&0xf), imm4H(imm>>4) {
        JS_ASSERT(imm <= 0xff);
    }
};
// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted
// by 2. The value stored here is the already-scaled (word) offset.
struct Imm8VFPOffData
{
  private:
    uint32_t data;

  public:
    uint32_t encode() {
        return data;
    };
    Imm8VFPOffData(uint32_t imm) : data (imm) {
        JS_ASSERT((imm & ~(0xff)) == 0);
    }
};
// ARM can magically encode 256 very special immediates to be moved
// into a register (vmov immediate). The 8 payload bits are split:
// low nibble at instruction bits 0-3, high nibble at bits 16-19.
struct Imm8VFPImmData
{
  private:
    uint32_t imm4L : 4;
    uint32_t pad : 12;
    uint32_t imm4H : 4;
    int32_t isInvalid : 12;

  public:
    // Default constructor makes an invalid immediate.
    Imm8VFPImmData()
      : imm4L(-1U & 0xf), imm4H(-1U & 0xf), isInvalid(-1)
    { }

    Imm8VFPImmData(uint32_t imm)
      : imm4L(imm&0xf), imm4H(imm>>4), isInvalid(0)
    {
        JS_ASSERT(imm <= 0xff);
    }

    // Returns -1 (all bits set) for the invalid sentinel.
    uint32_t encode() {
        if (isInvalid != 0)
            return -1;
        return imm4L | (imm4H << 16);
    };
};
// The plain 12-bit immediate offset of ldr/str.
struct Imm12Data
{
    uint32_t data : 12;
    uint32_t encode() {
        return data;
    }

    Imm12Data(uint32_t imm)
      : data(imm)
    {
        // Verify the value fits in 12 bits untruncated.
        JS_ASSERT(data == imm);
    }
};
// A register shifted by an immediate ("register-immediate shift"):
// just the 5-bit shift amount.
struct RIS
{
    uint32_t ShiftAmount : 5;
    uint32_t encode () {
        return ShiftAmount;
    }

    RIS(uint32_t imm)
      : ShiftAmount(imm)
    {
        // Verify the amount fits in 5 bits untruncated.
        JS_ASSERT(ShiftAmount == imm);
    }
    // Recover the shift amount from a decoded Reg.
    explicit RIS(Reg r) : ShiftAmount(r.ShiftAmount) {}
};
// A register shifted by another register ("register-register shift").
struct RRS
{
    // instruction bit in this position must be zero for this form
    // NOTE(review): MustZero is never initialized; encode() does not read
    // it, so the encoded result is still well-defined.
    uint32_t MustZero : 1;
    // the register that holds the shift amount
    uint32_t RS : 4;

    RRS(uint32_t rs)
      : RS(rs)
    {
        // Verify the register number fits in 4 bits untruncated.
        JS_ASSERT(rs == RS);
    }

    // RS lands at bits 1-4 of the shift field (bit 0 stays zero).
    uint32_t encode () {
        return RS << 1;
    }
};
605 } // namespace datastore
607 class MacroAssemblerARM;
608 class Operand;
// The flexible second operand of an ALU instruction: either an <imm8m>
// immediate or a (possibly shifted) register, stored pre-encoded.
class Operand2
{
    friend class Operand;
    friend class MacroAssemblerARM;
    friend class InstALU;
  public:
    uint32_t oper : 31;
    uint32_t invalid : 1;
    // Bit 25 (IsImmOp2) distinguishes the immediate and register forms.
    bool isO2Reg() {
        return !(oper & IsImmOp2);
    }
    Op2Reg toOp2Reg();
    bool isImm8() {
        return oper & IsImmOp2;
    }

  protected:
    // Immediate form; an invalid Imm8mData produces an invalid Operand2.
    Operand2(datastore::Imm8mData base)
      : oper(base.invalid ? -1 : (base.encode() | (uint32_t)IsImmOp2)),
        invalid(base.invalid)
    { }

    // Register form.
    // NOTE(review): invalid is left unset by this and the raw constructor.
    Operand2(datastore::Reg base)
      : oper(base.encode() | (uint32_t)IsNotImmOp2)
    { }

  private:
    // Raw-bits constructor, internal use only.
    Operand2(int blob)
      : oper(blob)
    { }

  public:
    uint32_t encode() {
        return oper;
    }
};
// An <imm8m> ALU immediate. encodeImm() searches for an 8-bit payload
// and even rotate-right amount that reproduce the requested 32-bit value,
// returning an invalid Imm8mData when no such encoding exists.
class Imm8 : public Operand2
{
  public:
    static datastore::Imm8mData encodeImm(uint32_t imm) {
        // mozilla::CountLeadingZeroes32(imm) requires imm != 0.
        if (imm == 0)
            return datastore::Imm8mData(0, 0);
        // Rounded down to even so the rotate amount stays even.
        int left = mozilla::CountLeadingZeroes32(imm) & 30;
        // See if imm is a simple value that can be encoded with a rotate of 0.
        // This is effectively imm <= 0xff, but I assume this can be optimized
        // more
        if (left >= 24)
            return datastore::Imm8mData(imm, 0);

        // Mask out the 8 bits following the first bit that we found, see if we
        // have 0 yet.
        int no_imm = imm & ~(0xff << (24 - left));
        if (no_imm == 0) {
            // All significant bits fit in one byte: shift the payload down
            // and record the rotation that puts it back.
            return datastore::Imm8mData(imm >> (24 - left), ((8+left) >> 1));
        }
        // Look for the most significant bit set, once again.
        int right = 32 - (mozilla::CountLeadingZeroes32(no_imm) & 30);
        // If it is in the bottom 8 bits, there is a chance that this is a
        // wraparound case (significant bits spanning the word boundary).
        if (right >= 8)
            return datastore::Imm8mData();
        // Rather than masking out bits and checking for 0, just rotate the
        // immediate that we were passed in, and see if it fits into 8 bits.
        unsigned int mask = imm << (8 - right) | imm >> (24 + right);
        if (mask <= 0xff)
            return datastore::Imm8mData(mask, (8-right) >> 1);
        return datastore::Imm8mData();
    }

    // pair template?
    // Result of splitting a constant into two chained imm8m immediates.
    struct TwoImm8mData
    {
        datastore::Imm8mData fst, snd;

        TwoImm8mData()
          : fst(), snd()
        { }

        TwoImm8mData(datastore::Imm8mData _fst, datastore::Imm8mData _snd)
          : fst(_fst), snd(_snd)
        { }
    };

    static TwoImm8mData encodeTwoImms(uint32_t);
    Imm8(uint32_t imm)
      : Operand2(encodeImm(imm))
    { }
};
699 class Op2Reg : public Operand2
700 {
701 public:
702 Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
703 : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode()))
704 { }
706 Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
707 : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode()))
708 { }
709 bool isO2RegImmShift() {
710 datastore::Reg r(*this);
711 return !r.RRS;
712 }
713 O2RegImmShift toO2RegImmShift();
714 bool isO2RegRegShift() {
715 datastore::Reg r(*this);
716 return r.RRS;
717 }
718 O2RegRegShift toO2RegRegShift();
720 bool checkType(ShiftType type) {
721 datastore::Reg r(*this);
722 return r.Type == type;
723 }
724 bool checkRM(Register rm) {
725 datastore::Reg r(*this);
726 return r.RM == rm.code();
727 }
728 bool getRM(Register *rm) {
729 datastore::Reg r(*this);
730 *rm = Register::FromCode(r.RM);
731 return true;
732 }
733 };
// A register shifted by an immediate, as an ALU second operand.
class O2RegImmShift : public Op2Reg
{
  public:
    O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
      : Op2Reg(rn, type, datastore::RIS(shift))
    { }
    // Decode and return the immediate shift amount.
    int getShift() {
        datastore::Reg r(*this);
        datastore::RIS ris(r);
        return ris.ShiftAmount;
    }
};
// A register shifted by another register, as an ALU second operand.
class O2RegRegShift : public Op2Reg
{
  public:
    O2RegRegShift(Register rn, ShiftType type, Register rs)
      : Op2Reg(rn, type, datastore::RRS(rs.code()))
    { }
};
// Convenience builders for the common shifted-register operands.
O2RegImmShift O2Reg(Register r);
O2RegImmShift lsl (Register r, int amt);
O2RegImmShift lsr (Register r, int amt);
O2RegImmShift asr (Register r, int amt);
O2RegImmShift rol (Register r, int amt);
O2RegImmShift ror (Register r, int amt);

// As above, but the shift amount comes from a register.
O2RegRegShift lsl (Register r, Register amt);
O2RegRegShift lsr (Register r, Register amt);
O2RegRegShift asr (Register r, Register amt);
O2RegRegShift ror (Register r, Register amt);
// An offset from a register to be used for ldr/str. This should include
// the sign bit, since ARM has "signed-magnitude" offsets. That is it encodes
// an unsigned offset, then the instruction specifies if the offset is positive
// or negative. The +/- bit is necessary if the instruction set wants to be
// able to have a negative register offset e.g. ldr pc, [r1,-r2];
class DtrOff
{
    uint32_t data;

  protected:
    // Immediate-offset form: 12-bit magnitude plus the U (direction) bit.
    DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
      : data(immdata.encode() | (uint32_t)IsImmDTR | ((uint32_t)iu))
    { }

    // Register-offset form, defaulting to a positive offset.
    DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
      : data(reg.encode() | (uint32_t) IsNotImmDTR | iu)
    { }

  public:
    uint32_t encode() { return data; }
};
// A signed immediate ldr/str offset: split into magnitude + direction bit.
class DtrOffImm : public DtrOff
{
  public:
    DtrOffImm(int32_t imm)
      : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)), imm >= 0 ? IsUp : IsDown)
    {
        // The magnitude must fit the 12-bit field.
        JS_ASSERT(mozilla::Abs(imm) < 4096);
    }
};
// A (shifted) register ldr/str offset.
class DtrOffReg : public DtrOff
{
    // These are designed to be called by a constructor of a subclass.
    // Constructing the necessary RIS/RRS structures is annoying.
  protected:
    DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm, IsUp_ iu = IsUp)
      : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu)
    { }

    DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg, IsUp_ iu = IsUp)
      : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu)
    { }
};
// ldr/str offset: register shifted by an immediate.
class DtrRegImmShift : public DtrOffReg
{
  public:
    DtrRegImmShift(Register rn, ShiftType type, uint32_t shift, IsUp_ iu = IsUp)
      : DtrOffReg(rn, type, datastore::RIS(shift), iu)
    { }
};
// ldr/str offset: register shifted by another register.
class DtrRegRegShift : public DtrOffReg
{
  public:
    DtrRegRegShift(Register rn, ShiftType type, Register rs, IsUp_ iu = IsUp)
      : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu)
    { }
};
// We will frequently want to bundle a register with its offset so that we
// have an "operand" to a load instruction.
class DTRAddr
{
    uint32_t data;

  public:
    // Base register goes at instruction bits 16-19; the offset supplies
    // the rest.
    DTRAddr(Register reg, DtrOff dtr)
      : data(dtr.encode() | (reg.code() << 16))
    { }

    uint32_t encode() {
        return data;
    }
    // Extract the base register back out of the encoded bits.
    Register getBase() {
        return Register::FromCode((data >> 16) &0xf);
    }
  private:
    friend class Operand;
    DTRAddr(uint32_t blob)
      : data(blob)
    { }
};
// Offsets for the extended data transfer instructions:
// ldrsh, ldrd, ldrsb, etc.
class EDtrOff
{
    uint32_t data;

  protected:
    // Immediate form: split 8-bit magnitude plus the direction bit.
    EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
      : data(imm8.encode() | IsImmEDTR | (uint32_t)iu)
    { }

    // Register form (no shift is available in these instructions).
    EDtrOff(Register rm, IsUp_ iu = IsUp)
      : data(rm.code() | IsNotImmEDTR | iu)
    { }

  public:
    uint32_t encode() {
        return data;
    }
};
// A signed immediate offset for the extended transfers (8-bit magnitude).
class EDtrOffImm : public EDtrOff
{
  public:
    EDtrOffImm(int32_t imm)
      : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)), (imm >= 0) ? IsUp : IsDown)
    {
        JS_ASSERT(mozilla::Abs(imm) < 256);
    }
};
// This is the most-derived class, since the extended data
// transfer instructions don't support any sort of modifying the
// "index" operand.
class EDtrOffReg : public EDtrOff
{
  public:
    EDtrOffReg(Register rm)
      : EDtrOff(rm)
    { }
};
// Base register + offset for the extended data transfers.
class EDtrAddr
{
    uint32_t data;

  public:
    // RN() places the base register into its instruction field.
    EDtrAddr(Register r, EDtrOff off)
      : data(RN(r) | off.encode())
    { }

    uint32_t encode() {
        return data;
    }
};
// An offset for VLDR/VSTR: scaled 8-bit magnitude plus direction bit.
class VFPOff
{
    uint32_t data;

  protected:
    VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
      : data(imm.encode() | (uint32_t)isup)
    { }

  public:
    uint32_t encode() {
        return data;
    }
};
// A signed byte offset for VLDR/VSTR; divided by 4 since the hardware
// implicitly scales the stored 8-bit field by 4.
class VFPOffImm : public VFPOff
{
  public:
    VFPOffImm(int32_t imm)
      : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4), imm < 0 ? IsDown : IsUp)
    {
        JS_ASSERT(mozilla::Abs(imm) <= 255 * 4);
    }
};
// Base register + offset for VLDR/VSTR.
class VFPAddr
{
    friend class Operand;

    uint32_t data;

  protected:
    // Raw-bits constructor, used by Operand.
    VFPAddr(uint32_t blob)
      : data(blob)
    { }

  public:
    VFPAddr(Register base, VFPOff off)
      : data(RN(base) | off.encode())
    { }

    uint32_t encode() {
        return data;
    }
};
// A VFP immediate (the 256 specially encodable double constants),
// built from the top word of the double's bit pattern.
class VFPImm {
    uint32_t data;

  public:
    // The constant 1.0, precomputed.
    static const VFPImm one;

    VFPImm(uint32_t topWordOfDouble);

    uint32_t encode() {
        return data;
    }
    // -1 (all bits set) is the "not encodable" sentinel.
    bool isValid() {
        return data != -1U;
    }
};
// A BOffImm is an immediate that is used for branches. Namely, it is the
// offset that will be encoded in the branch instruction. This is the only
// sane way of constructing a branch.
class BOffImm
{
    // 24-bit branch field: (target - (branch + 8)) >> 2. ARM branch
    // offsets are relative to pc (= instruction + 8) and word-scaled.
    uint32_t data;

  public:
    uint32_t encode() {
        return data;
    }
    // Recover the byte offset: "<<8 then >>6" sign-extends the 24-bit
    // field and multiplies by 4 in one step; +8 re-applies the pc bias.
    int32_t decode() {
        return ((((int32_t)data) << 8) >> 6) + 8;
    }

    explicit BOffImm(int offset)
      : data ((offset - 8) >> 2 & 0x00ffffff)
    {
        // Branch targets must be word aligned.
        JS_ASSERT((offset & 0x3) == 0);
        if (!isInRange(offset))
            CrashAtUnhandlableOOM("BOffImm");
    }
    // +/-32MB: the reach of a signed 24-bit word offset, measured from
    // pc (offset - 8).
    static bool isInRange(int offset)
    {
        if ((offset - 8) < -33554432)
            return false;
        if ((offset - 8) > 33554428)
            return false;
        return true;
    }
    // Sentinel stored in unbound branches.
    static const int INVALID = 0x00800000;
    BOffImm()
      : data(INVALID)
    { }

    bool isInvalid() {
        return data == uint32_t(INVALID);
    }
    // Resolve this offset relative to the branch instruction src.
    Instruction *getDest(Instruction *src);

  private:
    friend class InstBranchImm;
    BOffImm(Instruction &inst);
};
// The split 16-bit immediate of movw/movt: 12 low bits at instruction
// bits 0-11 and 4 high bits at bits 16-19.
class Imm16
{
    uint32_t lower : 12;   // instruction bits 0-11
    uint32_t pad : 4;
    uint32_t upper : 4;    // instruction bits 16-19
    uint32_t invalid : 12; // nonzero marks the invalid sentinel

  public:
    Imm16();
    Imm16(uint32_t imm);
    Imm16(Instruction &inst);

    // Pack into instruction position (upper nibble at bit 16).
    uint32_t encode() {
        return lower | upper << 16;
    }
    // Reassemble the 16-bit value itself (upper nibble at bit 12).
    uint32_t decode() {
        return lower | upper << 12;
    }

    bool isInvalid () {
        return invalid;
    }
};
/* I would prefer that these do not exist, since there are essentially
 * no instructions that would ever take more than one of these, however,
 * the MIR wants to only have one type of arguments to functions, so bugger.
 */
class Operand
{
    // the encoding of registers is the same for OP2, DTR and EDTR
    // yet the type system doesn't let us express this, so choices
    // must be made.
  public:
    enum Tag_ {
        OP2,  // a core register (ALU second operand)
        MEM,  // a base register plus a byte offset
        FOP   // a floating-point register
    };

  private:
    Tag_ Tag : 3;
    uint32_t reg : 5;
    int32_t offset;
    uint32_t data;
    // NOTE(review): offset and data are left uninitialized by the
    // register-only constructors below.

  public:
    Operand (Register reg_)
      : Tag(OP2), reg(reg_.code())
    { }

    Operand (FloatRegister freg)
      : Tag(FOP), reg(freg.code())
    { }

    Operand (Register base, Imm32 off)
      : Tag(MEM), reg(base.code()), offset(off.value)
    { }

    Operand (Register base, int32_t off)
      : Tag(MEM), reg(base.code()), offset(off)
    { }

    Operand (const Address &addr)
      : Tag(MEM), reg(addr.base.code()), offset(addr.offset)
    { }

    Tag_ getTag() const {
        return Tag;
    }

    // View this operand as an ALU second operand (OP2 only).
    Operand2 toOp2() const {
        JS_ASSERT(Tag == OP2);
        return O2Reg(Register::FromCode(reg));
    }

    Register toReg() const {
        JS_ASSERT(Tag == OP2);
        return Register::FromCode(reg);
    }

    // Split a MEM operand into base register and immediate offset.
    void toAddr(Register *r, Imm32 *dest) const {
        JS_ASSERT(Tag == MEM);
        *r = Register::FromCode(reg);
        *dest = Imm32(offset);
    }
    Address toAddress() const {
        return Address(Register::FromCode(reg), offset);
    }
    int32_t disp() const {
        JS_ASSERT(Tag == MEM);
        return offset;
    }

    int32_t base() const {
        JS_ASSERT(Tag == MEM);
        return reg;
    }
    Register baseReg() const {
        return Register::FromCode(reg);
    }
    // Re-encode a MEM operand as a plain or VFP load/store address.
    DTRAddr toDTRAddr() const {
        return DTRAddr(baseReg(), DtrOffImm(offset));
    }
    VFPAddr toVFPAddr() const {
        return VFPAddr(baseReg(), VFPOffImm(offset));
    }
};
// Redirect the jump at jump_ to target label.
void
PatchJump(CodeLocationJump &jump_, CodeLocationLabel label);

class InstructionIterator;
class Assembler;
// The assembler's output buffer, with constant-pool support.
// NOTE(review): the meaning of the template parameters (1024, 4, 1) is
// defined in IonAssemblerBufferWithConstantPools.h, out of view here.
typedef js::jit::AssemblerBufferWithConstantPool<1024, 4, Instruction, Assembler, 1> ARMBuffer;
1130 class Assembler : public AssemblerShared
1131 {
1132 public:
    // ARM condition codes, pre-shifted into instruction position
    // (bits 28-31).
    enum ARMCondition {
        EQ = 0x00000000, // Zero
        NE = 0x10000000, // Non-zero
        CS = 0x20000000, // carry set (unsigned >=)
        CC = 0x30000000, // carry clear (unsigned <)
        MI = 0x40000000, // minus / negative
        PL = 0x50000000, // plus / positive or zero
        VS = 0x60000000, // overflow set
        VC = 0x70000000, // overflow clear
        HI = 0x80000000, // unsigned >
        LS = 0x90000000, // unsigned <=
        GE = 0xa0000000, // signed >=
        LT = 0xb0000000, // signed <
        GT = 0xc0000000, // signed >
        LE = 0xd0000000, // signed <=
        AL = 0xe0000000  // always
    };
1152 enum Condition {
1153 Equal = EQ,
1154 NotEqual = NE,
1155 Above = HI,
1156 AboveOrEqual = CS,
1157 Below = CC,
1158 BelowOrEqual = LS,
1159 GreaterThan = GT,
1160 GreaterThanOrEqual = GE,
1161 LessThan = LT,
1162 LessThanOrEqual = LE,
1163 Overflow = VS,
1164 Signed = MI,
1165 NotSigned = PL,
1166 Zero = EQ,
1167 NonZero = NE,
1168 Always = AL,
1170 VFP_NotEqualOrUnordered = NE,
1171 VFP_Equal = EQ,
1172 VFP_Unordered = VS,
1173 VFP_NotUnordered = VC,
1174 VFP_GreaterThanOrEqualOrUnordered = CS,
1175 VFP_GreaterThanOrEqual = GE,
1176 VFP_GreaterThanOrUnordered = HI,
1177 VFP_GreaterThan = GT,
1178 VFP_LessThanOrEqualOrUnordered = LE,
1179 VFP_LessThanOrEqual = LS,
1180 VFP_LessThanOrUnordered = LT,
1181 VFP_LessThan = CC // MI is valid too.
1182 };
1184 // Bit set when a DoubleCondition does not map to a single ARM condition.
1185 // The macro assembler has to special-case these conditions, or else
1186 // ConditionFromDoubleCondition will complain.
1187 static const int DoubleConditionBitSpecial = 0x1;
1189 enum DoubleCondition {
1190 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
1191 DoubleOrdered = VFP_NotUnordered,
1192 DoubleEqual = VFP_Equal,
1193 DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
1194 DoubleGreaterThan = VFP_GreaterThan,
1195 DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
1196 DoubleLessThan = VFP_LessThan,
1197 DoubleLessThanOrEqual = VFP_LessThanOrEqual,
1198 // If either operand is NaN, these conditions always evaluate to true.
1199 DoubleUnordered = VFP_Unordered,
1200 DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial,
1201 DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered,
1202 DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered,
1203 DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered,
1204 DoubleLessThanOrUnordered = VFP_LessThanOrUnordered,
1205 DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered
1206 };
1208 Condition getCondition(uint32_t inst) {
1209 return (Condition) (0xf0000000 & inst);
1210 }
1211 static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
1212 JS_ASSERT(!(cond & DoubleConditionBitSpecial));
1213 return static_cast<Condition>(cond);
1214 }
1216 // :( this should be protected, but since CodeGenerator
1217 // wants to use it, It needs to go out here :(
1219 BufferOffset nextOffset() {
1220 return m_buffer.nextOffset();
1221 }
1223 protected:
1224 BufferOffset labelOffset (Label *l) {
1225 return BufferOffset(l->bound());
1226 }
1228 Instruction * editSrc (BufferOffset bo) {
1229 return m_buffer.getInst(bo);
1230 }
1231 public:
1232 void resetCounter();
1233 uint32_t actualOffset(uint32_t) const;
1234 uint32_t actualIndex(uint32_t) const;
1235 static uint8_t *PatchableJumpAddress(JitCode *code, uint32_t index);
1236 BufferOffset actualOffset(BufferOffset) const;
1237 protected:
1239 // structure for fixing up pc-relative loads/jumps when a the machine code
1240 // gets moved (executable copy, gc, etc.)
1241 struct RelativePatch
1242 {
1243 void *target;
1244 Relocation::Kind kind;
1245 RelativePatch(void *target, Relocation::Kind kind)
1246 : target(target), kind(kind)
1247 { }
1248 };
1250 // TODO: this should actually be a pool-like object
1251 // It is currently a big hack, and probably shouldn't exist
1252 js::Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
1253 js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
1254 js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpJumpRelocations_;
1255 js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpDataRelocations_;
1256 js::Vector<BufferOffset, 0, SystemAllocPolicy> tmpPreBarriers_;
1258 CompactBufferWriter jumpRelocations_;
1259 CompactBufferWriter dataRelocations_;
1260 CompactBufferWriter relocations_;
1261 CompactBufferWriter preBarriers_;
1263 bool enoughMemory_;
1265 //typedef JSC::AssemblerBufferWithConstantPool<1024, 4, 4, js::jit::Assembler> ARMBuffer;
1266 ARMBuffer m_buffer;
1268 // There is now a semi-unified interface for instruction generation.
1269 // During assembly, there is an active buffer that instructions are
1270 // being written into, but later, we may wish to modify instructions
1271 // that have already been created. In order to do this, we call the
1272 // same assembly function, but pass it a destination address, which
1273 // will be overwritten with a new instruction. In order to do this very
1274 // after assembly buffers no longer exist, when calling with a third
1275 // dest parameter, a this object is still needed. dummy always happens
1276 // to be null, but we shouldn't be looking at it in any case.
1277 static Assembler *dummy;
1278 mozilla::Array<Pool, 4> pools_;
1279 Pool *int32Pool;
1280 Pool *doublePool;
1282 public:
1283 Assembler()
1284 : enoughMemory_(true),
1285 m_buffer(4, 4, 0, &pools_[0], 8),
1286 int32Pool(m_buffer.getPool(1)),
1287 doublePool(m_buffer.getPool(0)),
1288 isFinished(false),
1289 dtmActive(false),
1290 dtmCond(Always)
1291 {
1292 }
1294 // We need to wait until an AutoIonContextAlloc is created by the
1295 // IonMacroAssembler, before allocating any space.
1296 void initWithAllocator() {
1297 m_buffer.initWithAllocator();
1299 // Set up the backwards double region
1300 new (&pools_[2]) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, true);
1301 // Set up the backwards 32 bit region
1302 new (&pools_[3]) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, true, true);
1303 // Set up the forwards double region
1304 new (doublePool) Pool (1024, 8, 4, 8, 8, m_buffer.LifoAlloc_, false, false, &pools_[2]);
1305 // Set up the forwards 32 bit region
1306 new (int32Pool) Pool (4096, 4, 4, 8, 4, m_buffer.LifoAlloc_, false, true, &pools_[3]);
1307 for (int i = 0; i < 4; i++) {
1308 if (pools_[i].poolData == nullptr) {
1309 m_buffer.fail_oom();
1310 return;
1311 }
1312 }
1313 }
1315 static Condition InvertCondition(Condition cond);
1317 // MacroAssemblers hold onto gcthings, so they are traced by the GC.
1318 void trace(JSTracer *trc);
1319 void writeRelocation(BufferOffset src) {
1320 tmpJumpRelocations_.append(src);
1321 }
1323 // As opposed to x86/x64 version, the data relocation has to be executed
1324 // before to recover the pointer, and not after.
1325 void writeDataRelocation(const ImmGCPtr &ptr) {
1326 if (ptr.value)
1327 tmpDataRelocations_.append(nextOffset());
1328 }
1329 void writePrebarrierOffset(CodeOffsetLabel label) {
1330 tmpPreBarriers_.append(BufferOffset(label.offset()));
1331 }
1333 enum RelocBranchStyle {
1334 B_MOVWT,
1335 B_LDR_BX,
1336 B_LDR,
1337 B_MOVW_ADD
1338 };
1340 enum RelocStyle {
1341 L_MOVWT,
1342 L_LDR
1343 };
1345 public:
1346 // Given the start of a Control Flow sequence, grab the value that is finally branched to
1347 // given the start of a function that loads an address into a register get the address that
1348 // ends up in the register.
1349 template <class Iter>
1350 static const uint32_t * getCF32Target(Iter *iter);
1352 static uintptr_t getPointer(uint8_t *);
1353 template <class Iter>
1354 static const uint32_t * getPtr32Target(Iter *iter, Register *dest = nullptr, RelocStyle *rs = nullptr);
1356 bool oom() const;
1358 void setPrinter(Sprinter *sp) {
1359 }
1361 private:
1362 bool isFinished;
1363 public:
1364 void finish();
1365 void executableCopy(void *buffer);
1366 void copyJumpRelocationTable(uint8_t *dest);
1367 void copyDataRelocationTable(uint8_t *dest);
1368 void copyPreBarrierTable(uint8_t *dest);
1370 bool addCodeLabel(CodeLabel label);
1371 size_t numCodeLabels() const {
1372 return codeLabels_.length();
1373 }
1374 CodeLabel codeLabel(size_t i) {
1375 return codeLabels_[i];
1376 }
1378 // Size of the instruction stream, in bytes.
1379 size_t size() const;
1380 // Size of the jump relocation table, in bytes.
1381 size_t jumpRelocationTableBytes() const;
1382 size_t dataRelocationTableBytes() const;
1383 size_t preBarrierTableBytes() const;
1385 // Size of the data table, in bytes.
1386 size_t bytesNeeded() const;
1388 // Write a blob of binary into the instruction stream *OR*
1389 // into a destination address. If dest is nullptr (the default), then the
1390 // instruction gets written into the instruction stream. If dest is not null
1391 // it is interpreted as a pointer to the location that we want the
1392 // instruction to be written.
1393 BufferOffset writeInst(uint32_t x, uint32_t *dest = nullptr);
1394 // A static variant for the cases where we don't want to have an assembler
1395 // object at all. Normally, you would use the dummy (nullptr) object.
1396 static void writeInstStatic(uint32_t x, uint32_t *dest);
1398 public:
1399 void writeCodePointer(AbsoluteLabel *label);
1401 BufferOffset align(int alignment);
1402 BufferOffset as_nop();
1403 BufferOffset as_alu(Register dest, Register src1, Operand2 op2,
1404 ALUOp op, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
1406 BufferOffset as_mov(Register dest,
1407 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always, Instruction *instdest = nullptr);
1408 BufferOffset as_mvn(Register dest, Operand2 op2,
1409 SetCond_ sc = NoSetCond, Condition c = Always);
1410 // logical operations
1411 BufferOffset as_and(Register dest, Register src1,
1412 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1413 BufferOffset as_bic(Register dest, Register src1,
1414 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1415 BufferOffset as_eor(Register dest, Register src1,
1416 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1417 BufferOffset as_orr(Register dest, Register src1,
1418 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1419 // mathematical operations
1420 BufferOffset as_adc(Register dest, Register src1,
1421 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1422 BufferOffset as_add(Register dest, Register src1,
1423 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1424 BufferOffset as_sbc(Register dest, Register src1,
1425 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1426 BufferOffset as_sub(Register dest, Register src1,
1427 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1428 BufferOffset as_rsb(Register dest, Register src1,
1429 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1430 BufferOffset as_rsc(Register dest, Register src1,
1431 Operand2 op2, SetCond_ sc = NoSetCond, Condition c = Always);
1432 // test operations
1433 BufferOffset as_cmn(Register src1, Operand2 op2,
1434 Condition c = Always);
1435 BufferOffset as_cmp(Register src1, Operand2 op2,
1436 Condition c = Always);
1437 BufferOffset as_teq(Register src1, Operand2 op2,
1438 Condition c = Always);
1439 BufferOffset as_tst(Register src1, Operand2 op2,
1440 Condition c = Always);
1442 // Not quite ALU worthy, but useful none the less:
1443 // These also have the isue of these being formatted
1444 // completly differently from the standard ALU operations.
1445 BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
1446 BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always, Instruction *pos = nullptr);
1448 BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
1449 MULOp op, SetCond_ sc, Condition c = Always);
1450 BufferOffset as_mul(Register dest, Register src1, Register src2,
1451 SetCond_ sc = NoSetCond, Condition c = Always);
1452 BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
1453 SetCond_ sc = NoSetCond, Condition c = Always);
1454 BufferOffset as_umaal(Register dest1, Register dest2, Register src1, Register src2,
1455 Condition c = Always);
1456 BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
1457 Condition c = Always);
1458 BufferOffset as_umull(Register dest1, Register dest2, Register src1, Register src2,
1459 SetCond_ sc = NoSetCond, Condition c = Always);
1460 BufferOffset as_umlal(Register dest1, Register dest2, Register src1, Register src2,
1461 SetCond_ sc = NoSetCond, Condition c = Always);
1462 BufferOffset as_smull(Register dest1, Register dest2, Register src1, Register src2,
1463 SetCond_ sc = NoSetCond, Condition c = Always);
1464 BufferOffset as_smlal(Register dest1, Register dest2, Register src1, Register src2,
1465 SetCond_ sc = NoSetCond, Condition c = Always);
1467 BufferOffset as_sdiv(Register dest, Register num, Register div, Condition c = Always);
1468 BufferOffset as_udiv(Register dest, Register num, Register div, Condition c = Always);
1470 // Data transfer instructions: ldr, str, ldrb, strb.
1471 // Using an int to differentiate between 8 bits and 32 bits is
1472 // overkill, but meh
1473 BufferOffset as_dtr(LoadStore ls, int size, Index mode,
1474 Register rt, DTRAddr addr, Condition c = Always, uint32_t *dest = nullptr);
1475 // Handles all of the other integral data transferring functions:
1476 // ldrsb, ldrsh, ldrd, etc.
1477 // size is given in bits.
1478 BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
1479 Register rt, EDtrAddr addr, Condition c = Always, uint32_t *dest = nullptr);
1481 BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask,
1482 DTMMode mode, DTMWriteBack wb, Condition c = Always);
1483 //overwrite a pool entry with new data.
1484 void as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data);
1485 // load a 32 bit immediate from a pool into a register
1486 BufferOffset as_Imm32Pool(Register dest, uint32_t value, Condition c = Always);
1487 // make a patchable jump that can target the entire 32 bit address space.
1488 BufferOffset as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe = nullptr, Condition c = Always);
1490 // load a 64 bit floating point immediate from a pool into a register
1491 BufferOffset as_FImm64Pool(VFPRegister dest, double value, Condition c = Always);
1492 // load a 32 bit floating point immediate from a pool into a register
1493 BufferOffset as_FImm32Pool(VFPRegister dest, float value, Condition c = Always);
1495 // Control flow stuff:
1497 // bx can *only* branch to a register
1498 // never to an immediate.
1499 BufferOffset as_bx(Register r, Condition c = Always, bool isPatchable = false);
1501 // Branch can branch to an immediate *or* to a register.
1502 // Branches to immediates are pc relative, branches to registers
1503 // are absolute
1504 BufferOffset as_b(BOffImm off, Condition c, bool isPatchable = false);
1506 BufferOffset as_b(Label *l, Condition c = Always, bool isPatchable = false);
1507 BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
1509 // blx can go to either an immediate or a register.
1510 // When blx'ing to a register, we change processor mode
1511 // depending on the low bit of the register
1512 // when blx'ing to an immediate, we *always* change processor state.
1513 BufferOffset as_blx(Label *l);
1515 BufferOffset as_blx(Register r, Condition c = Always);
1516 BufferOffset as_bl(BOffImm off, Condition c);
1517 // bl can only branch+link to an immediate, never to a register
1518 // it never changes processor state
1519 BufferOffset as_bl();
1520 // bl #imm can have a condition code, blx #imm cannot.
1521 // blx reg can be conditional.
1522 BufferOffset as_bl(Label *l, Condition c);
1523 BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
1525 BufferOffset as_mrs(Register r, Condition c = Always);
1526 BufferOffset as_msr(Register r, Condition c = Always);
1527 // VFP instructions!
1528 private:
1530 enum vfp_size {
1531 isDouble = 1 << 8,
1532 isSingle = 0 << 8
1533 };
1535 BufferOffset writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest=nullptr);
1536 // Unityped variants: all registers hold the same (ieee754 single/double)
1537 // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
1538 BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1539 VFPOp op, Condition c = Always);
1541 public:
1542 BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1543 Condition c = Always);
1545 BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1546 Condition c = Always);
1548 BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1549 Condition c = Always);
1551 BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1552 Condition c = Always);
1554 BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1555 Condition c = Always);
1557 BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1558 Condition c = Always);
1560 BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
1562 BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
1564 BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
1566 BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
1567 Condition c = Always);
1569 BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm,
1570 Condition c = Always);
1571 BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always);
1573 // specifically, a move between two same sized-registers
1574 BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);
1575 /*xfer between Core and VFP*/
1576 enum FloatToCore_ {
1577 FloatToCore = 1 << 20,
1578 CoreToFloat = 0 << 20
1579 };
1581 private:
1582 enum VFPXferSize {
1583 WordTransfer = 0x02000010,
1584 DoubleTransfer = 0x00400010
1585 };
1587 public:
1588 // Unlike the next function, moving between the core registers and vfp
1589 // registers can't be *that* properly typed. Namely, since I don't want to
1590 // munge the type VFPRegister to also include core registers. Thus, the core
1591 // and vfp registers are passed in based on their type, and src/dest is
1592 // determined by the float2core.
1594 BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c,
1595 Condition c = Always, int idx = 0);
1597 // our encoding actually allows just the src and the dest (and theiyr types)
1598 // to uniquely specify the encoding that we are going to use.
1599 BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
1600 Condition c = Always);
1601 // hard coded to a 32 bit fixed width result for now
1602 BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c = Always);
1604 /* xfer between VFP and memory*/
1605 BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
1606 Condition c = Always /* vfp doesn't have a wb option*/,
1607 uint32_t *dest = nullptr);
1609 // VFP's ldm/stm work differently from the standard arm ones.
1610 // You can only transfer a range
1612 BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
1613 /*also has update conditions*/Condition c = Always);
1615 BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);
1617 BufferOffset as_vmrs(Register r, Condition c = Always);
1618 BufferOffset as_vmsr(Register r, Condition c = Always);
1619 // label operations
1620 bool nextLink(BufferOffset b, BufferOffset *next);
1621 void bind(Label *label, BufferOffset boff = BufferOffset());
1622 void bind(RepatchLabel *label);
1623 uint32_t currentOffset() {
1624 return nextOffset().getOffset();
1625 }
1626 void retarget(Label *label, Label *target);
1627 // I'm going to pretend this doesn't exist for now.
1628 void retarget(Label *label, void *target, Relocation::Kind reloc);
1630 void Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address);
1632 // See Bind
1633 size_t labelOffsetToPatchOffset(size_t offset) {
1634 return actualOffset(offset);
1635 }
1637 void call(Label *label);
1638 void call(void *target);
1640 void as_bkpt();
1642 public:
1643 static void TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);
1644 static void TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader);
1646 protected:
1647 void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
1648 enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
1649 if (kind == Relocation::JITCODE)
1650 writeRelocation(src);
1651 }
1653 public:
1654 // The buffer is about to be linked, make sure any constant pools or excess
1655 // bookkeeping has been flushed to the instruction stream.
1656 void flush() {
1657 JS_ASSERT(!isFinished);
1658 m_buffer.flushPool();
1659 return;
1660 }
1662 // Copy the assembly code to the given buffer, and perform any pending
1663 // relocations relying on the target address.
1664 void executableCopy(uint8_t *buffer);
1666 // Actual assembly emitting functions.
1668 // Since I can't think of a reasonable default for the mode, I'm going to
1669 // leave it as a required argument.
1670 void startDataTransferM(LoadStore ls, Register rm,
1671 DTMMode mode, DTMWriteBack update = NoWriteBack,
1672 Condition c = Always)
1673 {
1674 JS_ASSERT(!dtmActive);
1675 dtmUpdate = update;
1676 dtmBase = rm;
1677 dtmLoadStore = ls;
1678 dtmLastReg = -1;
1679 dtmRegBitField = 0;
1680 dtmActive = 1;
1681 dtmCond = c;
1682 dtmMode = mode;
1683 }
1685 void transferReg(Register rn) {
1686 JS_ASSERT(dtmActive);
1687 JS_ASSERT(rn.code() > dtmLastReg);
1688 dtmRegBitField |= 1 << rn.code();
1689 if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) {
1690 MOZ_ASSUME_UNREACHABLE("ARM Spec says this is invalid");
1691 }
1692 }
1693 void finishDataTransfer() {
1694 dtmActive = false;
1695 as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond);
1696 }
1698 void startFloatTransferM(LoadStore ls, Register rm,
1699 DTMMode mode, DTMWriteBack update = NoWriteBack,
1700 Condition c = Always)
1701 {
1702 JS_ASSERT(!dtmActive);
1703 dtmActive = true;
1704 dtmUpdate = update;
1705 dtmLoadStore = ls;
1706 dtmBase = rm;
1707 dtmCond = c;
1708 dtmLastReg = -1;
1709 dtmMode = mode;
1710 dtmDelta = 0;
1711 }
1712 void transferFloatReg(VFPRegister rn)
1713 {
1714 if (dtmLastReg == -1) {
1715 vdtmFirstReg = rn.code();
1716 } else {
1717 if (dtmDelta == 0) {
1718 dtmDelta = rn.code() - dtmLastReg;
1719 JS_ASSERT(dtmDelta == 1 || dtmDelta == -1);
1720 }
1721 JS_ASSERT(dtmLastReg >= 0);
1722 JS_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta);
1723 }
1724 dtmLastReg = rn.code();
1725 }
1726 void finishFloatTransfer() {
1727 JS_ASSERT(dtmActive);
1728 dtmActive = false;
1729 JS_ASSERT(dtmLastReg != -1);
1730 dtmDelta = dtmDelta ? dtmDelta : 1;
1731 // fencepost problem.
1732 int len = dtmDelta * (dtmLastReg - vdtmFirstReg) + 1;
1733 as_vdtm(dtmLoadStore, dtmBase,
1734 VFPRegister(FloatRegister::FromCode(Min(vdtmFirstReg, dtmLastReg))),
1735 len, dtmCond);
1736 }
1738 private:
1739 int dtmRegBitField;
1740 int vdtmFirstReg;
1741 int dtmLastReg;
1742 int dtmDelta;
1743 Register dtmBase;
1744 DTMWriteBack dtmUpdate;
1745 DTMMode dtmMode;
1746 LoadStore dtmLoadStore;
1747 bool dtmActive;
1748 Condition dtmCond;
1750 public:
1751 enum {
1752 padForAlign8 = (int)0x00,
1753 padForAlign16 = (int)0x0000,
1754 padForAlign32 = (int)0xe12fff7f // 'bkpt 0xffff'
1755 };
1757 // API for speaking with the IonAssemblerBufferWithConstantPools
1758 // generate an initial placeholder instruction that we want to later fix up
1759 static void insertTokenIntoTag(uint32_t size, uint8_t *load, int32_t token);
1760 // take the stub value that was written in before, and write in an actual load
1761 // using the index we'd computed previously as well as the address of the pool start.
1762 static bool patchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
1763 // this is a callback for when we have filled a pool, and MUST flush it now.
1764 // The pool requires the assembler to place a branch past the pool, and it
1765 // calls this function.
1766 static uint32_t placeConstantPoolBarrier(int offset);
1767 // END API
1769 // move our entire pool into the instruction stream
1770 // This is to force an opportunistic dump of the pool, prefferably when it
1771 // is more convenient to do a dump.
1772 void dumpPool();
1773 void flushBuffer();
1774 void enterNoPool();
1775 void leaveNoPool();
1776 // this should return a BOffImm, but I didn't want to require everyplace that used the
1777 // AssemblerBuffer to make that class.
1778 static ptrdiff_t getBranchOffset(const Instruction *i);
1779 static void retargetNearBranch(Instruction *i, int offset, Condition cond, bool final = true);
1780 static void retargetNearBranch(Instruction *i, int offset, bool final = true);
1781 static void retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond);
1783 static void writePoolHeader(uint8_t *start, Pool *p, bool isNatural);
1784 static void writePoolFooter(uint8_t *start, Pool *p, bool isNatural);
1785 static void writePoolGuard(BufferOffset branch, Instruction *inst, BufferOffset dest);
1788 static uint32_t patchWrite_NearCallSize();
1789 static uint32_t nopSize() { return 4; }
1790 static void patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
1791 static void patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
1792 PatchedImmPtr expectedValue);
1793 static void patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
1794 ImmPtr expectedValue);
1795 static void patchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
1796 static uint32_t alignDoubleArg(uint32_t offset) {
1797 return (offset+1)&~1;
1798 }
1799 static uint8_t *nextInstruction(uint8_t *instruction, uint32_t *count = nullptr);
1800 // Toggle a jmp or cmp emitted by toggledJump().
1802 static void ToggleToJmp(CodeLocationLabel inst_);
1803 static void ToggleToCmp(CodeLocationLabel inst_);
1805 static void ToggleCall(CodeLocationLabel inst_, bool enabled);
1807 static void updateBoundsCheck(uint32_t logHeapSize, Instruction *inst);
1808 void processCodeLabels(uint8_t *rawCode);
1809 bool bailed() {
1810 return m_buffer.bail();
1811 }
1812 }; // Assembler
1814 // An Instruction is a structure for both encoding and decoding any and all ARM instructions.
1815 // many classes have not been implemented thus far.
1816 class Instruction
1817 {
// The full 32-bit instruction encoding, condition field included.
1818 uint32_t data;
1820 protected:
1821 // This is not for defaulting to always, this is for instructions that
1822 // cannot be made conditional, and have the usually invalid 4b1111 cond field
1823 Instruction (uint32_t data_, bool fake = false) : data(data_ | 0xf0000000) {
1824 JS_ASSERT (fake || ((data_ & 0xf0000000) == 0));
1825 }
1826 // Standard constructor
1827 Instruction (uint32_t data_, Assembler::Condition c) : data(data_ | (uint32_t) c) {
1828 JS_ASSERT ((data_ & 0xf0000000) == 0);
1829 }
1830 // You should never create an instruction directly. You should create a
1831 // more specific instruction which will eventually call one of these
1832 // constructors for you.
1833 public:
1834 uint32_t encode() const {
1835 return data;
1836 }
1837 // Check if this instruction is really a particular case
1838 template <class C>
1839 bool is() const { return C::isTHIS(*this); }
1841 // safely get a more specific variant of this pointer
1842 template <class C>
1843 C *as() const { return C::asTHIS(*this); }
1845 const Instruction & operator=(const Instruction &src) {
1846 data = src.data;
1847 return *this;
1848 }
1849 // Since almost all instructions have condition codes, the condition
1850 // code extractor resides in the base class.
// Note: *c is left untouched when the cond field is 0xf (the unconditional
// encoding space), so callers should pre-initialize it.
1851 void extractCond(Assembler::Condition *c) {
1852 if (data >> 28 != 0xf )
1853 *c = (Assembler::Condition)(data & 0xf0000000);
1854 }
1855 // Get the next instruction in the instruction stream.
1856 // This does neat things like ignore constant pools and their guards.
1857 Instruction *next();
1859 // Sometimes, an api wants a uint32_t (or a pointer to it) rather than
1860 // an instruction. raw() just coerces this into a pointer to a uint32_t
1861 const uint32_t *raw() const { return &data; }
1862 uint32_t size() const { return 4; }
1863 }; // Instruction
1865 // make sure that it is the right size
1866 JS_STATIC_ASSERT(sizeof(Instruction) == 4);
1868 // Data Transfer Instructions
// Single-register load/store (ldr/str/ldrb/strb) encoding wrapper.
1869 class InstDTR : public Instruction
1870 {
1871 public:
// Byte vs. word access is selected by bit 22 of the encoding.
1872 enum IsByte_ {
1873 IsByte = 0x00400000,
1874 IsWord = 0x00000000
1875 };
// Fixed opcode bits identifying the single data transfer class; IsDTRMask
// selects the bits that isTHIS compares against.
1876 static const int IsDTR = 0x04000000;
1877 static const int IsDTRMask = 0x0c000000;
1879 // TODO: Replace the initialization with something that is safer.
1880 InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
1881 : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c)
1882 { }
1884 static bool isTHIS(const Instruction &i);
1885 static InstDTR *asTHIS(const Instruction &i);
1887 };
1888 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(Instruction));
// Word-sized load (ldr): an InstDTR specialized to IsLoad/IsWord.
1890 class InstLDR : public InstDTR
1891 {
1892 public:
1893 InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
1894 : InstDTR(IsLoad, IsWord, mode, rt, addr, c)
1895 { }
1896 static bool isTHIS(const Instruction &i);
1897 static InstLDR *asTHIS(const Instruction &i);
1899 };
1900 JS_STATIC_ASSERT(sizeof(InstDTR) == sizeof(InstLDR));
// An always-executed nop instruction.
1902 class InstNOP : public Instruction
1903 {
// Canonical ARM nop encoding (condition field added by the constructor).
1904 static const uint32_t NopInst = 0x0320f000;
1906 public:
1907 InstNOP()
1908 : Instruction(NopInst, Assembler::Always)
1909 { }
1911 static bool isTHIS(const Instruction &i);
1912 static InstNOP *asTHIS(Instruction &i);
1913 };
1915 // Branching to a register, or calling a register
1916 class InstBranchReg : public Instruction
1917 {
1918 protected:
1919 // Don't use BranchTag yourself, use a derived instruction.
// Fixed opcode bits for bx/blx to a register; the low 4 bits hold rm.
1920 enum BranchTag {
1921 IsBX = 0x012fff10,
1922 IsBLX = 0x012fff30
1923 };
// Everything except the condition field and the target register.
1924 static const uint32_t IsBRegMask = 0x0ffffff0;
1925 InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
1926 : Instruction(tag | rm.code(), c)
1927 { }
1928 public:
1929 static bool isTHIS (const Instruction &i);
1930 static InstBranchReg *asTHIS (const Instruction &i);
1931 // Get the register that is being branched to
1932 void extractDest(Register *dest);
1933 // Make sure we are branching to a pre-known register
1934 bool checkDest(Register dest);
1935 };
1936 JS_STATIC_ASSERT(sizeof(InstBranchReg) == sizeof(Instruction));
1938 // Branching to an immediate offset, or calling an immediate offset
1939 class InstBranchImm : public Instruction
1940 {
1941 protected:
// Fixed opcode bits for b/bl; the low 24 bits carry the signed offset.
1942 enum BranchTag {
1943 IsB = 0x0a000000,
1944 IsBL = 0x0b000000
1945 };
1946 static const uint32_t IsBImmMask = 0x0f000000;
1948 InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
1949 : Instruction(tag | off.encode(), c)
1950 { }
1952 public:
1953 static bool isTHIS (const Instruction &i);
1954 static InstBranchImm *asTHIS (const Instruction &i);
// Recover the encoded pc-relative branch offset.
1955 void extractImm(BOffImm *dest);
1956 };
1957 JS_STATIC_ASSERT(sizeof(InstBranchImm) == sizeof(Instruction));
1959 // Very specific branching instructions.
// bx rm: recognition-only wrapper (no public constructor).
1960 class InstBXReg : public InstBranchReg
1961 {
1962 public:
1963 static bool isTHIS (const Instruction &i);
1964 static InstBXReg *asTHIS (const Instruction &i);
1965 };
// blx rm: branch-and-link to the address held in a register.
1966 class InstBLXReg : public InstBranchReg
1967 {
1968 public:
1969 InstBLXReg(Register reg, Assembler::Condition c)
1970 : InstBranchReg(IsBLX, reg, c)
1971 { }
1973 static bool isTHIS (const Instruction &i);
1974 static InstBLXReg *asTHIS (const Instruction &i);
1975 };
// b #imm: plain pc-relative branch.
1976 class InstBImm : public InstBranchImm
1977 {
1978 public:
1979 InstBImm(BOffImm off, Assembler::Condition c)
1980 : InstBranchImm(IsB, off, c)
1981 { }
1983 static bool isTHIS (const Instruction &i);
1984 static InstBImm *asTHIS (const Instruction &i);
1985 };
// bl #imm: pc-relative branch-and-link (call).
1986 class InstBLImm : public InstBranchImm
1987 {
1988 public:
1989 InstBLImm(BOffImm off, Assembler::Condition c)
1990 : InstBranchImm(IsBL, off, c)
1991 { }
1993 static bool isTHIS (const Instruction &i);
1994 static InstBLImm *asTHIS (Instruction &i);
1995 };
1997 // Both movw and movt. The layout of both the immediate and the destination
1998 // register is the same so the code is being shared.
1999 class InstMovWT : public Instruction
2000 {
2001 protected:
// Opcode bits distinguishing movw (low halfword) from movt (high halfword).
2002 enum WT {
2003 IsW = 0x03000000,
2004 IsT = 0x03400000
2005 };
2006 static const uint32_t IsWTMask = 0x0ff00000;
2008 InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
2009 : Instruction (RD(rd) | imm.encode() | wt, c)
2010 { }
2012 public:
// Accessors for the encoded 16-bit immediate and destination register,
// plus check* variants that compare against an expected value.
2013 void extractImm(Imm16 *dest);
2014 void extractDest(Register *dest);
2015 bool checkImm(Imm16 dest);
2016 bool checkDest(Register dest);
2018 static bool isTHIS (Instruction &i);
2019 static InstMovWT *asTHIS (Instruction &i);
2021 };
2022 JS_STATIC_ASSERT(sizeof(InstMovWT) == sizeof(Instruction));
// movw rd, #imm16: write a 16-bit immediate into the low half of rd.
class InstMovW : public InstMovWT
{
  public:
    InstMovW (Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsW, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstMovW *asTHIS (const Instruction &i);
};
// movt rd, #imm16: write a 16-bit immediate into the high half of rd.
class InstMovT : public InstMovWT
{
  public:
    InstMovT (Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsT, c)
    { }

    static bool isTHIS (const Instruction &i);
    static InstMovT *asTHIS (const Instruction &i);
};
// A data-processing (ALU) instruction: rd = rn <op> op2, with sc controlling
// whether the condition flags are updated.
class InstALU : public Instruction
{
    // Bits that must be clear for a data-processing encoding.
    static const int32_t ALUMask = 0xc << 24;
  public:
    InstALU (Register rd, Register rn, Operand2 op2, ALUOp op, SetCond_ sc, Assembler::Condition c)
      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | sc, c)
    { }
    static bool isTHIS (const Instruction &i);
    static InstALU *asTHIS (const Instruction &i);
    // Extract / compare the opcode, destination (rd), first operand (rn),
    // and the flexible second operand.
    void extractOp(ALUOp *ret);
    bool checkOp(ALUOp op);
    void extractDest(Register *ret);
    bool checkDest(Register rd);
    void extractOp1(Register *ret);
    bool checkOp1(Register rn);
    Operand2 extractOp2();
};
// cmp: an ALU instruction specialized for comparison (flags only, no rd).
class InstCMP : public InstALU
{
  public:
    static bool isTHIS (const Instruction &i);
    static InstCMP *asTHIS (const Instruction &i);
};
// mov: an ALU instruction specialized for register moves (no rn operand).
class InstMOV : public InstALU
{
  public:
    static bool isTHIS (const Instruction &i);
    static InstMOV *asTHIS (const Instruction &i);
};
2078 class InstructionIterator {
2079 private:
2080 Instruction *i;
2081 public:
2082 InstructionIterator(Instruction *i_);
2083 Instruction *next() {
2084 i = i->next();
2085 return cur();
2086 }
2087 Instruction *cur() const {
2088 return i;
2089 }
2090 };
// Argument-register budget used by the helpers below: the first 4 GPRs carry
// integer arguments and the first 8 VFP registers carry float arguments.
static const uint32_t NumIntArgRegs = 4;
static const uint32_t NumFloatArgRegs = 8;
2095 static inline bool
2096 GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
2097 {
2098 if (usedIntArgs >= NumIntArgRegs)
2099 return false;
2100 *out = Register::FromCode(usedIntArgs);
2101 return true;
2102 }
2104 // Get a register in which we plan to put a quantity that will be used as an
2105 // integer argument. This differs from GetIntArgReg in that if we have no more
2106 // actual argument registers to use we will fall back on using whatever
2107 // CallTempReg* don't overlap the argument registers, and only fail once those
2108 // run out too.
2109 static inline bool
2110 GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register *out)
2111 {
2112 if (GetIntArgReg(usedIntArgs, usedFloatArgs, out))
2113 return true;
2114 // Unfortunately, we have to assume things about the point at which
2115 // GetIntArgReg returns false, because we need to know how many registers it
2116 // can allocate.
2117 usedIntArgs -= NumIntArgRegs;
2118 if (usedIntArgs >= NumCallTempNonArgRegs)
2119 return false;
2120 *out = CallTempNonArgRegs[usedIntArgs];
2121 return true;
2122 }
2125 #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
2127 static inline uint32_t
2128 GetArgStackDisp(uint32_t arg)
2129 {
2130 JS_ASSERT(!useHardFpABI());
2131 JS_ASSERT(arg >= NumIntArgRegs);
2132 return (arg - NumIntArgRegs) * sizeof(intptr_t);
2133 }
2135 #endif
2138 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
2140 static inline bool
2141 GetFloatArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs, FloatRegister *out)
2142 {
2143 JS_ASSERT(useHardFpABI());
2144 if (usedFloatArgs >= NumFloatArgRegs)
2145 return false;
2146 *out = FloatRegister::FromCode(usedFloatArgs);
2147 return true;
2148 }
2150 static inline uint32_t
2151 GetIntArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
2152 {
2153 JS_ASSERT(useHardFpABI());
2154 JS_ASSERT(usedIntArgs >= NumIntArgRegs);
2155 uint32_t doubleSlots = Max(0, (int32_t)usedFloatArgs - (int32_t)NumFloatArgRegs);
2156 doubleSlots *= 2;
2157 int intSlots = usedIntArgs - NumIntArgRegs;
2158 return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
2159 }
2161 static inline uint32_t
2162 GetFloat32ArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
2163 {
2164 JS_ASSERT(useHardFpABI());
2165 JS_ASSERT(usedFloatArgs >= NumFloatArgRegs);
2166 uint32_t intSlots = 0;
2167 if (usedIntArgs > NumIntArgRegs)
2168 intSlots = usedIntArgs - NumIntArgRegs;
2169 uint32_t float32Slots = usedFloatArgs - NumFloatArgRegs;
2170 return (intSlots + float32Slots + *padding) * sizeof(intptr_t);
2171 }
// Hard-float ABI: stack displacement (in bytes) of an overflowed double
// argument. NOTE: *padding is an in-out parameter — it is incremented here
// when the slot count so far is odd, presumably to keep the double 8-byte
// aligned; confirm against the caller's use of padding.
static inline uint32_t
GetDoubleArgStackDisp(uint32_t usedIntArgs, uint32_t usedFloatArgs, uint32_t *padding)
{
    JS_ASSERT(useHardFpABI());
    JS_ASSERT(usedFloatArgs >= NumFloatArgRegs);
    uint32_t intSlots = 0;
    if (usedIntArgs > NumIntArgRegs) {
        intSlots = usedIntArgs - NumIntArgRegs;
        // update the amount of padding required.
        *padding += (*padding + usedIntArgs) % 2;
    }
    // Each double occupies two pointer-sized slots.
    uint32_t doubleSlots = usedFloatArgs - NumFloatArgRegs;
    doubleSlots *= 2;
    return (intSlots + doubleSlots + *padding) * sizeof(intptr_t);
}
2189 #endif
2193 class DoubleEncoder {
2194 uint32_t rep(bool b, uint32_t count) {
2195 uint32_t ret = 0;
2196 for (uint32_t i = 0; i < count; i++)
2197 ret = (ret << 1) | b;
2198 return ret;
2199 }
2201 uint32_t encode(uint8_t value) {
2202 //ARM ARM "VFP modified immediate constants"
2203 // aBbbbbbb bbcdefgh 000...
2204 // we want to return the top 32 bits of the double
2205 // the rest are 0.
2206 bool a = value >> 7;
2207 bool b = value >> 6 & 1;
2208 bool B = !b;
2209 uint32_t cdefgh = value & 0x3f;
2210 return a << 31 |
2211 B << 30 |
2212 rep(b, 8) << 22 |
2213 cdefgh << 16;
2214 }
2216 struct DoubleEntry
2217 {
2218 uint32_t dblTop;
2219 datastore::Imm8VFPImmData data;
2221 DoubleEntry()
2222 : dblTop(-1)
2223 { }
2224 DoubleEntry(uint32_t dblTop_, datastore::Imm8VFPImmData data_)
2225 : dblTop(dblTop_), data(data_)
2226 { }
2227 };
2229 mozilla::Array<DoubleEntry, 256> table;
2231 public:
2232 DoubleEncoder()
2233 {
2234 for (int i = 0; i < 256; i++) {
2235 table[i] = DoubleEntry(encode(i), datastore::Imm8VFPImmData(i));
2236 }
2237 }
2239 bool lookup(uint32_t top, datastore::Imm8VFPImmData *ret) {
2240 for (int i = 0; i < 256; i++) {
2241 if (table[i].dblTop == top) {
2242 *ret = table[i].data;
2243 return true;
2244 }
2245 }
2246 return false;
2247 }
2248 };
// RAII guard: while alive, keeps the assembler in its "no pool" state (see
// enterNoPool/leaveNoPool) so a constant pool is not placed in the middle of
// the sequence being emitted.
class AutoForbidPools {
    Assembler *masm_;
  public:
    AutoForbidPools(Assembler *masm) : masm_(masm) {
        masm_->enterNoPool();
    }
    ~AutoForbidPools() {
        masm_->leaveNoPool();
    }
};
2261 } // namespace jit
2262 } // namespace js
2264 #endif /* jit_arm_Assembler_arm_h */