Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable the double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie, where it counts.
One open question: how to convince FindCookie callers to test the condition and
pass a nullptr when the double-key logic is disabled.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #ifndef jit_arm_MacroAssembler_arm_h
8 #define jit_arm_MacroAssembler_arm_h
10 #include "mozilla/DebugOnly.h"
12 #include "jsopcode.h"
14 #include "jit/arm/Assembler-arm.h"
15 #include "jit/IonCaches.h"
16 #include "jit/IonFrames.h"
17 #include "jit/MoveResolver.h"
19 using mozilla::DebugOnly;
21 namespace js {
22 namespace jit {
// Register used to hold the target of an indirect call; ip (r12) is the ARM
// intra-procedure-call scratch register, so it is free to clobber at a call.
// NOTE(review): a non-const static in a header gives every translation unit
// its own mutable copy; this looks like it should be const — confirm that
// nothing assigns to it.
static Register CallReg = ip;
// Scale used when indexing jsval-sized slots: 1 << 3 == 8 bytes, which the
// following static assert ties to sizeof(jsval).
static const int defaultShift = 3;
JS_STATIC_ASSERT(1 << defaultShift == sizeof(jsval));
28 // MacroAssemblerARM is inheriting form Assembler defined in Assembler-arm.{h,cpp}
// MacroAssemblerARM inherits from Assembler, defined in Assembler-arm.{h,cpp}.
// The ma_* helpers are somewhat direct wrappers around the low-level emitters:
// each one picks an encoding — possibly several real instructions — for an
// operation that a single ARM instruction may not be able to express.
class MacroAssemblerARM : public Assembler
{
  protected:
    // On ARM, some instructions require a second scratch register. This register
    // defaults to lr, since it's non-allocatable (as it can be clobbered by some
    // instructions). Allow the baseline compiler to override this though, since
    // baseline IC stubs rely on lr holding the return address.
    Register secondScratchReg_;

    // Higher level tag testing code. A boxed Value occupies two words: the
    // payload at the base displacement and the type tag one word
    // (sizeof(void *)) after it.
    Operand ToPayload(Operand base) {
        return Operand(Register::FromCode(base.base()), base.disp());
    }
    Address ToPayload(Address base) {
        return ToPayload(Operand(base)).toAddress();
    }
    Operand ToType(Operand base) {
        return Operand(Register::FromCode(base.base()), base.disp() + sizeof(void *));
    }
    Address ToType(Address base) {
        return ToType(Operand(base)).toAddress();
    }

  public:
    MacroAssemblerARM()
      : secondScratchReg_(lr)
    { }

    // Replace the default second scratch register (lr). The replacement must
    // be distinct from the primary ScratchRegister.
    void setSecondScratchReg(Register reg) {
        JS_ASSERT(reg != ScratchRegister);
        secondScratchReg_ = reg;
    }

    // Conversions between integer and floating-point representations. The
    // overloads taking a Label *fail branch to it when the conversion cannot
    // be represented exactly (and, when negativeZeroCheck is set, presumably
    // for negative zero as well — confirm against the .cpp).
    void convertBoolToInt32(Register source, Register dest);
    void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
    void convertInt32ToDouble(const Address &src, FloatRegister dest);
    void convertUInt32ToFloat32(const Register &src, const FloatRegister &dest);
    void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
    void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest,
                                Condition c = Always);
    void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
    void convertDoubleToInt32(const FloatRegister &src, const Register &dest, Label *fail,
                              bool negativeZeroCheck = true);
    void convertFloat32ToInt32(const FloatRegister &src, const Register &dest, Label *fail,
                               bool negativeZeroCheck = true);

    void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest);
    void branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail);
    void convertInt32ToFloat32(const Register &src, const FloatRegister &dest);
    void convertInt32ToFloat32(const Address &src, FloatRegister dest);

    // Double-precision arithmetic.
    void addDouble(FloatRegister src, FloatRegister dest);
    void subDouble(FloatRegister src, FloatRegister dest);
    void mulDouble(FloatRegister src, FloatRegister dest);
    void divDouble(FloatRegister src, FloatRegister dest);

    void negateDouble(FloatRegister reg);
    void inc64(AbsoluteAddress dest);

    // Somewhat direct wrappers for the low-level assembler functions:
    // bitops. Attempt to encode a virtual ALU instruction using
    // two real instructions.
  private:
    // Helper for ma_alu: tries to emit the operation as two real instructions
    // when the immediate has no single encoding; presumably returns false when
    // even that is impossible — confirm against Assembler-arm.cpp.
    bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                 SetCond_ sc, Condition c);

  public:
    void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_alu(Register src1, Imm32 imm, Register dest,
                ALUOp op,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_nop();
    // Emit a move of imm into dest whose immediate can later be patched; rs
    // selects the encoding (movw/movt pair vs. a load, cf. L_MOVWT / L_LDR).
    void ma_movPatchable(Imm32 imm, Register dest, Assembler::Condition c,
                         RelocStyle rs, Instruction *i = nullptr);
    void ma_movPatchable(ImmPtr imm, Register dest, Assembler::Condition c,
                         RelocStyle rs, Instruction *i = nullptr);

    // These should likely be wrapped up as a set of macros
    // or something like that. I cannot think of a good reason
    // to explicitly have all of this code.

    // ALU based ops.
    // mov
    void ma_mov(Register src, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_mov(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_mov(ImmWord imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_mov(const ImmGCPtr &ptr, Register dest);

    // Shifts (just a move with a shifting op2): immediate shift amounts.
    void ma_lsl(Imm32 shift, Register src, Register dst);
    void ma_lsr(Imm32 shift, Register src, Register dst);
    void ma_asr(Imm32 shift, Register src, Register dst);
    void ma_ror(Imm32 shift, Register src, Register dst);
    void ma_rol(Imm32 shift, Register src, Register dst);
    // Shifts (just a move with a shifting op2): register shift amounts.
    void ma_lsl(Register shift, Register src, Register dst);
    void ma_lsr(Register shift, Register src, Register dst);
    void ma_asr(Register shift, Register src, Register dst);
    void ma_ror(Register shift, Register src, Register dst);
    void ma_rol(Register shift, Register src, Register dst);

    // Move not (dest <- ~src).
    void ma_mvn(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_mvn(Register src1, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // Negate (dest <- -src), implemented as rsb dest, src, 0.
    void ma_neg(Register src, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // and
    void ma_and(Register src, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_and(Register src1, Register src2, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_and(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_and(Imm32 imm, Register src1, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
    void ma_bic(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // exclusive or
    void ma_eor(Register src, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_eor(Register src1, Register src2, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_eor(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_eor(Imm32 imm, Register src1, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // or
    void ma_orr(Register src, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_orr(Register src1, Register src2, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_orr(Imm32 imm, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_orr(Imm32 imm, Register src1, Register dest,
                SetCond_ sc = NoSetCond, Condition c = Always);

    // Arithmetic based ops.
    // add with carry
    void ma_adc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_adc(Register src, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_adc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // add
    void ma_add(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_add(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_add(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_add(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // subtract with carry
    void ma_sbc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sbc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // subtract
    void ma_sub(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sub(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sub(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sub(Register src1, Operand op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // reverse subtract
    void ma_rsb(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_rsb(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // reverse subtract with carry
    void ma_rsc(Imm32 imm, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_rsc(Register src1, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);
    void ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc = NoSetCond, Condition c = Always);

    // Compares/tests.
    // compare negative (sets condition codes as src1 + src2 would)
    void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
    void ma_cmn(Register src1, Register src2, Condition c = Always);
    void ma_cmn(Register src1, Operand op, Condition c = Always);

    // compare (src1 - src2)
    void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
    void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
    void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
    void ma_cmp(Register src1, Operand op, Condition c = Always);
    void ma_cmp(Register src1, Register src2, Condition c = Always);

    // test for equality, (src1 ^ src2)
    void ma_teq(Register src1, Imm32 imm, Condition c = Always);
    void ma_teq(Register src1, Register src2, Condition c = Always);
    void ma_teq(Register src1, Operand op, Condition c = Always);

    // test (src1 & src2)
    void ma_tst(Register src1, Imm32 imm, Condition c = Always);
    void ma_tst(Register src1, Register src2, Condition c = Always);
    void ma_tst(Register src1, Operand op, Condition c = Always);

    // Multiplies. For now, there are only two that we care about.
    void ma_mul(Register src1, Register src2, Register dest);
    void ma_mul(Register src1, Imm32 imm, Register dest);
    // Presumably multiply with an overflow check, returning the condition to
    // branch on — confirm semantics against Assembler-arm.cpp.
    Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
    Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);

    // Fast mod: uses scratch registers, and thus needs to be in the assembler;
    // implicitly assumes that we can overwrite dest at the beginning of the
    // sequence.
    void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                     int32_t shift);

    // Mod: depends on integer divide instructions being supported.
    void ma_smod(Register num, Register div, Register dest);
    void ma_umod(Register num, Register div, Register dest);

    // Division: depends on integer divide instructions being supported.
    void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
    void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);

    // Memory.
    // Shortcut for when we know we're transferring 32 bits of data.
    void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                Index mode = Offset, Condition cc = Always);
    void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
                Index mode = Offset, Condition cc = Always);

    void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
    void ma_str(Register rt, const Operand &addr, Index mode = Offset, Condition cc = Always);
    void ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc);

    void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
    void ma_ldr(const Operand &addr, Register rt, Index mode = Offset, Condition cc = Always);

    // Sub-word loads/stores (byte, halfword, signed variants) and the
    // doubleword pair forms; rt2 is DebugOnly because the encoding implies it.
    void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
    void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
    void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
    void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
    void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset, Condition cc = Always);
    void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
    void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
    void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);

    // Specialty for moving N bits of data, where N == 8, 16, 32, 64.
    BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                  Register rn, Register rm, Register rt,
                                  Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);

    BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                  Register rn, Imm32 offset, Register rt,
                                  Index mode = Offset, Condition cc = Always);

    void ma_pop(Register r);
    void ma_push(Register r);

    void ma_vpop(VFPRegister r);
    void ma_vpush(VFPRegister r);

    // Branches when done from within ARM-specific code.
    BufferOffset ma_b(Label *dest, Condition c = Always, bool isPatchable = false);
    void ma_bx(Register dest, Condition c = Always);

    void ma_b(void *target, Relocation::Kind reloc, Condition c = Always);

    // This is almost NEVER necessary; we'll basically never be calling a label,
    // except possibly in the crazy bailout-table case.
    void ma_bl(Label *dest, Condition c = Always);

    void ma_blx(Register dest, Condition c = Always);

    // VFP/ALU. Plain names are the double-precision forms; the _f32 suffix
    // marks the single-precision variants.
    void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);

    void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);

    void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vabs(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    void ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Load a floating-point immediate into a VFP register.
    void ma_vimm(double value, FloatRegister dest, Condition cc = Always);
    void ma_vimm_f32(float value, FloatRegister dest, Condition cc = Always);

    void ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc = Always);
    void ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc = Always);
    // Compare against zero.
    void ma_vcmpz(FloatRegister src1, Condition cc = Always);
    void ma_vcmpz_f32(FloatRegister src1, Condition cc = Always);

    void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);

    void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);

    void ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Source is F64, dest is I32.
    void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Source is I32, dest is F64.
    void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Source is F32, dest is I32.
    void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Source is I32, dest is F32.
    void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);
    void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest, Condition cc = Always);

    // Transfers between core and VFP registers; the two-core-register forms
    // presumably move a 64-bit quantity — confirm against the .cpp.
    void ma_vxfer(FloatRegister src, Register dest, Condition cc = Always);
    void ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc = Always);

    void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
    void ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc = Always);

    void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);

    BufferOffset ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister dest, Condition cc = Always);

    BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
    BufferOffset ma_vldr(const Operand &addr, VFPRegister dest, Condition cc = Always);
    // NOTE(review): first parameter of this load overload is named src;
    // presumably it is actually the destination register — confirm naming
    // against the definition.
    BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);

    BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
    BufferOffset ma_vstr(VFPRegister src, const Operand &addr, Condition cc = Always);

    BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);

    // Calls an Ion function, assumes that the stack is untouched (8 byte aligned).
    void ma_callIon(const Register reg);
    // Calls an Ion function, assuming that sp has already been decremented.
    void ma_callIonNoPush(const Register reg);
    // Calls an Ion function, assuming that the stack is currently not 8 byte aligned.
    void ma_callIonHalfPush(const Register reg);

    void ma_call(ImmPtr dest);

    // Calls reg, storing the return address into sp[0].
    void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);

    // Float registers can only be loaded/stored in continuous runs
    // when using vstm/vldm.
    // This function breaks set into continuous runs and loads/stores
    // them at [rm]. rm will be modified and left in a state logically
    // suitable for the next load/store.
    // Returns the offset from [rm] for the logical next load/store.
    int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
                                   Register rm, DTMMode mode)
    {
        if (mode == IA) {
            // Increment-after: traverse the set from low to high register codes.
            return transferMultipleByRunsImpl
                <FloatRegisterForwardIterator>(set, ls, rm, mode, 1);
        }
        if (mode == DB) {
            // Decrement-before: traverse the set from high to low register codes.
            return transferMultipleByRunsImpl
                <FloatRegisterBackwardIterator>(set, ls, rm, mode, -1);
        }
        MOZ_ASSUME_UNREACHABLE("Invalid data transfer addressing mode");
    }

  private:
    // Implementation for transferMultipleByRuns so we can use different
    // iterators for forward/backward traversals.
    // The sign argument should be 1 if we traverse forwards, -1 if we
    // traverse backwards.
    template<typename RegisterIterator> int32_t
    transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
                               Register rm, DTMMode mode, int32_t sign)
    {
        JS_ASSERT(sign == 1 || sign == -1);

        int32_t delta = sign * sizeof(double);
        int32_t offset = 0;
        RegisterIterator iter(set);
        while (iter.more()) {
            // Open a multiple-register transfer for the run beginning at
            // *iter; rm is updated (WriteBack) as registers are transferred.
            startFloatTransferM(ls, rm, mode, WriteBack);
            int32_t reg = (*iter).code_;
            do {
                offset += delta;
                transferFloatReg(*iter);
                // Extend the run while register codes remain consecutive in
                // the traversal direction.
            } while ((++iter).more() && (*iter).code_ == (reg += sign));
            finishFloatTransfer();
        }

        // Every register in the set must have been transferred exactly once.
        JS_ASSERT(offset == static_cast<int32_t>(set.size() * sizeof(double)) * sign);
        return offset;
    }
};
455 class MacroAssemblerARMCompat : public MacroAssemblerARM
456 {
457 // Number of bytes the stack is adjusted inside a call to C. Calls to C may
458 // not be nested.
459 bool inCall_;
460 uint32_t args_;
461 // The actual number of arguments that were passed, used to assert that
462 // the initial number of arguments declared was correct.
463 uint32_t passedArgs_;
464 uint32_t passedArgTypes_;
466 // ARM treats arguments as a vector in registers/memory, that looks like:
467 // { r0, r1, r2, r3, [sp], [sp,+4], [sp,+8] ... }
468 // usedIntSlots_ keeps track of how many of these have been used.
469 // It bears a passing resemblance to passedArgs_, but a single argument
470 // can effectively use between one and three slots depending on its size and
471 // alignment requirements
472 uint32_t usedIntSlots_;
473 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
474 uint32_t usedFloatSlots_;
475 bool usedFloat32_;
476 uint32_t padding_;
477 #endif
478 bool dynamicAlignment_;
480 bool enoughMemory_;
482 // Used to work around the move resolver's lack of support for
483 // moving into register pairs, which the softfp ABI needs.
484 mozilla::Array<MoveOperand, 2> floatArgsInGPR;
485 mozilla::Array<bool, 2> floatArgsInGPRValid;
487 // Compute space needed for the function call and set the properties of the
488 // callee. It returns the space which has to be allocated for calling the
489 // function.
490 //
491 // arg Number of arguments of the function.
492 void setupABICall(uint32_t arg);
494 protected:
495 MoveResolver moveResolver_;
497 // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
498 // needed to compute offsets to stack slots while temporary space has been
499 // reserved for unexpected spills or C++ function calls. It is maintained
500 // by functions which track stack alignment, which for clear distinction
501 // use StudlyCaps (for example, Push, Pop).
502 uint32_t framePushed_;
503 void adjustFrame(int value) {
504 setFramePushed(framePushed_ + value);
505 }
506 public:
507 MacroAssemblerARMCompat()
508 : inCall_(false),
509 enoughMemory_(true),
510 framePushed_(0)
511 { }
512 bool oom() const {
513 return Assembler::oom() || !enoughMemory_;
514 }
516 public:
517 using MacroAssemblerARM::call;
519 // jumps + other functions that should be called from
520 // non-arm specific code...
521 // basically, an x86 front end on top of the ARM code.
522 void j(Condition code , Label *dest)
523 {
524 as_b(dest, code);
525 }
526 void j(Label *dest)
527 {
528 as_b(dest, Always);
529 }
531 void mov(Register src, Register dest) {
532 ma_mov(src, dest);
533 }
534 void mov(ImmWord imm, Register dest) {
535 ma_mov(Imm32(imm.value), dest);
536 }
537 void mov(ImmPtr imm, Register dest) {
538 mov(ImmWord(uintptr_t(imm.value)), dest);
539 }
540 void mov(Register src, Address dest) {
541 MOZ_ASSUME_UNREACHABLE("NYI-IC");
542 }
543 void mov(Address src, Register dest) {
544 MOZ_ASSUME_UNREACHABLE("NYI-IC");
545 }
547 void call(const Register reg) {
548 as_blx(reg);
549 }
550 void call(Label *label) {
551 // for now, assume that it'll be nearby?
552 as_bl(label, Always);
553 }
554 void call(ImmWord imm) {
555 call(ImmPtr((void*)imm.value));
556 }
557 void call(ImmPtr imm) {
558 BufferOffset bo = m_buffer.nextOffset();
559 addPendingJump(bo, imm, Relocation::HARDCODED);
560 ma_call(imm);
561 }
562 void call(AsmJSImmPtr imm) {
563 movePtr(imm, CallReg);
564 call(CallReg);
565 }
566 void call(JitCode *c) {
567 BufferOffset bo = m_buffer.nextOffset();
568 addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
569 RelocStyle rs;
570 if (hasMOVWT())
571 rs = L_MOVWT;
572 else
573 rs = L_LDR;
575 ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
576 ma_callIonHalfPush(ScratchRegister);
577 }
579 void appendCallSite(const CallSiteDesc &desc) {
580 enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_));
581 }
583 void call(const CallSiteDesc &desc, const Register reg) {
584 call(reg);
585 appendCallSite(desc);
586 }
587 void call(const CallSiteDesc &desc, Label *label) {
588 call(label);
589 appendCallSite(desc);
590 }
591 void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
592 call(imm);
593 appendCallSite(desc);
594 }
595 void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
596 movePtr(imm, CallReg);
597 ma_callAndStoreRet(CallReg, stackArgBytes);
598 appendCallSite(CallSiteDesc::Exit());
599 }
600 void callIonFromAsmJS(const Register reg) {
601 ma_callIonNoPush(reg);
602 appendCallSite(CallSiteDesc::Exit());
604 // The Ion ABI has the callee pop the return address off the stack.
605 // The asm.js caller assumes that the call leaves sp unchanged, so bump
606 // the stack.
607 subPtr(Imm32(sizeof(void*)), sp);
608 }
610 void branch(JitCode *c) {
611 BufferOffset bo = m_buffer.nextOffset();
612 addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
613 RelocStyle rs;
614 if (hasMOVWT())
615 rs = L_MOVWT;
616 else
617 rs = L_LDR;
619 ma_movPatchable(ImmPtr(c->raw()), ScratchRegister, Always, rs);
620 ma_bx(ScratchRegister);
621 }
622 void branch(const Register reg) {
623 ma_bx(reg);
624 }
625 void nop() {
626 ma_nop();
627 }
628 void ret() {
629 ma_pop(pc);
630 m_buffer.markGuard();
631 }
632 void retn(Imm32 n) {
633 // pc <- [sp]; sp += n
634 ma_dtr(IsLoad, sp, n, pc, PostIndex);
635 m_buffer.markGuard();
636 }
637 void push(Imm32 imm) {
638 ma_mov(imm, ScratchRegister);
639 ma_push(ScratchRegister);
640 }
641 void push(ImmWord imm) {
642 push(Imm32(imm.value));
643 }
644 void push(ImmGCPtr imm) {
645 ma_mov(imm, ScratchRegister);
646 ma_push(ScratchRegister);
647 }
648 void push(const Address &address) {
649 ma_ldr(Operand(address.base, address.offset), ScratchRegister);
650 ma_push(ScratchRegister);
651 }
652 void push(const Register ®) {
653 ma_push(reg);
654 }
655 void push(const FloatRegister ®) {
656 ma_vpush(VFPRegister(reg));
657 }
658 void pushWithPadding(const Register ®, const Imm32 extraSpace) {
659 Imm32 totSpace = Imm32(extraSpace.value + 4);
660 ma_dtr(IsStore, sp, totSpace, reg, PreIndex);
661 }
662 void pushWithPadding(const Imm32 &imm, const Imm32 extraSpace) {
663 Imm32 totSpace = Imm32(extraSpace.value + 4);
664 // ma_dtr may need the scratch register to adjust the stack, so use the
665 // second scratch register.
666 ma_mov(imm, secondScratchReg_);
667 ma_dtr(IsStore, sp, totSpace, secondScratchReg_, PreIndex);
668 }
670 void pop(const Register ®) {
671 ma_pop(reg);
672 }
673 void pop(const FloatRegister ®) {
674 ma_vpop(VFPRegister(reg));
675 }
677 void popN(const Register ®, Imm32 extraSpace) {
678 Imm32 totSpace = Imm32(extraSpace.value + 4);
679 ma_dtr(IsLoad, sp, totSpace, reg, PostIndex);
680 }
682 CodeOffsetLabel toggledJump(Label *label);
684 // Emit a BLX or NOP instruction. ToggleCall can be used to patch
685 // this instruction.
686 CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
688 static size_t ToggledCallSize() {
689 if (hasMOVWT())
690 // Size of a movw, movt, nop/blx instruction.
691 return 12;
692 // Size of a ldr, nop/blx instruction
693 return 8;
694 }
696 CodeOffsetLabel pushWithPatch(ImmWord imm) {
697 CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
698 ma_push(ScratchRegister);
699 return label;
700 }
702 CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
703 CodeOffsetLabel label = currentOffset();
704 ma_movPatchable(Imm32(imm.value), dest, Always, hasMOVWT() ? L_MOVWT : L_LDR);
705 return label;
706 }
707 CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
708 return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
709 }
711 void jump(Label *label) {
712 as_b(label);
713 }
714 void jump(Register reg) {
715 ma_bx(reg);
716 }
717 void jump(const Address &address) {
718 ma_ldr(Operand(address.base, address.offset), ScratchRegister);
719 ma_bx(ScratchRegister);
720 }
722 void neg32(Register reg) {
723 ma_neg(reg, reg, SetCond);
724 }
725 void negl(Register reg) {
726 ma_neg(reg, reg, SetCond);
727 }
728 void test32(Register lhs, Register rhs) {
729 ma_tst(lhs, rhs);
730 }
731 void test32(Register lhs, Imm32 imm) {
732 ma_tst(lhs, imm);
733 }
734 void test32(const Address &address, Imm32 imm) {
735 ma_ldr(Operand(address.base, address.offset), ScratchRegister);
736 ma_tst(ScratchRegister, imm);
737 }
738 void testPtr(Register lhs, Register rhs) {
739 test32(lhs, rhs);
740 }
742 // Returns the register containing the type tag.
743 Register splitTagForTest(const ValueOperand &value) {
744 return value.typeReg();
745 }
747 // higher level tag testing code
748 Condition testInt32(Condition cond, const ValueOperand &value);
749 Condition testBoolean(Condition cond, const ValueOperand &value);
750 Condition testDouble(Condition cond, const ValueOperand &value);
751 Condition testNull(Condition cond, const ValueOperand &value);
752 Condition testUndefined(Condition cond, const ValueOperand &value);
753 Condition testString(Condition cond, const ValueOperand &value);
754 Condition testObject(Condition cond, const ValueOperand &value);
755 Condition testNumber(Condition cond, const ValueOperand &value);
756 Condition testMagic(Condition cond, const ValueOperand &value);
758 Condition testPrimitive(Condition cond, const ValueOperand &value);
760 // register-based tests
761 Condition testInt32(Condition cond, const Register &tag);
762 Condition testBoolean(Condition cond, const Register &tag);
763 Condition testNull(Condition cond, const Register &tag);
764 Condition testUndefined(Condition cond, const Register &tag);
765 Condition testString(Condition cond, const Register &tag);
766 Condition testObject(Condition cond, const Register &tag);
767 Condition testDouble(Condition cond, const Register &tag);
768 Condition testNumber(Condition cond, const Register &tag);
769 Condition testMagic(Condition cond, const Register &tag);
770 Condition testPrimitive(Condition cond, const Register &tag);
772 Condition testGCThing(Condition cond, const Address &address);
773 Condition testMagic(Condition cond, const Address &address);
774 Condition testInt32(Condition cond, const Address &address);
775 Condition testDouble(Condition cond, const Address &address);
776 Condition testBoolean(Condition cond, const Address &address);
777 Condition testNull(Condition cond, const Address &address);
778 Condition testUndefined(Condition cond, const Address &address);
779 Condition testString(Condition cond, const Address &address);
780 Condition testObject(Condition cond, const Address &address);
781 Condition testNumber(Condition cond, const Address &address);
783 Condition testUndefined(Condition cond, const BaseIndex &src);
784 Condition testNull(Condition cond, const BaseIndex &src);
785 Condition testBoolean(Condition cond, const BaseIndex &src);
786 Condition testString(Condition cond, const BaseIndex &src);
787 Condition testInt32(Condition cond, const BaseIndex &src);
788 Condition testObject(Condition cond, const BaseIndex &src);
789 Condition testDouble(Condition cond, const BaseIndex &src);
790 Condition testMagic(Condition cond, const BaseIndex &src);
791 Condition testGCThing(Condition cond, const BaseIndex &src);
793 template <typename T>
794 void branchTestGCThing(Condition cond, const T &t, Label *label) {
795 Condition c = testGCThing(cond, t);
796 ma_b(label, c);
797 }
798 template <typename T>
799 void branchTestPrimitive(Condition cond, const T &t, Label *label) {
800 Condition c = testPrimitive(cond, t);
801 ma_b(label, c);
802 }
    // Branch to |label| when |value| compares |cond| against the constant |v|,
    // or against the Value stored at |valaddr|.
    void branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label);
    void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
                         Label *label);

    // unboxing code
    void unboxInt32(const ValueOperand &operand, const Register &dest);
    void unboxInt32(const Address &src, const Register &dest);
    void unboxBoolean(const ValueOperand &operand, const Register &dest);
    void unboxBoolean(const Address &src, const Register &dest);
    void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
    void unboxDouble(const Address &src, const FloatRegister &dest);
    void unboxString(const ValueOperand &operand, const Register &dest);
    void unboxString(const Address &src, const Register &dest);
    void unboxObject(const ValueOperand &src, const Register &dest);
    void unboxValue(const ValueOperand &src, AnyRegister dest);
    void unboxPrivate(const ValueOperand &src, Register dest);

    // Flip the boolean payload in place (0 <-> 1) by XOR-ing with 1.
    void notBoolean(const ValueOperand &val) {
        ma_eor(Imm32(1), val.payloadReg());
    }

    // boxing code
    void boxDouble(const FloatRegister &src, const ValueOperand &dest);
    void boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest);

    // Extended unboxing API. If the payload is already in a register, returns
    // that register. Otherwise, provides a move to the given scratch register,
    // and returns that.
    Register extractObject(const Address &address, Register scratch);
    // In the ValueOperand overloads the payload/tag already lives in a
    // register, so |scratch| is unused.
    Register extractObject(const ValueOperand &value, Register scratch) {
        return value.payloadReg();
    }
    Register extractInt32(const ValueOperand &value, Register scratch) {
        return value.payloadReg();
    }
    Register extractBoolean(const ValueOperand &value, Register scratch) {
        return value.payloadReg();
    }
    Register extractTag(const Address &address, Register scratch);
    Register extractTag(const BaseIndex &address, Register scratch);
    Register extractTag(const ValueOperand &value, Register scratch) {
        return value.typeReg();
    }

    // Numeric conversions into a floating-point register.
    void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
    void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
    void loadInt32OrDouble(const Operand &src, const FloatRegister &dest);
    void loadInt32OrDouble(Register base, Register index,
                           const FloatRegister &dest, int32_t shift = defaultShift);
    void loadConstantDouble(double dp, const FloatRegister &dest);
    // treat the value as a boolean, and set condition codes accordingly
    Condition testInt32Truthy(bool truthy, const ValueOperand &operand);
    Condition testBooleanTruthy(bool truthy, const ValueOperand &operand);
    Condition testDoubleTruthy(bool truthy, const FloatRegister &reg);
    Condition testStringTruthy(bool truthy, const ValueOperand &value);

    void boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
    void int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
    void loadConstantFloat32(float f, const FloatRegister &dest);
864 template<typename T>
865 void branchTestInt32(Condition cond, const T & t, Label *label) {
866 Condition c = testInt32(cond, t);
867 ma_b(label, c);
868 }
869 template<typename T>
870 void branchTestBoolean(Condition cond, const T & t, Label *label) {
871 Condition c = testBoolean(cond, t);
872 ma_b(label, c);
873 }
    // 32-bit compare-and-branch. Memory forms load the lhs into a scratch
    // register first; when the rhs is an immediate, secondScratchReg_ is used
    // because the recursive branch32/ma_cmp call may itself need
    // ScratchRegister to synthesize the immediate.
    void branch32(Condition cond, Register lhs, Register rhs, Label *label) {
        ma_cmp(lhs, rhs);
        ma_b(label, cond);
    }
    void branch32(Condition cond, Register lhs, Imm32 imm, Label *label) {
        ma_cmp(lhs, imm);
        ma_b(label, cond);
    }
    void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
        if (lhs.getTag() == Operand::OP2) {
            // Operand is already a plain register: compare directly.
            branch32(cond, lhs.toReg(), rhs, label);
        } else {
            ma_ldr(lhs, ScratchRegister);
            branch32(cond, ScratchRegister, rhs, label);
        }
    }
    void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) {
        if (lhs.getTag() == Operand::OP2) {
            branch32(cond, lhs.toReg(), rhs, label);
        } else {
            // branch32 will use ScratchRegister.
            ma_ldr(lhs, secondScratchReg_);
            branch32(cond, secondScratchReg_, rhs, label);
        }
    }
    void branch32(Condition cond, const Address &lhs, Register rhs, Label *label) {
        load32(lhs, ScratchRegister);
        branch32(cond, ScratchRegister, rhs, label);
    }
    void branch32(Condition cond, const Address &lhs, Imm32 rhs, Label *label) {
        // branch32 will use ScratchRegister.
        load32(lhs, secondScratchReg_);
        branch32(cond, secondScratchReg_, rhs, label);
    }
    // Pointers are 32 bits wide here, so pointer compares reduce to branch32.
    void branchPtr(Condition cond, const Address &lhs, Register rhs, Label *label) {
        branch32(cond, lhs, rhs, label);
    }

    // Private-value branches simply forward to the plain pointer compares on
    // this platform.
    void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
        branchPtr(cond, lhs, ptr, label);
    }

    void branchPrivatePtr(Condition cond, const Address &lhs, Register ptr, Label *label) {
        branchPtr(cond, lhs, ptr, label);
    }

    void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label *label) {
        branchPtr(cond, lhs, ptr, label);
    }
924 template<typename T>
925 void branchTestDouble(Condition cond, const T & t, Label *label) {
926 Condition c = testDouble(cond, t);
927 ma_b(label, c);
928 }
929 template<typename T>
930 void branchTestNull(Condition cond, const T & t, Label *label) {
931 Condition c = testNull(cond, t);
932 ma_b(label, c);
933 }
934 template<typename T>
935 void branchTestObject(Condition cond, const T & t, Label *label) {
936 Condition c = testObject(cond, t);
937 ma_b(label, c);
938 }
939 template<typename T>
940 void branchTestString(Condition cond, const T & t, Label *label) {
941 Condition c = testString(cond, t);
942 ma_b(label, c);
943 }
944 template<typename T>
945 void branchTestUndefined(Condition cond, const T & t, Label *label) {
946 Condition c = testUndefined(cond, t);
947 ma_b(label, c);
948 }
949 template <typename T>
950 void branchTestNumber(Condition cond, const T &t, Label *label) {
951 cond = testNumber(cond, t);
952 ma_b(label, cond);
953 }
954 template <typename T>
955 void branchTestMagic(Condition cond, const T &t, Label *label) {
956 cond = testMagic(cond, t);
957 ma_b(label, cond);
958 }
    // Branch against the specific magic value |why|. Only Equal/NotEqual are
    // supported: the magic tag is checked first, and only when the tag is
    // magic is the payload compared against |why|.
    void branchTestMagicValue(Condition cond, const ValueOperand &val, JSWhyMagic why,
                              Label *label) {
        JS_ASSERT(cond == Equal || cond == NotEqual);
        // Test for magic
        Label notmagic;
        Condition testCond = testMagic(cond, val);
        ma_b(&notmagic, InvertCondition(testCond));
        // Test magic value
        branch32(cond, val.payloadReg(), Imm32(static_cast<int32_t>(why)), label);
        bind(&notmagic);
    }
970 void branchTestInt32Truthy(bool truthy, const ValueOperand &operand, Label *label) {
971 Condition c = testInt32Truthy(truthy, operand);
972 ma_b(label, c);
973 }
974 void branchTestBooleanTruthy(bool truthy, const ValueOperand &operand, Label *label) {
975 Condition c = testBooleanTruthy(truthy, operand);
976 ma_b(label, c);
977 }
978 void branchTestDoubleTruthy(bool truthy, const FloatRegister ®, Label *label) {
979 Condition c = testDoubleTruthy(truthy, reg);
980 ma_b(label, c);
981 }
982 void branchTestStringTruthy(bool truthy, const ValueOperand &value, Label *label) {
983 Condition c = testStringTruthy(truthy, value);
984 ma_b(label, c);
985 }
    void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        // x86 likes test foo, foo rather than cmp foo, #0.
        // Convert the former into the latter.
        if (lhs == rhs && (cond == Zero || cond == NonZero))
            ma_cmp(lhs, Imm32(0));
        else
            ma_tst(lhs, rhs);
        ma_b(label, cond);
    }
    void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
        ma_tst(lhs, imm);
        ma_b(label, cond);
    }
    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
        // branchTest32 will use ScratchRegister.
        load32(address, secondScratchReg_);
        branchTest32(cond, secondScratchReg_, imm, label);
    }
    // Pointer-sized tests are 32-bit tests on this platform.
    void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        branchTest32(cond, lhs, rhs, label);
    }
    void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
        branchTest32(cond, lhs, rhs, label);
    }
    void branchTestPtr(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
        branchTest32(cond, lhs, imm, label);
    }
    void branchPtr(Condition cond, Register lhs, Register rhs, Label *label) {
        branch32(cond, lhs, rhs, label);
    }
    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
        // Materialize the GC pointer first so the compare is
        // register-register.
        movePtr(ptr, ScratchRegister);
        branchPtr(cond, lhs, ScratchRegister, label);
    }
    void branchPtr(Condition cond, Register lhs, ImmWord imm, Label *label) {
        branch32(cond, lhs, Imm32(imm.value), label);
    }
    void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label *label) {
        branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
    }
    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label *label) {
        movePtr(imm, ScratchRegister);
        branchPtr(cond, lhs, ScratchRegister, label);
    }
    void branchPtr(Condition cond, Register lhs, Imm32 imm, Label *label) {
        branch32(cond, lhs, imm, label);
    }
    // Subtract |imm| from |lhs| and branch on the resulting comparison with 0.
    void decBranchPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
        subPtr(imm, lhs);
        branch32(cond, lhs, Imm32(0), label);
    }
    void moveValue(const Value &val, Register type, Register data);

    // Emit a jump whose target can be patched later; returns the offset of
    // the jump for the patching machinery.
    CodeOffsetJump jumpWithPatch(RepatchLabel *label, Condition cond = Always);
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel *label) {
        ma_cmp(reg, ptr);
        return jumpWithPatch(label, cond);
    }
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel *label) {
        ma_ldr(addr, secondScratchReg_);
        ma_cmp(secondScratchReg_, ptr);
        return jumpWithPatch(label, cond);
    }
    // Address-based pointer compares: load through secondScratchReg_ because
    // ma_cmp may need ScratchRegister for the immediate.
    void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label *label) {
        ma_ldr(addr, secondScratchReg_);
        ma_cmp(secondScratchReg_, ptr);
        ma_b(label, cond);
    }
    void branchPtr(Condition cond, Address addr, ImmWord ptr, Label *label) {
        ma_ldr(addr, secondScratchReg_);
        ma_cmp(secondScratchReg_, ptr);
        ma_b(label, cond);
    }
    void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
        branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
    }
    void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
        loadPtr(addr, ScratchRegister);
        ma_cmp(ScratchRegister, ptr);
        ma_b(label, cond);
    }
    void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr, Label *label) {
        loadPtr(addr, ScratchRegister);
        ma_cmp(ScratchRegister, ptr);
        ma_b(label, cond);
    }
    void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
        loadPtr(lhs, secondScratchReg_); // ma_cmp will use the scratch register.
        ma_cmp(secondScratchReg_, rhs);
        ma_b(label, cond);
    }
    void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
        loadPtr(lhs, secondScratchReg_); // ma_cmp will use the scratch register.
        ma_cmp(secondScratchReg_, rhs);
        ma_b(label, cond);
    }

    // Load an already-typed payload into |dest|: floats go through
    // loadInt32OrDouble, everything else is a plain 32-bit load.
    void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
        if (dest.isFloat())
            loadInt32OrDouble(Operand(address), dest.fpu());
        else
            ma_ldr(address, dest.gpr());
    }

    void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
        if (dest.isFloat())
            loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
        else
            load32(address, dest.gpr());
    }
    void moveValue(const Value &val, const ValueOperand &dest);

    // Move a boxed value between register pairs, coping with any overlap
    // between source and destination registers.
    void moveValue(const ValueOperand &src, const ValueOperand &dest) {
        Register s0 = src.typeReg(), d0 = dest.typeReg(),
                 s1 = src.payloadReg(), d1 = dest.payloadReg();

        // Either one or both of the source registers could be the same as a
        // destination register.
        if (s1 == d0) {
            if (s0 == d1) {
                // If both are, this is just a swap of two registers.
                JS_ASSERT(d1 != ScratchRegister);
                JS_ASSERT(d0 != ScratchRegister);
                ma_mov(d1, ScratchRegister);
                ma_mov(d0, d1);
                ma_mov(ScratchRegister, d0);
                return;
            }
            // If only one is, copy that source first.
            mozilla::Swap(s0, s1);
            mozilla::Swap(d0, d1);
        }

        if (s0 != d0)
            ma_mov(s0, d0);
        if (s1 != d1)
            ma_mov(s1, d1);
    }

    // Store a boxed value to memory. Layout: payload at offset 0, tag at
    // offset +4 (see the +4 stores below).
    void storeValue(ValueOperand val, Operand dst);
    void storeValue(ValueOperand val, const BaseIndex &dest);
    void storeValue(JSValueType type, Register reg, BaseIndex dest) {
        // Harder cases not handled yet.
        JS_ASSERT(dest.offset == 0);
        // Compute base + (index << scale) into ScratchRegister, then store.
        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
        storeValue(type, reg, Address(ScratchRegister, 0));
    }
    void storeValue(ValueOperand val, const Address &dest) {
        storeValue(val, Operand(dest));
    }
    void storeValue(JSValueType type, Register reg, Address dest) {
        ma_str(reg, dest);
        ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), secondScratchReg_);
        ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
    }
    void storeValue(const Value &val, Address dest) {
        jsval_layout jv = JSVAL_TO_IMPL(val);
        ma_mov(Imm32(jv.s.tag), secondScratchReg_);
        ma_str(secondScratchReg_, Address(dest.base, dest.offset + 4));
        // GC things are stored as traceable ImmGCPtr immediates.
        if (val.isMarkable())
            ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), secondScratchReg_);
        else
            ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
        ma_str(secondScratchReg_, dest);
    }
    void storeValue(const Value &val, BaseIndex dest) {
        // Harder cases not handled yet.
        JS_ASSERT(dest.offset == 0);
        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
        storeValue(val, Address(ScratchRegister, 0));
    }
    void loadValue(Address src, ValueOperand val);
    void loadValue(Operand dest, ValueOperand val) {
        loadValue(dest.toAddress(), val);
    }
    void loadValue(const BaseIndex &addr, ValueOperand val);
    void tagValue(JSValueType type, Register payload, ValueOperand dest);

    void pushValue(ValueOperand val);
    void popValue(ValueOperand val);
    // Push the tag first so the payload ends up at the lower stack address.
    void pushValue(const Value &val) {
        jsval_layout jv = JSVAL_TO_IMPL(val);
        push(Imm32(jv.s.tag));
        if (val.isMarkable())
            push(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
        else
            push(Imm32(jv.s.payload.i32));
    }
    void pushValue(JSValueType type, Register reg) {
        push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
        ma_push(reg);
    }
    void pushValue(const Address &addr);
    // Capital-P Push/Pop additionally track framePushed_.
    void Push(const ValueOperand &val) {
        pushValue(val);
        framePushed_ += sizeof(Value);
    }
    void Pop(const ValueOperand &val) {
        popValue(val);
        framePushed_ -= sizeof(Value);
    }
    // Store only one half (payload or type tag) of a boxed value.
    void storePayload(const Value &val, Operand dest);
    void storePayload(Register src, Operand dest);
    void storePayload(const Value &val, Register base, Register index, int32_t shift = defaultShift);
    void storePayload(Register src, Register base, Register index, int32_t shift = defaultShift);
    void storeTypeTag(ImmTag tag, Operand dest);
    void storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift = defaultShift);

    // Encode a frame descriptor in-place: size shifted up, frame type OR-ed
    // into the low bits.
    void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
        ma_lsl(Imm32(FRAMESIZE_SHIFT), frameSizeReg, frameSizeReg);
        ma_orr(Imm32(type), frameSizeReg);
    }

    void linkExitFrame();
    void linkParallelExitFrame(const Register &pt);
    void handleFailureWithHandler(void *handler);
    void handleFailureWithHandlerTail();
    /////////////////////////////////////////////////////////////////
    // Common interface.
    /////////////////////////////////////////////////////////////////
  public:
    // The following functions are exposed for use in platform-shared code.
    // Each Push/Pop adjusts framePushed_ via adjustFrame so the assembler's
    // view of the stack stays consistent.
    void Push(const Register &reg) {
        ma_push(reg);
        adjustFrame(sizeof(intptr_t));
    }
    void Push(const Imm32 imm) {
        push(imm);
        adjustFrame(sizeof(intptr_t));
    }
    void Push(const ImmWord imm) {
        push(imm);
        adjustFrame(sizeof(intptr_t));
    }
    void Push(const ImmPtr imm) {
        Push(ImmWord(uintptr_t(imm.value)));
    }
    void Push(const ImmGCPtr ptr) {
        push(ptr);
        adjustFrame(sizeof(intptr_t));
    }
    void Push(const FloatRegister &t) {
        VFPRegister r = VFPRegister(t);
        ma_vpush(VFPRegister(t));
        adjustFrame(r.size());
    }

    // Push a placeholder word whose value will be patched in later; the
    // returned label identifies the patch site.
    CodeOffsetLabel PushWithPatch(const ImmWord &word) {
        framePushed_ += sizeof(word.value);
        return pushWithPatch(word);
    }
    CodeOffsetLabel PushWithPatch(const ImmPtr &imm) {
        return PushWithPatch(ImmWord(uintptr_t(imm.value)));
    }

    void PushWithPadding(const Register &reg, const Imm32 extraSpace) {
        pushWithPadding(reg, extraSpace);
        adjustFrame(sizeof(intptr_t) + extraSpace.value);
    }
    void PushWithPadding(const Imm32 imm, const Imm32 extraSpace) {
        pushWithPadding(imm, extraSpace);
        adjustFrame(sizeof(intptr_t) + extraSpace.value);
    }

    void Pop(const Register &reg) {
        // NOTE(review): -sizeof(intptr_t) negates an unsigned value; the
        // wrapped result converts back to the intended small negative
        // adjustment, but an explicit int cast would be clearer.
        ma_pop(reg);
        adjustFrame(-sizeof(intptr_t));
    }
    // Account for |args| bytes already removed from the stack by other means.
    void implicitPop(uint32_t args) {
        JS_ASSERT(args % sizeof(intptr_t) == 0);
        adjustFrame(-args);
    }
    uint32_t framePushed() const {
        return framePushed_;
    }
    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }
    // Builds an exit frame on the stack, with a return address to an internal
    // non-function. Returns offset to be passed to markSafepointAt().
    bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);

    void callWithExitFrame(JitCode *target);
    void callWithExitFrame(JitCode *target, Register dynStack);

    // Makes an Ion call using the only two methods that it is sane for
    // indep code to make a call
    void callIon(const Register &callee);

    void reserveStack(uint32_t amount);
    void freeStack(uint32_t amount);
    void freeStack(Register amount);

    // 32-bit arithmetic.
    void add32(Register src, Register dest);
    void add32(Imm32 imm, Register dest);
    void add32(Imm32 imm, const Address &dest);
    void sub32(Imm32 imm, Register dest);
    void sub32(Register src, Register dest);
    // Perform the arithmetic, then branch on the resulting condition codes.
    template <typename T>
    void branchAdd32(Condition cond, T src, Register dest, Label *label) {
        add32(src, dest);
        j(cond, label);
    }
    template <typename T>
    void branchSub32(Condition cond, T src, Register dest, Label *label) {
        sub32(src, dest);
        j(cond, label);
    }
    void xor32(Imm32 imm, Register dest);

    // Bitwise operations, 32-bit and pointer-width.
    void and32(Imm32 imm, Register dest);
    void and32(Imm32 imm, const Address &dest);
    void or32(Imm32 imm, const Address &dest);
    void xorPtr(Imm32 imm, Register dest);
    void xorPtr(Register src, Register dest);
    void orPtr(Imm32 imm, Register dest);
    void orPtr(Register src, Register dest);
    void andPtr(Imm32 imm, Register dest);
    void andPtr(Register src, Register dest);
    void addPtr(Register src, Register dest);
    void addPtr(const Address &src, Register dest);
    void not32(Register reg);

    void move32(const Imm32 &imm, const Register &dest);
    void move32(const Register &src, const Register &dest);

    void movePtr(const Register &src, const Register &dest);
    void movePtr(const ImmWord &imm, const Register &dest);
    void movePtr(const ImmPtr &imm, const Register &dest);
    void movePtr(const AsmJSImmPtr &imm, const Register &dest);
    void movePtr(const ImmGCPtr &imm, const Register &dest);

    // Sub-word loads with explicit extension behavior.
    void load8SignExtend(const Address &address, const Register &dest);
    void load8SignExtend(const BaseIndex &src, const Register &dest);

    void load8ZeroExtend(const Address &address, const Register &dest);
    void load8ZeroExtend(const BaseIndex &src, const Register &dest);

    void load16SignExtend(const Address &address, const Register &dest);
    void load16SignExtend(const BaseIndex &src, const Register &dest);

    void load16ZeroExtend(const Address &address, const Register &dest);
    void load16ZeroExtend(const BaseIndex &src, const Register &dest);

    void load32(const Address &address, const Register &dest);
    void load32(const BaseIndex &address, const Register &dest);
    void load32(const AbsoluteAddress &address, const Register &dest);

    void loadPtr(const Address &address, const Register &dest);
    void loadPtr(const BaseIndex &src, const Register &dest);
    void loadPtr(const AbsoluteAddress &address, const Register &dest);
    void loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest);

    void loadPrivate(const Address &address, const Register &dest);

    void loadDouble(const Address &addr, const FloatRegister &dest);
    void loadDouble(const BaseIndex &src, const FloatRegister &dest);

    // Load a float value into a register, then expand it to a double.
    void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
    void loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest);

    void loadFloat32(const Address &addr, const FloatRegister &dest);
    void loadFloat32(const BaseIndex &src, const FloatRegister &dest);

    // Sub-word and word stores.
    void store8(const Register &src, const Address &address);
    void store8(const Imm32 &imm, const Address &address);
    void store8(const Register &src, const BaseIndex &address);
    void store8(const Imm32 &imm, const BaseIndex &address);

    void store16(const Register &src, const Address &address);
    void store16(const Imm32 &imm, const Address &address);
    void store16(const Register &src, const BaseIndex &address);
    void store16(const Imm32 &imm, const BaseIndex &address);

    void store32(const Register &src, const AbsoluteAddress &address);
    void store32(const Register &src, const Address &address);
    void store32(const Register &src, const BaseIndex &address);
    void store32(const Imm32 &src, const Address &address);
    void store32(const Imm32 &src, const BaseIndex &address);

    void storePtr(ImmWord imm, const Address &address);
    void storePtr(ImmPtr imm, const Address &address);
    void storePtr(ImmGCPtr imm, const Address &address);
    void storePtr(Register src, const Address &address);
    void storePtr(const Register &src, const AbsoluteAddress &dest);
    void storeDouble(FloatRegister src, Address addr) {
        ma_vstr(src, Operand(addr));
    }
    void storeDouble(FloatRegister src, BaseIndex addr) {
        // Harder cases not handled yet.
        JS_ASSERT(addr.offset == 0);
        uint32_t scale = Imm32::ShiftOf(addr.scale).value;
        ma_vstr(src, addr.base, addr.index, scale);
    }
    void moveDouble(FloatRegister src, FloatRegister dest) {
        ma_vmov(src, dest);
    }

    // Single-precision stores use the low half of the VFP register
    // (singleOverlay).
    void storeFloat32(FloatRegister src, Address addr) {
        ma_vstr(VFPRegister(src).singleOverlay(), Operand(addr));
    }
    void storeFloat32(FloatRegister src, BaseIndex addr) {
        // Harder cases not handled yet.
        JS_ASSERT(addr.offset == 0);
        uint32_t scale = Imm32::ShiftOf(addr.scale).value;
        ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale);
    }

    // Clamp an int32 to [0, 255] in place using conditional moves.
    void clampIntToUint8(Register reg) {
        // look at (reg >> 8) if it is 0, then reg shouldn't be clamped
        // if it is <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
        as_mov(ScratchRegister, asr(reg, 8), SetCond);
        ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
        ma_mov(Imm32(0), reg, NoSetCond, Signed);
    }

    // Comparisons that only set condition codes (no branch).
    void cmp32(const Register &lhs, const Imm32 &rhs);
    void cmp32(const Register &lhs, const Register &rhs);
    void cmp32(const Operand &lhs, const Imm32 &rhs);
    void cmp32(const Operand &lhs, const Register &rhs);

    void cmpPtr(const Register &lhs, const ImmWord &rhs);
    void cmpPtr(const Register &lhs, const ImmPtr &rhs);
    void cmpPtr(const Register &lhs, const Register &rhs);
    void cmpPtr(const Register &lhs, const ImmGCPtr &rhs);
    void cmpPtr(const Register &lhs, const Imm32 &rhs);
    void cmpPtr(const Address &lhs, const Register &rhs);
    void cmpPtr(const Address &lhs, const ImmWord &rhs);
    void cmpPtr(const Address &lhs, const ImmPtr &rhs);

    void subPtr(Imm32 imm, const Register dest);
    void subPtr(const Address &addr, const Register dest);
    void subPtr(const Register &src, const Register &dest);
    void subPtr(const Register &src, const Address &dest);
    void addPtr(Imm32 imm, const Register dest);
    void addPtr(Imm32 imm, const Address &dest);
    void addPtr(ImmWord imm, const Register dest) {
        addPtr(Imm32(imm.value), dest);
    }
    void addPtr(ImmPtr imm, const Register dest) {
        addPtr(ImmWord(uintptr_t(imm.value)), dest);
    }
    void setStackArg(const Register &reg, uint32_t arg);

    void breakpoint();
    // conditional breakpoint
    void breakpoint(Condition cc);

    // Floating-point comparisons and branches.
    void compareDouble(FloatRegister lhs, FloatRegister rhs);
    void branchDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
                      Label *label);

    void compareFloat(FloatRegister lhs, FloatRegister rhs);
    void branchFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
                     Label *label);

    void checkStackAlignment();

    void rshiftPtr(Imm32 imm, Register dest) {
        ma_lsr(imm, dest, dest);
    }
    void lshiftPtr(Imm32 imm, Register dest) {
        ma_lsl(imm, dest, dest);
    }

    // If source is a double, load it into dest. If source is int32,
    // convert it to double. Else, branch to failure.
    void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);

    // Materialize the boolean value of |cond| in |dest|: unconditionally
    // write 0, then conditionally overwrite with 1.
    void
    emitSet(Assembler::Condition cond, const Register &dest)
    {
        ma_mov(Imm32(0), dest);
        ma_mov(Imm32(1), dest, NoSetCond, cond);
    }

    // Compare, then materialize the comparison result as 0/1 in |dest|.
    template <typename T1, typename T2>
    void cmpPtrSet(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest)
    {
        cmpPtr(lhs, rhs);
        emitSet(cond, dest);
    }
    template <typename T1, typename T2>
    void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest)
    {
        cmp32(lhs, rhs);
        emitSet(cond, dest);
    }

    // Tag-test variants: set |dest| to 0/1 depending on the tag test result.
    void testNullSet(Condition cond, const ValueOperand &value, Register dest) {
        cond = testNull(cond, value);
        emitSet(cond, dest);
    }
    void testUndefinedSet(Condition cond, const ValueOperand &value, Register dest) {
        cond = testUndefined(cond, value);
        emitSet(cond, dest);
    }
    // Setup a call to C/C++ code, given the number of general arguments it
    // takes. Note that this only supports cdecl.
    //
    // In order for alignment to work correctly, the MacroAssembler must have a
    // consistent view of the stack displacement. It is okay to call "push"
    // manually, however, if the stack alignment were to change, the macro
    // assembler should be notified before starting a call.
    void setupAlignedABICall(uint32_t args);

    // Sets up an ABI call for when the alignment is not known. This may need a
    // scratch register.
    void setupUnalignedABICall(uint32_t args, const Register &scratch);

    // Arguments must be assigned in a left-to-right order. This process may
    // temporarily use more stack, in which case esp-relative addresses will be
    // automatically adjusted. It is extremely important that esp-relative
    // addresses are computed *after* setupABICall(). Furthermore, no
    // operations should be emitted while setting arguments.
    void passABIArg(const MoveOperand &from, MoveOp::Type type);
    void passABIArg(const Register &reg);
    void passABIArg(const FloatRegister &reg, MoveOp::Type type);
    void passABIArg(const ValueOperand &regs);

  private:
    // Hard-float vs soft-float argument passing variants.
    void passHardFpABIArg(const MoveOperand &from, MoveOp::Type type);
    void passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type);

  protected:
    bool buildOOLFakeExitFrame(void *fakeReturnAddr);

  private:
    void callWithABIPre(uint32_t *stackAdjust);
    void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);

  public:
    // Emits a call to a C/C++ function, resolving all argument moves.
    void callWithABI(void *fun, MoveOp::Type result = MoveOp::GENERAL);
    void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
    void callWithABI(const Address &fun, MoveOp::Type result = MoveOp::GENERAL);

    // Label for patching at the current buffer position.
    CodeOffsetLabel labelForPatch() {
        return CodeOffsetLabel(nextOffset().getOffset());
    }

    // Compute base + offset (and optionally + (index << scale)) into |dest|.
    void computeEffectiveAddress(const Address &address, Register dest) {
        ma_add(address.base, Imm32(address.offset), dest, NoSetCond);
    }
    void computeEffectiveAddress(const BaseIndex &address, Register dest) {
        ma_alu(address.base, lsl(address.index, address.scale), dest, op_add, NoSetCond);
        if (address.offset)
            ma_add(dest, Imm32(address.offset), dest, NoSetCond);
    }
    void floor(FloatRegister input, Register output, Label *handleNotAnInt);
    void floorf(FloatRegister input, Register output, Label *handleNotAnInt);
    void round(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);
    void roundf(FloatRegister input, Register output, Label *handleNotAnInt, FloatRegister tmp);

    void clampCheck(Register r, Label *handleNotAnInt) {
        // check explicitly for r == INT_MIN || r == INT_MAX
        // this is the instruction sequence that gcc generated for this
        // operation.
        ma_sub(r, Imm32(0x80000001), ScratchRegister);
        ma_cmn(ScratchRegister, Imm32(3));
        ma_b(handleNotAnInt, Above);
    }

    // Memory-to-memory helpers. These all clobber lr as a temporary.
    void memIntToValue(Address Source, Address Dest) {
        load32(Source, lr);
        storeValue(JSVAL_TYPE_INT32, lr, Dest);
    }
    void memMove32(Address Source, Address Dest) {
        loadPtr(Source, lr);
        storePtr(lr, Dest);
    }
    void memMove64(Address Source, Address Dest) {
        loadPtr(Source, lr);
        storePtr(lr, Dest);
        loadPtr(Address(Source.base, Source.offset+4), lr);
        storePtr(lr, Address(Dest.base, Dest.offset+4));
    }

    void lea(Operand addr, Register dest) {
        ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
    }
1577 void stackCheck(ImmWord limitAddr, Label *label) {
1578 int *foo = 0;
1579 *foo = 5;
1580 movePtr(limitAddr, ScratchRegister);
1581 ma_ldr(Address(ScratchRegister, 0), ScratchRegister);
1582 ma_cmp(ScratchRegister, StackPointer);
1583 ma_b(label, Assembler::AboveOrEqual);
1584 }
    // Return from an ABI call: branch to the link register.
    void abiret() {
        as_bx(lr);
    }

    // Store an immediate to memory; clobbers lr as a temporary.
    void ma_storeImm(Imm32 c, const Operand &dest) {
        ma_mov(c, lr);
        ma_str(lr, dest);
    }
    // Emit a compare against 0 and return its buffer offset so the bounds
    // check can be located/patched later.
    BufferOffset ma_BoundsCheck(Register bounded) {
        return as_cmp(bounded, Imm8(0));
    }

    // Single-precision register-to-register move via the single overlays.
    void moveFloat32(FloatRegister src, FloatRegister dest) {
        as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay());
    }

#ifdef JSGC_GENERATIONAL
    // Generational-GC write-barrier helpers.
    void branchPtrInNurseryRange(Register ptr, Register temp, Label *label);
    void branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label);
#endif
1605 };
// Platform-shared code refers to the current platform's assembler through
// this alias.
typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
1609 } // namespace jit
1610 } // namespace js
1612 #endif /* jit_arm_MacroAssembler_arm_h */