Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_shared_MacroAssembler_x86_shared_h
#define jit_shared_MacroAssembler_x86_shared_h

#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"

#if defined(JS_CODEGEN_X86)
# include "jit/x86/Assembler-x86.h"
#elif defined(JS_CODEGEN_X64)
# include "jit/x64/Assembler-x64.h"
#endif

namespace js {
namespace jit {

class MacroAssemblerX86Shared : public Assembler
{
  protected:
    // Bytes pushed onto the frame by the callee; includes frameDepth_. This is
    // needed to compute offsets to stack slots while temporary space has been
    // reserved for unexpected spills or C++ function calls. It is maintained
    // by functions which track stack alignment, which for clear distinction
    // use StudlyCaps (for example, Push, Pop).
    uint32_t framePushed_;

  public:
    using Assembler::call;

    MacroAssemblerX86Shared()
      : framePushed_(0)
    { }

    void compareDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs) {
        if (cond & DoubleConditionBitInvert)
            ucomisd(rhs, lhs);
        else
            ucomisd(lhs, rhs);
    }
    void branchDouble(DoubleCondition cond, const FloatRegister &lhs,
                      const FloatRegister &rhs, Label *label)
    {
        compareDouble(cond, lhs, rhs);
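        // ucomisd sets ZF, PF, and CF; an unordered result (either operand
        // NaN) sets all three, so Parity is the NaN signal. DoubleEqual must
        // therefore not branch on ZF alone: note the unordered filter below.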

        if (cond == DoubleEqual) {
            Label unordered;
            j(Parity, &unordered);
            j(Equal, label);
            bind(&unordered);
            return;
        }
        if (cond == DoubleNotEqualOrUnordered) {
            j(NotEqual, label);
            j(Parity, label);
            return;
        }

        JS_ASSERT(!(cond & DoubleConditionBitSpecial));
        j(ConditionFromDoubleCondition(cond), label);
    }

    void compareFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs) {
        if (cond & DoubleConditionBitInvert)
            ucomiss(rhs, lhs);
        else
            ucomiss(lhs, rhs);
    }
    void branchFloat(DoubleCondition cond, const FloatRegister &lhs,
                     const FloatRegister &rhs, Label *label)
    {
        compareFloat(cond, lhs, rhs);

        if (cond == DoubleEqual) {
            Label unordered;
            j(Parity, &unordered);
            j(Equal, label);
            bind(&unordered);
            return;
        }
        if (cond == DoubleNotEqualOrUnordered) {
            j(NotEqual, label);
            j(Parity, label);
            return;
        }

        JS_ASSERT(!(cond & DoubleConditionBitSpecial));
        j(ConditionFromDoubleCondition(cond), label);
    }

    void branchNegativeZero(const FloatRegister &reg, const Register &scratch, Label *label);
    void branchNegativeZeroFloat32(const FloatRegister &reg, const Register &scratch, Label *label);

    void move32(const Imm32 &imm, const Register &dest) {
        // Use the ImmWord version of mov to register, which has special
        // optimizations. Casting to uint32_t here ensures that the value
        // is zero-extended.
        mov(ImmWord(uint32_t(imm.value)), dest);
    }
    void move32(const Imm32 &imm, const Operand &dest) {
        movl(imm, dest);
    }
    void move32(const Register &src, const Register &dest) {
        movl(src, dest);
    }
    void move32(const Register &src, const Operand &dest) {
        movl(src, dest);
    }
    void and32(const Imm32 &imm, const Register &dest) {
        andl(imm, dest);
    }
    void and32(const Imm32 &imm, const Address &dest) {
        andl(imm, Operand(dest));
    }
    void or32(const Register &src, const Register &dest) {
        orl(src, dest);
    }
    void or32(const Imm32 &imm, const Register &dest) {
        orl(imm, dest);
    }
    void or32(const Imm32 &imm, const Address &dest) {
        orl(imm, Operand(dest));
    }
    void neg32(const Register &reg) {
        negl(reg);
    }
    void test32(const Register &lhs, const Register &rhs) {
        testl(lhs, rhs);
    }
    void test32(const Address &addr, Imm32 imm) {
        testl(Operand(addr), imm);
    }
    void test32(const Register &lhs, const Imm32 &rhs) {
        testl(lhs, rhs);
    }
    void cmp32(const Register &lhs, const Imm32 &rhs) {
        cmpl(lhs, rhs);
    }
    void cmp32(Register a, Register b) {
        cmpl(a, b);
    }
    void cmp32(const Operand &lhs, const Imm32 &rhs) {
        cmpl(lhs, rhs);
    }
    void cmp32(const Operand &lhs, const Register &rhs) {
        cmpl(lhs, rhs);
    }
    void add32(Register src, Register dest) {
        addl(src, dest);
    }
    void add32(Imm32 imm, Register dest) {
        addl(imm, dest);
    }
    void add32(Imm32 imm, const Address &dest) {
        addl(imm, Operand(dest));
    }
    void sub32(Imm32 imm, Register dest) {
        subl(imm, dest);
    }
    void sub32(Register src, Register dest) {
        subl(src, dest);
    }
    template <typename T>
    void branchAdd32(Condition cond, T src, Register dest, Label *label) {
        add32(src, dest);
        j(cond, label);
    }
    template <typename T>
    void branchSub32(Condition cond, T src, Register dest, Label *label) {
        sub32(src, dest);
        j(cond, label);
    }
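    // A note on the two templates above: addl/subl set the full set of
    // arithmetic flags, so callers can pass e.g. Assembler::Overflow as cond
    // to get overflow-checked int32 arithmetic with a bailout branch.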
    void xor32(Imm32 imm, Register dest) {
        xorl(imm, dest);
    }
    void xor32(Register src, Register dest) {
        xorl(src, dest);
    }
    void not32(Register reg) {
        notl(reg);
    }
    void inc32(const Operand &addr) {
        incl(addr);
    }
    void atomic_inc32(const Operand &addr) {
        lock_incl(addr);
    }
    void dec32(const Operand &addr) {
        decl(addr);
    }
    void atomic_dec32(const Operand &addr) {
        lock_decl(addr);
    }
    void atomic_cmpxchg32(const Register &src, const Operand &addr, const Register &dest) {
        // %eax must be explicitly provided for calling clarity.
        MOZ_ASSERT(dest.code() == JSC::X86Registers::eax);
        lock_cmpxchg32(src, addr);
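        // CMPXCHG semantics: the CPU compares %eax with the memory operand;
        // if equal it stores src there, otherwise it loads the memory value
        // into %eax. Either way dest (%eax) ends up holding the old value.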
    }

    void branch16(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        cmpw(lhs, rhs);
        j(cond, label);
    }
    void branch32(Condition cond, const Operand &lhs, const Register &rhs, Label *label) {
        cmpl(lhs, rhs);
        j(cond, label);
    }
    void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) {
        cmpl(lhs, rhs);
        j(cond, label);
    }
    void branch32(Condition cond, const Address &lhs, const Register &rhs, Label *label) {
        cmpl(Operand(lhs), rhs);
        j(cond, label);
    }
    void branch32(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
        cmpl(Operand(lhs), imm);
        j(cond, label);
    }
    void branch32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
        cmpl(lhs, imm);
        j(cond, label);
    }
    void branch32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        cmpl(lhs, rhs);
        j(cond, label);
    }
    void branchTest16(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        testw(lhs, rhs);
        j(cond, label);
    }
    void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        testl(lhs, rhs);
        j(cond, label);
    }
    void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
        testl(lhs, imm);
        j(cond, label);
    }
    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
        testl(Operand(address), imm);
        j(cond, label);
    }

    // The following functions are exposed for use in platform-shared code.
    template <typename T>
    void Push(const T &t) {
        push(t);
        framePushed_ += sizeof(intptr_t);
    }
    void Push(const FloatRegister &t) {
        push(t);
        framePushed_ += sizeof(double);
    }
    CodeOffsetLabel PushWithPatch(const ImmWord &word) {
        framePushed_ += sizeof(word.value);
        return pushWithPatch(word);
    }
    CodeOffsetLabel PushWithPatch(const ImmPtr &imm) {
        return PushWithPatch(ImmWord(uintptr_t(imm.value)));
    }

    template <typename T>
    void Pop(const T &t) {
        pop(t);
        framePushed_ -= sizeof(intptr_t);
    }
    void Pop(const FloatRegister &t) {
        pop(t);
        framePushed_ -= sizeof(double);
    }
    void implicitPop(uint32_t args) {
        JS_ASSERT(args % sizeof(intptr_t) == 0);
        framePushed_ -= args;
    }
    uint32_t framePushed() const {
        return framePushed_;
    }
    void setFramePushed(uint32_t framePushed) {
        framePushed_ = framePushed;
    }

    void jump(Label *label) {
        jmp(label);
    }
    void jump(RepatchLabel *label) {
        jmp(label);
    }
    void jump(Register reg) {
        jmp(Operand(reg));
    }
    void jump(const Address &addr) {
        jmp(Operand(addr));
    }

    void convertInt32ToDouble(const Register &src, const FloatRegister &dest) {
        // cvtsi2sd and friends write only part of their output register, which
        // causes slowdowns on out-of-order processors. Explicitly break
        // dependencies with xorpd (and xorps elsewhere), which are handled
        // specially in modern CPUs, for this purpose. See sections 8.14, 9.8,
        // 10.8, 12.9, 13.16, 14.14, and 15.8 of Agner's Microarchitecture
        // document.
        zeroDouble(dest);
        cvtsi2sd(src, dest);
    }
    void convertInt32ToDouble(const Address &src, FloatRegister dest) {
        convertInt32ToDouble(Operand(src), dest);
    }
    void convertInt32ToDouble(const Operand &src, FloatRegister dest) {
        // Clear the output register first to break dependencies; see above.
        zeroDouble(dest);
        cvtsi2sd(Operand(src), dest);
    }
    void convertInt32ToFloat32(const Register &src, const FloatRegister &dest) {
        // Clear the output register first to break dependencies; see above.
        zeroFloat32(dest);
        cvtsi2ss(src, dest);
    }
    void convertInt32ToFloat32(const Address &src, FloatRegister dest) {
        convertInt32ToFloat32(Operand(src), dest);
    }
    void convertInt32ToFloat32(const Operand &src, FloatRegister dest) {
        // Clear the output register first to break dependencies; see above.
        zeroFloat32(dest);
        cvtsi2ss(src, dest);
    }
    Condition testDoubleTruthy(bool truthy, const FloatRegister &reg) {
        zeroDouble(ScratchFloatReg);
        ucomisd(ScratchFloatReg, reg);
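        // An unordered compare (reg is NaN) also sets ZF, so NaN lands on the
        // Zero side: exactly right for JS, where NaN is falsy like +0/-0.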
        return truthy ? NonZero : Zero;
    }
    void branchTestDoubleTruthy(bool truthy, const FloatRegister &reg, Label *label) {
        Condition cond = testDoubleTruthy(truthy, reg);
        j(cond, label);
    }
    void load8ZeroExtend(const Address &src, const Register &dest) {
        movzbl(Operand(src), dest);
    }
    void load8ZeroExtend(const BaseIndex &src, const Register &dest) {
        movzbl(Operand(src), dest);
    }
    void load8SignExtend(const Address &src, const Register &dest) {
        movsbl(Operand(src), dest);
    }
    void load8SignExtend(const BaseIndex &src, const Register &dest) {
        movsbl(Operand(src), dest);
    }
    template <typename S, typename T>
    void store8(const S &src, const T &dest) {
        movb(src, Operand(dest));
    }
    void load16ZeroExtend(const Address &src, const Register &dest) {
        movzwl(Operand(src), dest);
    }
    void load16ZeroExtend(const BaseIndex &src, const Register &dest) {
        movzwl(Operand(src), dest);
    }
    template <typename S, typename T>
    void store16(const S &src, const T &dest) {
        movw(src, Operand(dest));
    }
    void load16SignExtend(const Address &src, const Register &dest) {
        movswl(Operand(src), dest);
    }
    void load16SignExtend(const BaseIndex &src, const Register &dest) {
        movswl(Operand(src), dest);
    }
    void load32(const Address &address, Register dest) {
        movl(Operand(address), dest);
    }
    void load32(const BaseIndex &src, Register dest) {
        movl(Operand(src), dest);
    }
    void load32(const Operand &src, Register dest) {
        movl(src, dest);
    }
    template <typename S, typename T>
    void store32(const S &src, const T &dest) {
        movl(src, Operand(dest));
    }
    void loadDouble(const Address &src, FloatRegister dest) {
        movsd(src, dest);
    }
    void loadDouble(const BaseIndex &src, FloatRegister dest) {
        movsd(src, dest);
    }
    void loadDouble(const Operand &src, FloatRegister dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            loadDouble(src.toAddress(), dest);
            break;
          case Operand::MEM_SCALE:
            loadDouble(src.toBaseIndex(), dest);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void storeDouble(FloatRegister src, const Address &dest) {
        movsd(src, dest);
    }
    void storeDouble(FloatRegister src, const BaseIndex &dest) {
        movsd(src, dest);
    }
    void storeDouble(FloatRegister src, const Operand &dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            storeDouble(src, dest.toAddress());
            break;
          case Operand::MEM_SCALE:
            storeDouble(src, dest.toBaseIndex());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void moveDouble(FloatRegister src, FloatRegister dest) {
        // Use movapd instead of movsd to avoid dependencies.
        movapd(src, dest);
    }
    void zeroDouble(FloatRegister reg) {
        xorpd(reg, reg);
    }
    void zeroFloat32(FloatRegister reg) {
        xorps(reg, reg);
    }
    void negateDouble(FloatRegister reg) {
        // From MacroAssemblerX86Shared::maybeInlineDouble
        pcmpeqw(ScratchFloatReg, ScratchFloatReg);
        psllq(Imm32(63), ScratchFloatReg);
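        // pcmpeqw of a register with itself sets every bit (each lane compares
        // equal to itself), and psllq(63) then leaves only bit 63 of each
        // quadword: the IEEE-754 sign-bit mask 0x8000000000000000 of a double.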

        // XOR the double in a float register with -0.0 to flip its sign.
        xorpd(ScratchFloatReg, reg); // s ^ 0x8000000000000000
    }
    void negateFloat(FloatRegister reg) {
        pcmpeqw(ScratchFloatReg, ScratchFloatReg);
        psllq(Imm32(31), ScratchFloatReg);

        // XOR the float in a float register with -0.0.
        xorps(ScratchFloatReg, reg); // s ^ 0x80000000
    }
    void addDouble(FloatRegister src, FloatRegister dest) {
        addsd(src, dest);
    }
    void subDouble(FloatRegister src, FloatRegister dest) {
        subsd(src, dest);
    }
    void mulDouble(FloatRegister src, FloatRegister dest) {
        mulsd(src, dest);
    }
    void divDouble(FloatRegister src, FloatRegister dest) {
        divsd(src, dest);
    }
    void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest) {
        cvtss2sd(src, dest);
    }
    void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest) {
        cvtsd2ss(src, dest);
    }
    void moveFloatAsDouble(const Register &src, FloatRegister dest) {
        movd(src, dest);
        cvtss2sd(dest, dest);
    }
    void loadFloatAsDouble(const Address &src, FloatRegister dest) {
        movss(src, dest);
        cvtss2sd(dest, dest);
    }
    void loadFloatAsDouble(const BaseIndex &src, FloatRegister dest) {
        movss(src, dest);
        cvtss2sd(dest, dest);
    }
    void loadFloatAsDouble(const Operand &src, FloatRegister dest) {
        loadFloat32(src, dest);
        cvtss2sd(dest, dest);
    }
    void loadFloat32(const Address &src, FloatRegister dest) {
        movss(src, dest);
    }
    void loadFloat32(const BaseIndex &src, FloatRegister dest) {
        movss(src, dest);
    }
    void loadFloat32(const Operand &src, FloatRegister dest) {
        switch (src.kind()) {
          case Operand::MEM_REG_DISP:
            loadFloat32(src.toAddress(), dest);
            break;
          case Operand::MEM_SCALE:
            loadFloat32(src.toBaseIndex(), dest);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void storeFloat32(FloatRegister src, const Address &dest) {
        movss(src, dest);
    }
    void storeFloat32(FloatRegister src, const BaseIndex &dest) {
        movss(src, dest);
    }
    void storeFloat32(FloatRegister src, const Operand &dest) {
        switch (dest.kind()) {
          case Operand::MEM_REG_DISP:
            storeFloat32(src, dest.toAddress());
            break;
          case Operand::MEM_SCALE:
            storeFloat32(src, dest.toBaseIndex());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    void moveFloat32(FloatRegister src, FloatRegister dest) {
        // Use movaps instead of movss to avoid dependencies.
        movaps(src, dest);
    }

    // Checks whether a double is representable as a 32-bit integer. If so, the
    // integer is written to the output register. Otherwise, jumps to the fail
    // label (in Ion this takes a bailout to the attached snapshot). This
    // function overwrites the scratch float register.
    void convertDoubleToInt32(FloatRegister src, Register dest, Label *fail,
                              bool negativeZeroCheck = true)
    {
        // Check for -0.0.
        if (negativeZeroCheck)
            branchNegativeZero(src, dest, fail);
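
        // Round-trip check: cvttsd2si truncates toward zero and produces the
        // "integer indefinite" value 0x80000000 on overflow or NaN. Converting
        // the result back and comparing catches any input that was not an
        // exact int32 (Parity flags the NaN/unordered case).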
        cvttsd2si(src, dest);
        cvtsi2sd(dest, ScratchFloatReg);
        ucomisd(src, ScratchFloatReg);
        j(Assembler::Parity, fail);
        j(Assembler::NotEqual, fail);
    }

    // Checks whether a float32 is representable as a 32-bit integer. If so, the
    // integer is written to the output register. Otherwise, jumps to the fail
    // label (in Ion this takes a bailout to the attached snapshot). This
    // function overwrites the scratch float register.
    void convertFloat32ToInt32(FloatRegister src, Register dest, Label *fail,
                               bool negativeZeroCheck = true)
    {
        // Check for -0.0.
        if (negativeZeroCheck)
            branchNegativeZeroFloat32(src, dest, fail);

        cvttss2si(src, dest);
        convertInt32ToFloat32(dest, ScratchFloatReg);
        ucomiss(src, ScratchFloatReg);
        j(Assembler::Parity, fail);
        j(Assembler::NotEqual, fail);
    }

    void clampIntToUint8(Register reg) {
        Label inRange;
        branchTest32(Assembler::Zero, reg, Imm32(0xffffff00), &inRange);
        {
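            // Branchless clamp for the out-of-range case: sarl(31) broadcasts
            // the sign bit, so reg becomes all-ones for negative inputs and
            // zero for inputs above 255; notl/andl then yield 0 and 255
            // respectively.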
            sarl(Imm32(31), reg);
            notl(reg);
            andl(Imm32(255), reg);
        }
        bind(&inRange);
    }

    bool maybeInlineDouble(double d, const FloatRegister &dest) {
        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);

        // Loading zero with xor is specially optimized in hardware.
        if (u == 0) {
            xorpd(dest, dest);
            return true;
        }

        // It is also possible to load several common constants using pcmpeqw
        // to get all ones and then psllq and psrlq to get zeros at the ends,
        // as described in "13.4 Generating constants" of
        // "2. Optimizing subroutines in assembly language" by Agner Fog, and as
        // previously implemented here. However, with x86 and x64 both using
        // constant pool loads for double constants, this is probably only
        // worthwhile in cases where a load is likely to be delayed.
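        // For example, 1.0 (bit pattern 0x3FF0000000000000) could be built
        // with pcmpeqw / psllq(54) / psrlq(2): shifting all-ones left then
        // right leaves ones only in bits 52..61, the biased exponent of 1.0.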

        return false;
    }

    bool maybeInlineFloat(float f, const FloatRegister &dest) {
        uint32_t u = mozilla::BitwiseCast<uint32_t>(f);

        // See comment above.
        if (u == 0) {
            xorps(dest, dest);
            return true;
        }
        return false;
    }

    void convertBoolToInt32(Register source, Register dest) {
        // Note that C++ bool is only 1 byte, so zero extend it to clear the
        // higher-order bits.
        movzbl(source, dest);
    }

    void emitSet(Assembler::Condition cond, const Register &dest,
                 Assembler::NaNCond ifNaN = Assembler::NaN_HandledByCond) {
        if (GeneralRegisterSet(Registers::SingleByteRegs).has(dest)) {
            // If the register we're defining is a single byte register,
            // take advantage of the setCC instruction.
            setCC(cond, dest);
            movzbl(dest, dest);

            if (ifNaN != Assembler::NaN_HandledByCond) {
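                // The preceding float compare (ucomisd/ucomiss) sets PF on an
                // unordered result, so Parity here means a NaN was involved
                // and the requested NaN answer overrides the setCC result.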
                Label noNaN;
                j(Assembler::NoParity, &noNaN);
                mov(ImmWord(ifNaN == Assembler::NaN_IsTrue), dest);
                bind(&noNaN);
            }
        } else {
            Label end;
            Label ifFalse;

            if (ifNaN == Assembler::NaN_IsFalse)
                j(Assembler::Parity, &ifFalse);
            // Note a subtlety here: FLAGS is live at this point, and the
            // mov interface doesn't guarantee to preserve FLAGS. Use
            // movl instead of mov, because the movl instruction
            // preserves FLAGS.
            movl(Imm32(1), dest);
            j(cond, &end);
            if (ifNaN == Assembler::NaN_IsTrue)
                j(Assembler::Parity, &end);
            bind(&ifFalse);
            mov(ImmWord(0), dest);

            bind(&end);
        }
    }

    template <typename T1, typename T2>
    void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, const Register &dest)
    {
        cmp32(lhs, rhs);
        emitSet(cond, dest);
    }
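    // For example, cmp32Set(Assembler::Equal, lhs, rhs, dest) materializes
    // the boolean (lhs == rhs) as 0 or 1 in dest.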

    // Emit a JMP that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().
    CodeOffsetLabel toggledJump(Label *label) {
        CodeOffsetLabel offset(size());
        jump(label);
        return offset;
    }
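    // Toggling presumably relies on "jmp rel32" (0xE9) and "cmp eax, imm32"
    // (0x3D) both being five-byte x86 instructions with the same trailing
    // 32-bit field, so patching one opcode byte converts between them.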

    template <typename T>
    void computeEffectiveAddress(const T &address, Register dest) {
        lea(Operand(address), dest);
    }

    // Builds an exit frame on the stack, with a return address to an internal
    // non-function. Returns offset to be passed to markSafepointAt().
    bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
    void callWithExitFrame(JitCode *target);

    void callIon(const Register &callee) {
        call(callee);
    }

    void appendCallSite(const CallSiteDesc &desc) {
        // Add an extra sizeof(void*) to include the return address that was
        // pushed by the call instruction (see CallSite::stackDepth).
        enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_ + sizeof(void*)));
    }

    void call(const CallSiteDesc &desc, Label *label) {
        call(label);
        appendCallSite(desc);
    }
    void call(const CallSiteDesc &desc, const Register &reg) {
        call(reg);
        appendCallSite(desc);
    }
    void callIonFromAsmJS(const Register &reg) {
        call(CallSiteDesc::Exit(), reg);
    }

    void checkStackAlignment() {
        // Exists for ARM compatibility.
    }

    CodeOffsetLabel labelForPatch() {
        return CodeOffsetLabel(size());
    }

    void abiret() {
        ret();
    }

  protected:
    bool buildOOLFakeExitFrame(void *fakeReturnAddr);
};

} // namespace jit
} // namespace js

#endif /* jit_shared_MacroAssembler_x86_shared_h */