Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sts=4 et sw=4 tw=99:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/mips/MacroAssembler-mips.h"
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
12 #include "jit/Bailouts.h"
13 #include "jit/BaselineFrame.h"
14 #include "jit/BaselineRegisters.h"
15 #include "jit/IonFrames.h"
16 #include "jit/MoveEmitter.h"
18 using namespace js;
19 using namespace jit;
21 using mozilla::Abs;
// Byte offsets of the payload and type-tag words inside a nunbox32 jsval.
static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;

// This file implements the 32-bit MIPS port only; pointers must be 4 bytes.
static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
// Normalize a C++ bool value into a 0/1 int32 in |dest|.
void
MacroAssemblerMIPS::convertBoolToInt32(Register src, Register dest)
{
    // Note that C++ bool is only 1 byte, so zero extend it to clear the
    // higher-order bits.
    ma_and(dest, src, Imm32(0xff));
}
// Convert a signed 32-bit integer in |src| to a double in |dest|.
void
MacroAssemblerMIPS::convertInt32ToDouble(const Register &src, const FloatRegister &dest)
{
    // Move the GPR bits into the FPU, then convert word -> double in place.
    as_mtc1(src, dest);
    as_cvtdw(dest, dest);
}

// Memory-operand overload: load the int32 through ScratchRegister first.
void
MacroAssemblerMIPS::convertInt32ToDouble(const Address &src, FloatRegister dest)
{
    ma_lw(ScratchRegister, src);
    as_mtc1(ScratchRegister, dest);
    as_cvtdw(dest, dest);
}
// Convert an unsigned 32-bit integer in |src| to a double in |dest|.
// MIPS has no unsigned word->double instruction, so bias the input into
// signed range, convert, then add the bias back as a double.
void
MacroAssemblerMIPS::convertUInt32ToDouble(const Register &src, const FloatRegister &dest)
{
    // We use SecondScratchFloatReg because MacroAssembler::loadFromTypedArray
    // calls with ScratchFloatReg as dest.
    MOZ_ASSERT(dest != SecondScratchFloatReg);

    // Subtract INT32_MIN to get a positive number
    ma_subu(ScratchRegister, src, Imm32(INT32_MIN));

    // Convert value
    as_mtc1(ScratchRegister, dest);
    as_cvtdw(dest, dest);

    // Add unsigned value of INT32_MIN
    ma_lid(SecondScratchFloatReg, 2147483648.0);
    as_addd(dest, dest, SecondScratchFloatReg);
}
// Convert an unsigned 32-bit integer to a float32. Not yet implemented for
// this port; any caller reaching it aborts at runtime.
void
MacroAssemblerMIPS::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}

// Narrow a double in |src| to a float32 in |dest| (cvt.s.d).
void
MacroAssemblerMIPS::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest)
{
    as_cvtsd(dest, src);
}
82 // Convert the floating point value to an integer, if it did not fit, then it
83 // was clamped to INT32_MIN/INT32_MAX, and we can test it.
84 // NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
85 // will be wrong.
86 void
87 MacroAssemblerMIPS::branchTruncateDouble(const FloatRegister &src, const Register &dest,
88 Label *fail)
89 {
90 Label test, success;
91 as_truncwd(ScratchFloatReg, src);
92 as_mfc1(dest, ScratchFloatReg);
94 ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
95 }
97 // Checks whether a double is representable as a 32-bit integer. If so, the
98 // integer is written to the output register. Otherwise, a bailout is taken to
99 // the given snapshot. This function overwrites the scratch float register.
100 void
101 MacroAssemblerMIPS::convertDoubleToInt32(const FloatRegister &src, const Register &dest,
102 Label *fail, bool negativeZeroCheck)
103 {
104 // Convert double to int, then convert back and check if we have the
105 // same number.
106 as_cvtwd(ScratchFloatReg, src);
107 as_mfc1(dest, ScratchFloatReg);
108 as_cvtdw(ScratchFloatReg, ScratchFloatReg);
109 ma_bc1d(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
111 if (negativeZeroCheck) {
112 Label notZero;
113 ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
114 // Test and bail for -0.0, when integer result is 0
115 // Move the top word of the double into the output reg, if it is
116 // non-zero, then the original value was -0.0
117 moveFromDoubleHi(src, dest);
118 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
119 bind(¬Zero);
120 }
121 }
123 // Checks whether a float32 is representable as a 32-bit integer. If so, the
124 // integer is written to the output register. Otherwise, a bailout is taken to
125 // the given snapshot. This function overwrites the scratch float register.
126 void
127 MacroAssemblerMIPS::convertFloat32ToInt32(const FloatRegister &src, const Register &dest,
128 Label *fail, bool negativeZeroCheck)
129 {
130 // convert the floating point value to an integer, if it did not fit, then
131 // when we convert it *back* to a float, it will have a different value,
132 // which we can test.
133 as_cvtws(ScratchFloatReg, src);
134 as_mfc1(dest, ScratchFloatReg);
135 as_cvtsw(ScratchFloatReg, ScratchFloatReg);
136 ma_bc1s(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
138 if (negativeZeroCheck) {
139 Label notZero;
140 ma_b(dest, Imm32(0), ¬Zero, Assembler::NotEqual, ShortJump);
141 // Test and bail for -0.0, when integer result is 0
142 // Move the top word of the double into the output reg,
143 // if it is non-zero, then the original value was -0.0
144 moveFromDoubleHi(src, dest);
145 ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
146 bind(¬Zero);
147 }
148 }
// Widen a float32 in |src| to a double in |dest| (cvt.d.s).
void
MacroAssemblerMIPS::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest)
{
    as_cvtds(dest, src);
}
156 void
157 MacroAssemblerMIPS::branchTruncateFloat32(const FloatRegister &src, const Register &dest,
158 Label *fail)
159 {
160 Label test, success;
161 as_truncws(ScratchFloatReg, src);
162 as_mfc1(dest, ScratchFloatReg);
164 ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
165 }
// Convert a signed 32-bit integer in |src| to a float32 in |dest|.
void
MacroAssemblerMIPS::convertInt32ToFloat32(const Register &src, const FloatRegister &dest)
{
    // Move the GPR bits into the FPU, then convert word -> single in place.
    as_mtc1(src, dest);
    as_cvtsw(dest, dest);
}

// Memory-operand overload: load the int32 through ScratchRegister first.
void
MacroAssemblerMIPS::convertInt32ToFloat32(const Address &src, FloatRegister dest)
{
    ma_lw(ScratchRegister, src);
    as_mtc1(ScratchRegister, dest);
    as_cvtsw(dest, dest);
}
// Two-operand double arithmetic helpers: dest = dest OP src.

void
MacroAssemblerMIPS::addDouble(FloatRegister src, FloatRegister dest)
{
    as_addd(dest, dest, src);
}

void
MacroAssemblerMIPS::subDouble(FloatRegister src, FloatRegister dest)
{
    as_subd(dest, dest, src);
}

void
MacroAssemblerMIPS::mulDouble(FloatRegister src, FloatRegister dest)
{
    as_muld(dest, dest, src);
}

void
MacroAssemblerMIPS::divDouble(FloatRegister src, FloatRegister dest)
{
    as_divd(dest, dest, src);
}

// In-place negation: reg = -reg.
void
MacroAssemblerMIPS::negateDouble(FloatRegister reg)
{
    as_negd(reg, reg);
}
// Increment a 64-bit counter stored at an absolute address. The value is laid
// out as two 32-bit words: low word at dest.addr, high word at dest.addr + 4.
void
MacroAssemblerMIPS::inc64(AbsoluteAddress dest)
{
    // Load and increment the low word.
    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
    as_lw(SecondScratchReg, ScratchRegister, 0);

    as_addiu(SecondScratchReg, SecondScratchReg, 1);
    as_sw(SecondScratchReg, ScratchRegister, 0);

    // Carry: (new low) < 1 only when the increment wrapped to zero.
    as_sltiu(SecondScratchReg, SecondScratchReg, 1);
    as_lw(ScratchRegister, ScratchRegister, 4);

    // Add the carry into the high word.
    as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);

    // ScratchRegister was clobbered by the high-word load; rebuild the
    // address before storing the updated high word.
    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
    as_sw(SecondScratchReg, ScratchRegister, 4);
}
// Register move: rd = rs (canonical MIPS move via OR with $zero).
void
MacroAssemblerMIPS::ma_move(Register rd, Register rs)
{
    as_or(rd, rs, zero);
}

// Load a GC-thing pointer, recording a data relocation so the GC can update
// the embedded immediate when the referent moves.
void
MacroAssemblerMIPS::ma_li(Register dest, const ImmGCPtr &ptr)
{
    writeDataRelocation(ptr);
    // Must be patchable: the GC rewrites this lui/ori pair in place.
    ma_liPatchable(dest, Imm32(ptr.value));
}

// Load the (not-yet-known) address of an unbound AbsoluteLabel, linking this
// site into the label's patch chain for later back-patching.
void
MacroAssemblerMIPS::ma_li(const Register &dest, AbsoluteLabel *label)
{
    MOZ_ASSERT(!label->bound());
    // Thread the patch list through the unpatched address word in the
    // instruction stream.
    BufferOffset bo = m_buffer.nextOffset();
    ma_liPatchable(dest, Imm32(label->prev()));
    label->setPrev(bo.getOffset());
}

// Load a 32-bit immediate using the shortest possible sequence
// (one of addiu/ori/lui, or the full lui+ori pair).
void
MacroAssemblerMIPS::ma_li(Register dest, Imm32 imm)
{
    if (Imm16::isInSignedRange(imm.value)) {
        as_addiu(dest, zero, imm.value);
    } else if (Imm16::isInUnsignedRange(imm.value)) {
        as_ori(dest, zero, Imm16::lower(imm).encode());
    } else if (Imm16::lower(imm).encode() == 0) {
        as_lui(dest, Imm16::upper(imm).encode());
    } else {
        as_lui(dest, Imm16::upper(imm).encode());
        as_ori(dest, dest, Imm16::lower(imm).encode());
    }
}

// This method generates lui and ori instruction pair that can be modified by
// updateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void
MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
{
    // The two instructions must be contiguous so patchers can find them.
    m_buffer.ensureSpace(2 * sizeof(uint32_t));
    as_lui(dest, Imm16::upper(imm).encode());
    as_ori(dest, dest, Imm16::lower(imm).encode());
}

void
MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
{
    return ma_liPatchable(dest, Imm32(int32_t(imm.value)));
}
// Shifts
// Immediate-count forms mask the count to 0..31, matching the 5-bit shift
// amount field of the underlying instructions.
void
MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Imm32 shift)
{
    as_sll(rd, rt, shift.value % 32);
}
void
MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Imm32 shift)
{
    as_srl(rd, rt, shift.value % 32);
}

void
MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Imm32 shift)
{
    as_sra(rd, rt, shift.value % 32);
}

void
MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Imm32 shift)
{
    as_rotr(rd, rt, shift.value % 32);
}

// Rotate left by n == rotate right by (32 - n).
void
MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Imm32 shift)
{
    as_rotr(rd, rt, 32 - (shift.value % 32));
}

// Variable-count forms; hardware uses the low 5 bits of |shift|.
void
MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Register shift)
{
    as_sllv(rd, rt, shift);
}

void
MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Register shift)
{
    as_srlv(rd, rt, shift);
}

void
MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Register shift)
{
    as_srav(rd, rt, shift);
}

void
MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Register shift)
{
    as_rotrv(rd, rt, shift);
}

// Rotate left by a register count: rotate right by the negated count.
void
MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Register shift)
{
    ma_negu(ScratchRegister, shift);
    as_rotrv(rd, rt, ScratchRegister);
}

// Two's-complement negation: rd = 0 - rs.
void
MacroAssemblerMIPS::ma_negu(Register rd, Register rs)
{
    as_subu(rd, zero, rs);
}

// Bitwise NOT: rd = ~rs (NOR with $zero).
void
MacroAssemblerMIPS::ma_not(Register rd, Register rs)
{
    as_nor(rd, rs, zero);
}
// And.
// Two-operand form: rd &= rs.
void
MacroAssemblerMIPS::ma_and(Register rd, Register rs)
{
    as_and(rd, rd, rs);
}

void
MacroAssemblerMIPS::ma_and(Register rd, Register rs, Register rt)
{
    as_and(rd, rs, rt);
}

void
MacroAssemblerMIPS::ma_and(Register rd, Imm32 imm)
{
    ma_and(rd, rd, imm);
}

// Use the immediate form when the constant fits andi's zero-extended 16-bit
// field; otherwise materialize it in ScratchRegister first.
void
MacroAssemblerMIPS::ma_and(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::isInUnsignedRange(imm.value)) {
        as_andi(rd, rs, imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_and(rd, rs, ScratchRegister);
    }
}

// Or.
// Two-operand form: rd |= rs.
void
MacroAssemblerMIPS::ma_or(Register rd, Register rs)
{
    as_or(rd, rd, rs);
}

void
MacroAssemblerMIPS::ma_or(Register rd, Register rs, Register rt)
{
    as_or(rd, rs, rt);
}

void
MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm)
{
    ma_or(rd, rd, imm);
}

// ori also zero-extends its 16-bit immediate, so the same range check applies.
void
MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::isInUnsignedRange(imm.value)) {
        as_ori(rd, rs, imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_or(rd, rs, ScratchRegister);
    }
}

// xor
// Two-operand form: rd ^= rs.
void
MacroAssemblerMIPS::ma_xor(Register rd, Register rs)
{
    as_xor(rd, rd, rs);
}

void
MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Register rt)
{
    as_xor(rd, rs, rt);
}

void
MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm)
{
    ma_xor(rd, rd, imm);
}

// xori zero-extends its 16-bit immediate, so the same range check applies.
void
MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::isInUnsignedRange(imm.value)) {
        as_xori(rd, rs, imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_xor(rd, rs, ScratchRegister);
    }
}
// Arithmetic-based ops.

// Add.
// addiu takes a sign-extended 16-bit immediate; otherwise go through the
// scratch register.
void
MacroAssemblerMIPS::ma_addu(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::isInSignedRange(imm.value)) {
        as_addiu(rd, rs, imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_addu(rd, rs, ScratchRegister);
    }
}

void
MacroAssemblerMIPS::ma_addu(Register rd, Register rs)
{
    as_addu(rd, rd, rs);
}

void
MacroAssemblerMIPS::ma_addu(Register rd, Imm32 imm)
{
    ma_addu(rd, rd, imm);
}

// rd = rs + rt with a branch to |overflow| on signed overflow. Signed
// overflow occurred iff the operands have the same sign but the result's
// sign differs; both sign tests are done with xor + sign-bit branches.
void
MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
{
    Label goodAddition;
    as_addu(SecondScratchReg, rs, rt);

    as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
    ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);

    // If different sign, then overflow
    as_xor(ScratchRegister, rs, SecondScratchReg);
    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

    bind(&goodAddition);
    ma_move(rd, SecondScratchReg);
}

// Immediate variant of the overflow-checked add.
void
MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
{
    // Check for signed range because of as_addiu
    // Check for unsigned range because of as_xori
    if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) {
        Label goodAddition;
        as_addiu(SecondScratchReg, rs, imm.value);

        // If different sign, no overflow
        as_xori(ScratchRegister, rs, imm.value);
        ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);

        // If different sign, then overflow
        as_xor(ScratchRegister, rs, SecondScratchReg);
        ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

        bind(&goodAddition);
        ma_move(rd, SecondScratchReg);
    } else {
        // Immediate out of range: fall back to the register variant.
        ma_li(ScratchRegister, imm);
        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
    }
}
// Subtract.
void
MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Register rt)
{
    as_subu(rd, rs, rt);
}

// Subtract an immediate: prefer addiu with the negated constant when it
// fits the signed 16-bit field.
void
MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Imm32 imm)
{
    if (Imm16::isInSignedRange(-imm.value)) {
        as_addiu(rd, rs, -imm.value);
    } else {
        ma_li(ScratchRegister, imm);
        as_subu(rd, rs, ScratchRegister);
    }
}

void
MacroAssemblerMIPS::ma_subu(Register rd, Imm32 imm)
{
    ma_subu(rd, rd, imm);
}

// rd = rs - rt with a branch to |overflow| on signed overflow. Overflow is
// only possible when the operands have different signs and the result's
// sign differs from rs.
void
MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
{
    Label goodSubtraction;
    // Use second scratch. The instructions generated by ma_b don't use the
    // second scratch register.
    ma_subu(SecondScratchReg, rs, rt);

    as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
    ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);

    // If different sign, then overflow
    as_xor(ScratchRegister, rs, SecondScratchReg);
    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);

    bind(&goodSubtraction);
    ma_move(rd, SecondScratchReg);
}

// Immediate variant: normally rewritten as an overflow-checked add of -imm;
// INT32_MIN cannot be negated, so it takes the register path instead.
void
MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
{
    if (imm.value != INT32_MIN) {
        ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
    } else {
        ma_li(ScratchRegister, Imm32(imm.value));
        ma_subTestOverflow(rd, rs, ScratchRegister, overflow);
    }
}
// Signed multiply of rs by an immediate; the 64-bit product is left in HI/LO.
void
MacroAssemblerMIPS::ma_mult(Register rs, Imm32 imm)
{
    ma_li(ScratchRegister, imm);
    as_mult(rs, ScratchRegister);
}

// rd = rs * rt with a branch to |overflow| if the product does not fit in
// 32 bits: the HI word must equal the sign-extension of the LO word.
void
MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
{
    as_mult(rs, rt);
    as_mflo(rd);
    // ScratchRegister = expected HI (sign-extension of the low word).
    as_sra(ScratchRegister, rd, 31);
    as_mfhi(SecondScratchReg);
    ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
}

void
MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
{
    ma_li(ScratchRegister, imm);
    ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
}

// rd = rs / rt with a branch to |overflow| if the division is inexact
// (i.e. the remainder in HI is non-zero).
void
MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
{
    as_div(rs, rt);
    as_mflo(rd);
    as_mfhi(ScratchRegister);
    ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
}

void
MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
{
    ma_li(ScratchRegister, imm);
    ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
}
// Computes dest = src % ((1 << shift) - 1) without a divide, branching to
// |negZero| (when non-null) if the mathematically correct JS result would
// be negative zero. |hold| is clobbered to record the input's sign.
void
MacroAssemblerMIPS::ma_mod_mask(Register src, Register dest, Register hold, int32_t shift,
                                Label *negZero)
{
    // MATH:
    // We wish to compute x % ((1<<y) - 1) for a known constant, y.
    // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
    // dividend as a number in base b, namely
    // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
    // now, since both addition and multiplication commute with modulus,
    // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
    // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
    // now, since b == C + 1, b % C == 1, and b^n % C == 1
    // this means that the whole thing simplifies to:
    // c_0 + c_1 + c_2 ... c_n % C
    // each c_n can easily be computed by a shift/bitextract, and the modulus
    // can be maintained by simply subtracting by C whenever the number gets
    // over C.
    int32_t mask = (1 << shift) - 1;
    Label head, negative, sumSigned, done;

    // hold holds -1 if the value was negative, 1 otherwise.
    // ScratchRegister holds the remaining bits that have not been processed.
    // SecondScratchReg serves as a temporary location to store extracted bits
    // into as well as holding the trial subtraction as a temp value; dest is
    // the accumulator (and holds the final result)

    // move the whole value into the scratch register, setting the condition
    // codes so we can muck with them later.
    ma_move(ScratchRegister, src);
    // Zero out the dest.
    ma_subu(dest, dest, dest);
    // Set the hold appropriately.
    ma_b(ScratchRegister, ScratchRegister, &negative, Signed, ShortJump);
    ma_li(hold, Imm32(1));
    ma_b(&head, ShortJump);

    bind(&negative);
    ma_li(hold, Imm32(-1));
    // Work on the absolute value so each extracted digit is non-negative.
    ma_negu(ScratchRegister, ScratchRegister);

    // Begin the main loop.
    bind(&head);

    // Extract the bottom bits into SecondScratchReg.
    ma_and(SecondScratchReg, ScratchRegister, Imm32(mask));
    // Add those bits to the accumulator.
    as_addu(dest, dest, SecondScratchReg);
    // Do a trial subtraction, this is the same operation as cmp, but we
    // store the dest
    ma_subu(SecondScratchReg, dest, Imm32(mask));
    // If (sum - C) > 0, store sum - C back into sum, thus performing a
    // modulus.
    ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
    ma_move(dest, SecondScratchReg);
    bind(&sumSigned);
    // Get rid of the bits that we extracted before.
    as_srl(ScratchRegister, ScratchRegister, shift);
    // If the shift produced zero, finish, otherwise, continue in the loop.
    ma_b(ScratchRegister, ScratchRegister, &head, NonZero, ShortJump);
    // Check the hold to see if we need to negate the result.
    ma_b(hold, hold, &done, NotSigned, ShortJump);

    // If the hold was non-zero, negate the result to be in line with
    // what JS wants
    if (negZero != nullptr) {
        // Jump out in case of negative zero.
        // NOTE(review): this tests |hold| for zero, but hold is -1 on this
        // path; a -0 result would show up as dest == 0 — confirm whether the
        // intended operand here is |dest|.
        ma_b(hold, hold, negZero, Zero);
        ma_negu(dest, dest);
    } else {
        ma_negu(dest, dest);
    }

    bind(&done);
}
// Memory.

// Load |size| bits from |address| into |dest|, with the requested sign/zero
// extension. Offsets outside lw's signed 16-bit range are folded into the
// base address via ScratchRegister.
void
MacroAssemblerMIPS::ma_load(const Register &dest, Address address,
                            LoadStoreSize size, LoadStoreExtension extension)
{
    int16_t encodedOffset;
    Register base;
    if (!Imm16::isInSignedRange(address.offset)) {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        if (ZeroExtend == extension)
            as_lbu(dest, base, encodedOffset);
        else
            as_lb(dest, base, encodedOffset);
        break;
      case SizeHalfWord:
        if (ZeroExtend == extension)
            as_lhu(dest, base, encodedOffset);
        else
            as_lh(dest, base, encodedOffset);
        break;
      case SizeWord:
        // Word loads need no extension variant on a 32-bit target.
        as_lw(dest, base, encodedOffset);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_load");
        break;
    }
}

// BaseIndex overload: resolve base + scaled index into SecondScratchReg,
// then load via the Address path.
void
MacroAssemblerMIPS::ma_load(const Register &dest, const BaseIndex &src,
                            LoadStoreSize size, LoadStoreExtension extension)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
}

// Store |size| bits of |data| to |address|. Same large-offset handling as
// ma_load; |extension| is accepted for interface symmetry but stores need
// no extension.
void
MacroAssemblerMIPS::ma_store(const Register &data, Address address, LoadStoreSize size,
                             LoadStoreExtension extension)
{
    int16_t encodedOffset;
    Register base;
    if (!Imm16::isInSignedRange(address.offset)) {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        base = ScratchRegister;
        encodedOffset = Imm16(0).encode();
    } else {
        encodedOffset = Imm16(address.offset).encode();
        base = address.base;
    }

    switch (size) {
      case SizeByte:
        as_sb(data, base, encodedOffset);
        break;
      case SizeHalfWord:
        as_sh(data, base, encodedOffset);
        break;
      case SizeWord:
        as_sw(data, base, encodedOffset);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_store");
        break;
    }
}

void
MacroAssemblerMIPS::ma_store(const Register &data, const BaseIndex &dest,
                             LoadStoreSize size, LoadStoreExtension extension)
{
    computeScaledAddress(dest, SecondScratchReg);
    ma_store(data, Address(SecondScratchReg, dest.offset), size, extension);
}

// Store an immediate to a BaseIndex destination.
void
MacroAssemblerMIPS::ma_store(const Imm32 &imm, const BaseIndex &dest,
                             LoadStoreSize size, LoadStoreExtension extension)
{
    // Make sure that SecondScratchReg contains absolute address so that
    // offset is 0.
    computeEffectiveAddress(dest, SecondScratchReg);

    // Scratch register is free now, use it for loading imm value
    ma_li(ScratchRegister, imm);

    // with offset=0 ScratchRegister will not be used in ma_store()
    // so we can use it as a parameter here
    ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
}

// dest = base + (index << scale); the byte offset is NOT added here.
void
MacroAssemblerMIPS::computeScaledAddress(const BaseIndex &address, Register dest)
{
    int32_t shift = Imm32::ShiftOf(address.scale).value;
    if (shift) {
        ma_sll(dest, address.index, Imm32(shift));
        as_addu(dest, address.base, dest);
    } else {
        as_addu(dest, address.base, address.index);
    }
}
// Shortcut for when we know we're transferring 32 bits of data.
void
MacroAssemblerMIPS::ma_lw(Register data, Address address)
{
    ma_load(data, address, SizeWord);
}

void
MacroAssemblerMIPS::ma_sw(Register data, Address address)
{
    ma_store(data, address, SizeWord);
}

// Store a 32-bit immediate to memory, materializing it in ScratchRegister.
void
MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
{
    MOZ_ASSERT(address.base != ScratchRegister);
    ma_li(ScratchRegister, imm);

    if (Imm16::isInSignedRange(address.offset)) {
        as_sw(ScratchRegister, address.base, Imm16(address.offset).encode());
    } else {
        // Large offset: fold it into the base with SecondScratchReg.
        MOZ_ASSERT(address.base != SecondScratchReg);

        ma_li(SecondScratchReg, Imm32(address.offset));
        as_addu(SecondScratchReg, address.base, SecondScratchReg);
        as_sw(ScratchRegister, SecondScratchReg, 0);
    }
}

// Pop one word from the stack into |r| and deallocate its slot.
void
MacroAssemblerMIPS::ma_pop(Register r)
{
    as_lw(r, StackPointer, 0);
    as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
}

// Push one word onto the stack.
void
MacroAssemblerMIPS::ma_push(Register r)
{
    if (r == sp) {
        // Pushing sp requires one more instruction.
        // Snapshot the pre-decrement sp value into the scratch register.
        ma_move(ScratchRegister, sp);
        r = ScratchRegister;
    }

    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
    as_sw(r, StackPointer, 0);
}
// Branches when done from within mips-specific code.
// Compare |lhs| to |rhs| under condition |c| and branch to |label|.
void
MacroAssemblerMIPS::ma_b(Register lhs, Register rhs, Label *label, Condition c, JumpKind jumpKind)
{
    switch (c) {
      case Equal :
      case NotEqual:
        // beq/bne compare two registers directly.
        branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
        break;
      case Always:
        ma_b(label, jumpKind);
        break;
      case Zero:
      case NonZero:
      case Signed:
      case NotSigned:
        // Single-operand tests: both register arguments must be the same.
        MOZ_ASSERT(lhs == rhs);
        branchWithCode(getBranchCode(lhs, c), label, jumpKind);
        break;
      default:
        // Relational conditions: materialize the comparison result in
        // ScratchRegister via slt/sltu, then branch on it.
        Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
        branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
        break;
    }
}

// Compare a register against an immediate. The zero case maps directly onto
// the compare-with-$zero branch encodings.
void
MacroAssemblerMIPS::ma_b(Register lhs, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
{
    MOZ_ASSERT(c != Overflow);
    if (imm.value == 0) {
        // Unsigned >= 0 is always true; unsigned < 0 is always false.
        if (c == Always || c == AboveOrEqual)
            ma_b(label, jumpKind);
        else if (c == Below)
            ; // This condition is always false. No branch required.
        else
            branchWithCode(getBranchCode(lhs, c), label, jumpKind);
    } else {
        MOZ_ASSERT(lhs != ScratchRegister);
        ma_li(ScratchRegister, imm);
        ma_b(lhs, ScratchRegister, label, c, jumpKind);
    }
}

// Compare a register against a value loaded from memory.
void
MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label *label, Condition c, JumpKind jumpKind)
{
    MOZ_ASSERT(lhs != ScratchRegister);
    ma_lw(ScratchRegister, addr);
    ma_b(lhs, ScratchRegister, label, c, jumpKind);
}

// Compare a memory word against an immediate. SecondScratchReg holds the
// loaded value so ScratchRegister stays free for the immediate.
void
MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
{
    ma_lw(SecondScratchReg, addr);
    ma_b(SecondScratchReg, imm, label, c, jumpKind);
}

// Unconditional branch.
void
MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
{
    branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}

// Branch-and-link (call).
void
MacroAssemblerMIPS::ma_bal(Label *label, JumpKind jumpKind)
{
    branchWithCode(getBranchCode(BranchIsCall), label, jumpKind);
}
// Emit branch instruction |code| targeting |label|. Bound labels get either
// a short PC-relative branch or a long jump sequence (lui/ori + jr/jalr);
// unbound labels get a placeholder that threads this site into the label's
// patch chain for Assembler::bind to fix up later.
void
MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind)
{
    // Canonical encodings used to recognize "call" and "unconditional jump".
    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

    if (label->bound()) {
        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

        // Prefer the short form whenever the offset fits in 16 bits.
        if (BOffImm16::isInRange(offset))
            jumpKind = ShortJump;

        if (jumpKind == ShortJump) {
            MOZ_ASSERT(BOffImm16::isInRange(offset));
            code.setBOffImm16(BOffImm16(offset));
            writeInst(code.encode());
            // Branch delay slot.
            as_nop();
            return;
        }

        // Generate long jump because target is out of range of short jump.
        if (code.encode() == inst_bgezal.encode()) {
            // Handle long call
            addLongJump(nextOffset());
            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
            as_jalr(ScratchRegister);
            as_nop();
            return;
        }
        if (code.encode() == inst_beq.encode()) {
            // Handle long jump
            addLongJump(nextOffset());
            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
            as_jr(ScratchRegister);
            as_nop();
            return;
        }

        // Handle long conditional branch: invert the condition to skip over
        // the 4-instruction long-jump sequence that follows.
        writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
        // No need for a "nop" here because we can clobber scratch.
        addLongJump(nextOffset());
        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
        as_jr(ScratchRegister);
        as_nop();
        return;
    }

    // Generate open jump and link it to a label.

    // Second word holds a pointer to the next branch in label's chain.
    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

    if (jumpKind == ShortJump) {
        // Make the whole branch continuous in the buffer.
        m_buffer.ensureSpace(2 * sizeof(uint32_t));

        // Indicate that this is short jump with offset 4.
        code.setBOffImm16(BOffImm16(4));
        BufferOffset bo = writeInst(code.encode());
        writeInst(nextInChain);
        label->use(bo.getOffset());
        return;
    }

    bool conditional = (code.encode() != inst_bgezal.encode() &&
                        code.encode() != inst_beq.encode());

    // Make the whole branch continuous in the buffer.
    m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));

    BufferOffset bo = writeInst(code.encode());
    writeInst(nextInChain);
    label->use(bo.getOffset());
    // Leave space for potential long jump.
    as_nop();
    as_nop();
    if (conditional)
        as_nop();
}
// Lower a relational condition |c| on (lhs, rhs) into an slt/sltu that
// leaves 0/1 in |scratch|, and return the Equal/NotEqual condition the
// caller should branch on against $zero.
Assembler::Condition
MacroAssemblerMIPS::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
{
    switch (c) {
      case Above:
        // bgtu s,t,label =>
        //   sltu at,t,s
        //   bne at,$zero,offs
        as_sltu(scratch, rhs, lhs);
        return NotEqual;
      case AboveOrEqual:
        // bgeu s,t,label =>
        //   sltu at,s,t
        //   beq at,$zero,offs
        as_sltu(scratch, lhs, rhs);
        return Equal;
      case Below:
        // bltu s,t,label =>
        //   sltu at,s,t
        //   bne at,$zero,offs
        as_sltu(scratch, lhs, rhs);
        return NotEqual;
      case BelowOrEqual:
        // bleu s,t,label =>
        //   sltu at,t,s
        //   beq at,$zero,offs
        as_sltu(scratch, rhs, lhs);
        return Equal;
      case GreaterThan:
        // bgt s,t,label =>
        //   slt at,t,s
        //   bne at,$zero,offs
        as_slt(scratch, rhs, lhs);
        return NotEqual;
      case GreaterThanOrEqual:
        // bge s,t,label =>
        //   slt at,s,t
        //   beq at,$zero,offs
        as_slt(scratch, lhs, rhs);
        return Equal;
      case LessThan:
        // blt s,t,label =>
        //   slt at,s,t
        //   bne at,$zero,offs
        as_slt(scratch, lhs, rhs);
        return NotEqual;
      case LessThanOrEqual:
        // ble s,t,label =>
        //   slt at,t,s
        //   beq at,$zero,offs
        as_slt(scratch, rhs, lhs);
        return Equal;
      case Equal :
      case NotEqual:
      case Zero:
      case NonZero:
      case Always:
      case Signed:
      case NotSigned:
        // These map directly onto branch encodings; callers must not route
        // them through ma_cmp.
        MOZ_ASSUME_UNREACHABLE("There is a better way to compare for equality.");
        break;
      case Overflow:
        MOZ_ASSUME_UNREACHABLE("Overflow condition not supported for MIPS.");
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid condition for branch.");
    }
    return Always;
}
// Materialize the boolean result (0 or 1) of comparing rs with rt under
// condition |c| into |rd|, using slt/sltu plus xor/xori fix-ups.
void
MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Register rt, Condition c)
{
    switch (c) {
      case Equal :
        // seq d,s,t =>
        //   xor d,s,t
        //   sltiu d,d,1
        as_xor(rd, rs, rt);
        as_sltiu(rd, rd, 1);
        break;
      case NotEqual:
        // sne d,s,t =>
        //   xor d,s,t
        //   sltu d,$zero,d
        as_xor(rd, rs, rt);
        as_sltu(rd, zero, rd);
        break;
      case Above:
        // sgtu d,s,t =>
        //   sltu d,t,s
        as_sltu(rd, rt, rs);
        break;
      case AboveOrEqual:
        // sgeu d,s,t =>
        //   sltu d,s,t
        //   xori d,d,1
        as_sltu(rd, rs, rt);
        as_xori(rd, rd, 1);
        break;
      case Below:
        // sltu d,s,t
        as_sltu(rd, rs, rt);
        break;
      case BelowOrEqual:
        // sleu d,s,t =>
        //   sltu d,t,s
        //   xori d,d,1
        as_sltu(rd, rt, rs);
        as_xori(rd, rd, 1);
        break;
      case GreaterThan:
        // sgt d,s,t =>
        //   slt d,t,s
        as_slt(rd, rt, rs);
        break;
      case GreaterThanOrEqual:
        // sge d,s,t =>
        //   slt d,s,t
        //   xori d,d,1
        as_slt(rd, rs, rt);
        as_xori(rd, rd, 1);
        break;
      case LessThan:
        // slt d,s,t
        as_slt(rd, rs, rt);
        break;
      case LessThanOrEqual:
        // sle d,s,t =>
        //   slt d,t,s
        //   xori d,d,1
        as_slt(rd, rt, rs);
        as_xori(rd, rd, 1);
        break;
      case Zero:
        // Single-operand test: both register arguments must be the same.
        MOZ_ASSERT(rs == rt);
        // seq d,s,$zero =>
        //   xor d,s,$zero
        //   sltiu d,d,1
        as_xor(rd, rs, zero);
        as_sltiu(rd, rd, 1);
        break;
      case NonZero:
        // sne d,s,$zero =>
        //   xor d,s,$zero
        //   sltu d,$zero,d
        as_xor(rd, rs, zero);
        as_sltu(rd, zero, rd);
        break;
      case Signed:
        // Set if the sign bit of rs is set.
        as_slt(rd, rs, zero);
        break;
      case NotSigned:
        // sge d,s,$zero =>
        //   slt d,s,$zero
        //   xori d,d,1
        as_slt(rd, rs, zero);
        as_xori(rd, rd, 1);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid condition for ma_cmp_set.");
        break;
    }
}
void
MacroAssemblerMIPS::compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                                         DoubleCondition c, FloatTestKind *testKind,
                                         FPConditionBit fcc)
{
    // Emit the FPU compare corresponding to DoubleCondition c, leaving the
    // result in FP condition bit fcc. The c.olt/c.ole predicates are ordered
    // (false on NaN) while c.ult/c.ule/c.un/c.ueq also accept unordered
    // operands. Conditions with no direct encoding are emitted as their
    // negation; *testKind tells the caller whether to act on the bit being
    // set (TestForTrue) or clear (TestForFalse).
    switch (c) {
      case DoubleOrdered:
        // "ordered" is the negation of c.un (compare unordered).
        as_cun(fmt, lhs, rhs, fcc);
        *testKind = TestForFalse;
        break;
      case DoubleEqual:
        as_ceq(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleNotEqual:
        // ordered not-equal is the negation of c.ueq (equal-or-unordered).
        as_cueq(fmt, lhs, rhs, fcc);
        *testKind = TestForFalse;
        break;
      case DoubleGreaterThan:
        // a > b  <=>  b < a: operands swapped.
        as_colt(fmt, rhs, lhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleGreaterThanOrEqual:
        as_cole(fmt, rhs, lhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleLessThan:
        as_colt(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleLessThanOrEqual:
        as_cole(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleUnordered:
        as_cun(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleEqualOrUnordered:
        as_cueq(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleNotEqualOrUnordered:
        // negation of the ordered equality test.
        as_ceq(fmt, lhs, rhs, fcc);
        *testKind = TestForFalse;
        break;
      case DoubleGreaterThanOrUnordered:
        as_cult(fmt, rhs, lhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleGreaterThanOrEqualOrUnordered:
        as_cule(fmt, rhs, lhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleLessThanOrUnordered:
        as_cult(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      case DoubleLessThanOrEqualOrUnordered:
        as_cule(fmt, lhs, rhs, fcc);
        *testKind = TestForTrue;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid DoubleCondition.");
        break;
    }
}
void
MacroAssemblerMIPS::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
                                      DoubleCondition c)
{
    // Set dest to the boolean result of (lhs `c` rhs) for doubles: seed
    // dest=0 and ScratchRegister=1, then conditionally move the 1 in on the
    // FP condition bit, honouring the true/false sense chosen by
    // compareFloatingPoint.
    ma_li(dest, Imm32(0));
    ma_li(ScratchRegister, Imm32(1));

    FloatTestKind moveCondition;
    compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);

    if (moveCondition == TestForTrue)
        as_movt(dest, ScratchRegister);
    else
        as_movf(dest, ScratchRegister);
}

void
MacroAssemblerMIPS::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
                                       DoubleCondition c)
{
    // Single-precision variant of ma_cmp_set_double above.
    ma_li(dest, Imm32(0));
    ma_li(ScratchRegister, Imm32(1));

    FloatTestKind moveCondition;
    compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);

    if (moveCondition == TestForTrue)
        as_movt(dest, ScratchRegister);
    else
        as_movf(dest, ScratchRegister);
}

void
MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
{
    // Compare against an immediate by materializing it into ScratchRegister.
    ma_li(ScratchRegister, imm);
    ma_cmp_set(rd, rs, ScratchRegister, c);
}

void
MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
{
    // Compare against a value loaded from memory (rhs side).
    ma_lw(ScratchRegister, addr);
    ma_cmp_set(rd, rs, ScratchRegister, c);
}

void
MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
{
    // Compare a value loaded from memory (lhs side) against a register.
    ma_lw(ScratchRegister, lhs);
    ma_cmp_set(dst, ScratchRegister, rhs, c);
}
1292 // fp instructions
void
MacroAssemblerMIPS::ma_lis(FloatRegister dest, float value)
{
    // Load a float immediate: move its bit pattern through ScratchRegister
    // into the FPU register.
    Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));

    ma_li(ScratchRegister, imm);
    moveToFloat32(ScratchRegister, dest);
}

void
MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
{
    // Load a double immediate as two 32-bit halves.
    // NOTE(review): the {lo, hi} field order assumes a little-endian host
    // layout of the double's bit pattern — confirm for big-endian targets.
    struct DoubleStruct {
        uint32_t lo;
        uint32_t hi;
    } ;
    DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);

    // put hi part of 64 bit value into the odd register
    if (intStruct.hi == 0) {
        // Zero halves come straight from $zero, saving the ma_li.
        moveToDoubleHi(zero, dest);
    } else {
        ma_li(ScratchRegister, Imm32(intStruct.hi));
        moveToDoubleHi(ScratchRegister, dest);
    }

    // put low part of 64 bit value into the even register
    if (intStruct.lo == 0) {
        moveToDoubleLo(zero, dest);
    } else {
        ma_li(ScratchRegister, Imm32(intStruct.lo));
        moveToDoubleLo(ScratchRegister, dest);
    }
}

void
MacroAssemblerMIPS::ma_liNegZero(FloatRegister dest)
{
    // Load -0.0: low word is all zeroes, high word is just the sign bit
    // (INT_MIN == 0x80000000).
    moveToDoubleLo(zero, dest);
    ma_li(ScratchRegister, Imm32(INT_MIN));
    moveToDoubleHi(ScratchRegister, dest);
}
void
MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
{
    // Move a boxed double out of an FPU register: low word -> payload
    // register, high word -> type register (nunbox32 layout).
    moveFromDoubleLo(src, dest.payloadReg());
    moveFromDoubleHi(src, dest.typeReg());
}

void
MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
{
    // Inverse of the above: reassemble a double from payload/type registers.
    moveToDoubleLo(src.payloadReg(), dest);
    moveToDoubleHi(src.typeReg(), dest);
}
void
MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
{
    // Load a single-precision float. If the offset fits in the 16-bit signed
    // immediate field, use it directly; otherwise compute the full address
    // into ScratchRegister first.
    if (Imm16::isInSignedRange(address.offset)) {
        as_ls(ft, address.base, Imm16(address.offset).encode());
    } else {
        MOZ_ASSERT(address.base != ScratchRegister);
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ls(ft, ScratchRegister, 0);
    }
}

void
MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
{
    // Use single precision load instructions so we don't have to worry about
    // alignment.
    // The two 32-bit halves go into the even/odd register pair; the second
    // word lives TAG_OFFSET (4) bytes above the first.
    int32_t off2 = address.offset + TAG_OFFSET;
    if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
        as_ls(ft, address.base, Imm16(address.offset).encode());
        as_ls(getOddPair(ft), address.base, Imm16(off2).encode());
    } else {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
        as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
    }
}

void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
{
    // Store a double as two single-precision stores (see ma_ld for why).
    int32_t off2 = address.offset + TAG_OFFSET;
    if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
        as_ss(ft, address.base, Imm16(address.offset).encode());
        as_ss(getOddPair(ft), address.base, Imm16(off2).encode());
    } else {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
        as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
    }
}

void
MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address)
{
    // Resolve base+index*scale into SecondScratchReg, then store as above.
    computeScaledAddress(address, SecondScratchReg);
    ma_sd(ft, Address(SecondScratchReg, address.offset));
}

void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
{
    // Store a single-precision float; same offset-range handling as ma_ls.
    if (Imm16::isInSignedRange(address.offset)) {
        as_ss(ft, address.base, Imm16(address.offset).encode());
    } else {
        ma_li(ScratchRegister, Imm32(address.offset));
        as_addu(ScratchRegister, address.base, ScratchRegister);
        as_ss(ft, ScratchRegister, 0);
    }
}

void
MacroAssemblerMIPS::ma_ss(FloatRegister ft, BaseIndex address)
{
    // BaseIndex variant: resolve the scaled address first.
    computeScaledAddress(address, SecondScratchReg);
    ma_ss(ft, Address(SecondScratchReg, address.offset));
}
void
MacroAssemblerMIPS::ma_pop(FloatRegister fs)
{
    // Pop a double off the stack: load it, then release the slot.
    ma_ld(fs, Address(StackPointer, 0));
    as_addiu(StackPointer, StackPointer, sizeof(double));
}
1429 void
1430 MacroAssemblerMIPS::ma_push(FloatRegister fs)
1431 {
1432 as_addiu(StackPointer, StackPointer, -sizeof(double));
1433 ma_sd(fs, Address(StackPointer, 0));
1434 }
void
MacroAssemblerMIPS::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label *label,
                            DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
{
    // Single-precision compare-and-branch: compare into condition bit fcc,
    // then branch on the true/false sense reported by compareFloatingPoint.
    FloatTestKind testKind;
    compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
    branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}

void
MacroAssemblerMIPS::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label *label,
                            DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
{
    // Double-precision variant of ma_bc1s.
    FloatTestKind testKind;
    compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
    branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
bool
MacroAssemblerMIPSCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset)
{
    // Push a synthetic exit frame (frame descriptor + "return address") so
    // the stack walker can traverse past a VM call. The return address is a
    // code label bound just after the pushes; its offset is reported through
    // *offset for later patching.
    mozilla::DebugOnly<uint32_t> initialDepth = framePushed();

    CodeLabel cl;
    ma_li(scratch, cl.dest());

    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
    Push(Imm32(descriptor));
    Push(scratch);

    bind(cl.src());
    *offset = currentOffset();

    MOZ_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size());
    return addCodeLabel(cl);
}

bool
MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
{
    // Out-of-line variant: the fake return address is a known pointer, so no
    // code label is needed.
    DebugOnly<uint32_t> initialDepth = framePushed();
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);

    Push(Imm32(descriptor)); // descriptor_
    Push(ImmPtr(fakeReturnAddr));

    return true;
}
void
MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target)
{
    // Call JIT code with an exit-frame descriptor pushed so the callee's
    // frame can be walked. The target address is loaded via a patchable
    // sequence and registered for relocation.
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
    Push(Imm32(descriptor)); // descriptor

    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
    ma_callIonHalfPush(ScratchRegister);
}

void
MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target, Register dynStack)
{
    // Variant where extra dynamic stack (e.g. pushed arguments) is accounted
    // for: the descriptor is built at runtime in dynStack.
    ma_addu(dynStack, dynStack, Imm32(framePushed()));
    makeFrameDescriptor(dynStack, JitFrame_IonJS);
    Push(dynStack); // descriptor

    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
    ma_callIonHalfPush(ScratchRegister);
}

void
MacroAssemblerMIPSCompat::callIon(const Register &callee)
{
    // Call into Ion code keeping the stack 8-byte aligned after the return
    // address is pushed: if we're 4 mod 8, the half-push restores alignment;
    // otherwise pad by one word first.
    MOZ_ASSERT((framePushed() & 3) == 0);
    if ((framePushed() & 7) == 4) {
        ma_callIonHalfPush(callee);
    } else {
        adjustFrame(sizeof(uint32_t));
        ma_callIon(callee);
    }
}
void
MacroAssemblerMIPSCompat::reserveStack(uint32_t amount)
{
    // Grow the stack by amount bytes and record it in the frame depth.
    if (amount)
        ma_subu(StackPointer, StackPointer, Imm32(amount));
    adjustFrame(amount);
}

void
MacroAssemblerMIPSCompat::freeStack(uint32_t amount)
{
    // Shrink the stack by a statically known amount, updating frame depth.
    MOZ_ASSERT(amount <= framePushed_);
    if (amount)
        ma_addu(StackPointer, StackPointer, Imm32(amount));
    adjustFrame(-amount);
}

void
MacroAssemblerMIPSCompat::freeStack(Register amount)
{
    // Dynamic free: the amount is only known at runtime, so the tracked
    // frame depth is deliberately left untouched.
    as_addu(StackPointer, StackPointer, amount);
}
void
MacroAssembler::PushRegsInMask(RegisterSet set)
{
    // Spill every register in `set` to the stack: GPRs first, then FPU
    // registers into an 8-byte-aligned area.
    int32_t diffF = set.fpus().size() * sizeof(double);
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    reserveStack(diffG);
    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
        diffG -= sizeof(intptr_t);
        storePtr(*iter, Address(StackPointer, diffG));
    }
    MOZ_ASSERT(diffG == 0);

    // Double values have to be aligned. We reserve extra space so that we can
    // start writing from the first aligned location.
    // We reserve a whole extra double so that the buffer has even size.
    // SecondScratchReg captures the aligned sp *before* reserveStack, so the
    // negative -diffF offsets below index down into the new area.
    ma_and(SecondScratchReg, sp, Imm32(~(StackAlignment - 1)));
    reserveStack(diffF + sizeof(double));

    for (FloatRegisterForwardIterator iter(set.fpus()); iter.more(); iter++) {
        // Use assembly s.d because we have aligned the stack.
        // :TODO: (Bug 972836) Fix this once odd regs can be used as
        // float32 only. For now we skip saving odd regs for O32 ABI.

        // :TODO: (Bug 985881) Make a switch for N32 ABI.
        if ((*iter).code() % 2 == 0)
            as_sd(*iter, SecondScratchReg, -diffF);
        diffF -= sizeof(double);
    }
    MOZ_ASSERT(diffF == 0);
}
void
MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
{
    // Inverse of PushRegsInMask: restore every register in `set` except those
    // in `ignore` (whose stack slots are skipped, not removed), then release
    // the reserved space.
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = set.fpus().size() * sizeof(double);
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    // Read the buffer from the first aligned location; this recomputes the
    // same aligned base PushRegsInMask used before it reserved the FP area.
    ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(StackAlignment - 1)));

    for (FloatRegisterForwardIterator iter(set.fpus()); iter.more(); iter++) {
        // :TODO: (Bug 972836) Fix this once odd regs can be used as
        // float32 only. For now we skip loading odd regs for O32 ABI.

        // :TODO: (Bug 985881) Make a switch for N32 ABI.
        if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
            // Use assembly l.d because we have aligned the stack.
            as_ld(*iter, SecondScratchReg, -diffF);
        diffF -= sizeof(double);
    }
    freeStack(reservedF + sizeof(double));
    MOZ_ASSERT(diffF == 0);

    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
        diffG -= sizeof(intptr_t);
        if (!ignore.has(*iter))
            loadPtr(Address(StackPointer, diffG), *iter);
    }
    freeStack(reservedG);
    MOZ_ASSERT(diffG == 0);
}
// 32-bit arithmetic wrappers: thin adapters from the platform-independent
// MacroAssembler interface onto the ma_* / as_* MIPS emitters.
void
MacroAssemblerMIPSCompat::add32(Register src, Register dest)
{
    as_addu(dest, dest, src);
}

void
MacroAssemblerMIPSCompat::add32(Imm32 imm, Register dest)
{
    ma_addu(dest, dest, imm);
}

void

MacroAssemblerMIPSCompat::add32(Imm32 imm, const Address &dest)
{
    // Read-modify-write through SecondScratchReg.
    load32(dest, SecondScratchReg);
    ma_addu(SecondScratchReg, imm);
    store32(SecondScratchReg, dest);
}

void
MacroAssemblerMIPSCompat::sub32(Imm32 imm, Register dest)
{
    ma_subu(dest, dest, imm);
}

void
MacroAssemblerMIPSCompat::sub32(Register src, Register dest)
{
    ma_subu(dest, dest, src);
}

void
MacroAssemblerMIPSCompat::addPtr(Register src, Register dest)
{
    // Pointers are 32-bit on this target, so pointer add == word add.
    ma_addu(dest, src);
}

void
MacroAssemblerMIPSCompat::addPtr(const Address &src, Register dest)
{
    loadPtr(src, ScratchRegister);
    ma_addu(dest, ScratchRegister);
}

void
MacroAssemblerMIPSCompat::subPtr(Register src, Register dest)
{
    ma_subu(dest, dest, src);
}

void
MacroAssemblerMIPSCompat::not32(Register reg)
{
    ma_not(reg, reg);
}
// Logical operations
// All 32-bit and pointer-width bitwise ops; on MIPS32 these are identical,
// so the *Ptr variants forward to the same ma_* emitters.
void
MacroAssemblerMIPSCompat::and32(Imm32 imm, Register dest)
{
    ma_and(dest, imm);
}

void
MacroAssemblerMIPSCompat::and32(Imm32 imm, const Address &dest)
{
    // Read-modify-write through SecondScratchReg.
    load32(dest, SecondScratchReg);
    ma_and(SecondScratchReg, imm);
    store32(SecondScratchReg, dest);
}

void
MacroAssemblerMIPSCompat::or32(Imm32 imm, const Address &dest)
{
    load32(dest, SecondScratchReg);
    ma_or(SecondScratchReg, imm);
    store32(SecondScratchReg, dest);
}

void
MacroAssemblerMIPSCompat::xor32(Imm32 imm, Register dest)
{
    ma_xor(dest, imm);
}

void
MacroAssemblerMIPSCompat::xorPtr(Imm32 imm, Register dest)
{
    ma_xor(dest, imm);
}

void
MacroAssemblerMIPSCompat::xorPtr(Register src, Register dest)
{
    ma_xor(dest, src);
}

void
MacroAssemblerMIPSCompat::orPtr(Imm32 imm, Register dest)
{
    ma_or(dest, imm);
}

void
MacroAssemblerMIPSCompat::orPtr(Register src, Register dest)
{
    ma_or(dest, src);
}

void
MacroAssemblerMIPSCompat::andPtr(Imm32 imm, Register dest)
{
    ma_and(dest, imm);
}

void
MacroAssemblerMIPSCompat::andPtr(Register src, Register dest)
{
    ma_and(dest, src);
}
// Register/immediate moves. move32 and movePtr coincide on this 32-bit
// target.
void
MacroAssemblerMIPSCompat::move32(const Imm32 &imm, const Register &dest)
{
    ma_li(dest, imm);
}

void
MacroAssemblerMIPSCompat::move32(const Register &src, const Register &dest)
{
    ma_move(dest, src);
}

void
MacroAssemblerMIPSCompat::movePtr(const Register &src, const Register &dest)
{
    ma_move(dest, src);
}
void
MacroAssemblerMIPSCompat::movePtr(const ImmWord &imm, const Register &dest)
{
    // ImmWord fits in 32 bits here (see the sizeof(intptr_t) static_assert
    // at the top of the file).
    ma_li(dest, Imm32(imm.value));
}

void
MacroAssemblerMIPSCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
{
    // GC pointers use the ma_li overload that records a GC-traceable
    // relocation.
    ma_li(dest, imm);
}
void
MacroAssemblerMIPSCompat::movePtr(const ImmPtr &imm, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerMIPSCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
{
    // asm.js absolute links are not implemented on MIPS yet.
    MOZ_ASSUME_UNREACHABLE("NYI");
}
// Memory loads. The integer variants forward to ma_load with an explicit
// width and extension mode; pointer loads are plain 32-bit word loads.
void
MacroAssemblerMIPSCompat::load8ZeroExtend(const Address &address, const Register &dest)
{
    ma_load(dest, address, SizeByte, ZeroExtend);
}

void
MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
{
    ma_load(dest, src, SizeByte, ZeroExtend);
}

void
MacroAssemblerMIPSCompat::load8SignExtend(const Address &address, const Register &dest)
{
    ma_load(dest, address, SizeByte, SignExtend);
}

void
MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex &src, const Register &dest)
{
    ma_load(dest, src, SizeByte, SignExtend);
}

void
MacroAssemblerMIPSCompat::load16ZeroExtend(const Address &address, const Register &dest)
{
    ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

void
MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
{
    ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

void
MacroAssemblerMIPSCompat::load16SignExtend(const Address &address, const Register &dest)
{
    ma_load(dest, address, SizeHalfWord, SignExtend);
}

void
MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
{
    ma_load(dest, src, SizeHalfWord, SignExtend);
}

void
MacroAssemblerMIPSCompat::load32(const Address &address, const Register &dest)
{
    ma_lw(dest, address);
}

void
MacroAssemblerMIPSCompat::load32(const BaseIndex &address, const Register &dest)
{
    ma_load(dest, address, SizeWord);
}

void
MacroAssemblerMIPSCompat::load32(const AbsoluteAddress &address, const Register &dest)
{
    // Absolute addresses don't fit in a load immediate; materialize the
    // address first.
    ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
    as_lw(dest, ScratchRegister, 0);
}

void
MacroAssemblerMIPSCompat::loadPtr(const Address &address, const Register &dest)
{
    ma_lw(dest, address);
}

void
MacroAssemblerMIPSCompat::loadPtr(const BaseIndex &src, const Register &dest)
{
    // Pointers are 32 bits wide here.
    load32(src, dest);
}

void
MacroAssemblerMIPSCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
{
    ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
    as_lw(dest, ScratchRegister, 0);
}
void
MacroAssemblerMIPSCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
{
    // NOTE(review): movePtr(AsmJSImmPtr, ...) is MOZ_ASSUME_UNREACHABLE on
    // MIPS, so this path cannot currently be taken.
    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}

void
MacroAssemblerMIPSCompat::loadPrivate(const Address &address, const Register &dest)
{
    // A "private" value stores a raw pointer in the payload word of the box.
    ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::loadDouble(const Address &address, const FloatRegister &dest)
{
    ma_ld(dest, address);
}

void
MacroAssemblerMIPSCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_ld(dest, Address(SecondScratchReg, src.offset));
}

void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
{
    // Load a float32 and widen it to double in place.
    ma_ls(dest, address);
    as_cvtds(dest, dest);
}

void
MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
{
    loadFloat32(src, dest);
    as_cvtds(dest, dest);
}

void
MacroAssemblerMIPSCompat::loadFloat32(const Address &address, const FloatRegister &dest)
{
    ma_ls(dest, address);
}

void
MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
{
    computeScaledAddress(src, SecondScratchReg);
    ma_ls(dest, Address(SecondScratchReg, src.offset));
}
// Memory stores. Immediate-source variants stage the value in a scratch
// register (SecondScratchReg for sized stores, ScratchRegister for words).
void
MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const Address &address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(const Register &src, const Address &address)
{
    ma_store(src, address, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const BaseIndex &dest)
{
    ma_store(imm, dest, SizeByte);
}

void
MacroAssemblerMIPSCompat::store8(const Register &src, const BaseIndex &dest)
{
    ma_store(src, dest, SizeByte);
}

void
MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const Address &address)
{
    ma_li(SecondScratchReg, imm);
    ma_store(SecondScratchReg, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(const Register &src, const Address &address)
{
    ma_store(src, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const BaseIndex &dest)
{
    ma_store(imm, dest, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store16(const Register &src, const BaseIndex &address)
{
    ma_store(src, address, SizeHalfWord);
}

void
MacroAssemblerMIPSCompat::store32(const Register &src, const AbsoluteAddress &address)
{
    // Word stores and pointer stores coincide on this 32-bit target.
    storePtr(src, address);
}

void
MacroAssemblerMIPSCompat::store32(const Register &src, const Address &address)
{
    storePtr(src, address);
}

void
MacroAssemblerMIPSCompat::store32(const Imm32 &src, const Address &address)
{
    move32(src, ScratchRegister);
    storePtr(ScratchRegister, address);
}

void
MacroAssemblerMIPSCompat::store32(const Imm32 &imm, const BaseIndex &dest)
{
    ma_store(imm, dest, SizeWord);
}

void
MacroAssemblerMIPSCompat::store32(const Register &src, const BaseIndex &dest)
{
    ma_store(src, dest, SizeWord);
}

void
MacroAssemblerMIPSCompat::storePtr(ImmWord imm, const Address &address)
{
    ma_li(ScratchRegister, Imm32(imm.value));
    ma_sw(ScratchRegister, address);
}

void
MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, const Address &address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

void
MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, const Address &address)
{
    // ma_li(ImmGCPtr) records the GC-pointer relocation.
    ma_li(ScratchRegister, imm);
    ma_sw(ScratchRegister, address);
}

void
MacroAssemblerMIPSCompat::storePtr(Register src, const Address &address)
{
    ma_sw(src, address);
}

void
MacroAssemblerMIPSCompat::storePtr(const Register &src, const AbsoluteAddress &dest)
{
    // Materialize the absolute destination address first.
    ma_li(ScratchRegister, Imm32((uint32_t)dest.addr));
    as_sw(src, ScratchRegister, 0);
}

void
MacroAssemblerMIPSCompat::subPtr(Imm32 imm, const Register dest)
{
    ma_subu(dest, dest, imm);
}

void
MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Register dest)
{
    ma_addu(dest, imm);
}

void
MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Address &dest)
{
    // Read-modify-write through ScratchRegister.
    loadPtr(dest, ScratchRegister);
    addPtr(imm, ScratchRegister);
    storePtr(ScratchRegister, dest);
}
void
MacroAssemblerMIPSCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs,
                                       const FloatRegister &rhs, Label *label)
{
    // Double compare-and-branch; uses the default jump kind and FP condition
    // bit.
    ma_bc1d(lhs, rhs, label, cond);
}

void
MacroAssemblerMIPSCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs,
                                      const FloatRegister &rhs, Label *label)
{
    // Single-precision compare-and-branch.
    ma_bc1s(lhs, rhs, label, cond);
}
// higher level tag testing code
// Adjust an operand to address the payload or type word of a nunbox32
// Value stored in memory.
Operand
ToPayload(Operand base)
{
    return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
}

Operand
ToType(Operand base)
{
    return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
}
void
MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const Address &address, Label *label)
{
    // GC-thing tags occupy the top of the tag range, so membership is an
    // unsigned compare against the lower bound of that set.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}
void
MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
         (cond == Equal) ? AboveOrEqual : Below);
}

void
MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const ValueOperand &value,
                                              Label *label)
{
    branchTestPrimitive(cond, value.typeReg(), label);
}
void
MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const Register &tag, Label *label)
{
    // Primitive tags sit strictly below the upper-exclusive bound of the
    // primitive set, hence the unsigned range compare.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
         (cond == Equal) ? Below : AboveOrEqual);
}
// Tag tests against a single exact tag value: compare the type word (or the
// tag extracted from memory) for equality with the expected tag.
void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const ValueOperand &value, Label *label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Address &address, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
}

void
MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const ValueOperand &value,
                                            Label *label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
}

void
MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(tag, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
}
void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const ValueOperand &value, Label *label)
{
    // Doubles are not identified by a single tag: any type word below
    // JSVAL_TAG_CLEAR is a double, so equality becomes an unsigned range
    // test.
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_b(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), label, actual);
}

void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
}
2170 void
2171 MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Address &address, Label *label)
2172 {
2173 MOZ_ASSERT(cond == Equal || cond == NotEqual);
2174 extractTag(address, SecondScratchReg);
2175 ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, cond);
2176 }
void
MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const BaseIndex &src, Label *label)
{
    // See the ValueOperand overload: double-ness is an unsigned range test
    // against JSVAL_TAG_CLEAR, not a tag equality.
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_CLEAR), label, actual);
}
// Exact-tag tests for null, object and string values.
void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const ValueOperand &value, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const ValueOperand &value, Label *label)
{
    branchTestObject(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, const ValueOperand &value, Label *label)
{
    branchTestString(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestString(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
}
// Branch on whether a boxed value is undefined (tag == JSVAL_TAG_UNDEFINED).
void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const ValueOperand &value,
                                              Label *label)
{
    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    // Load the tag from memory into SecondScratchReg, then compare.
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Address &address, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
}
// Branch on whether a boxed value is a number (int32 or double).
void
MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const ValueOperand &value, Label *label)
{
    branchTestNumber(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    // Number tags form a contiguous range ending at
    // JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET, so an unsigned range check
    // (BelowOrEqual / Above) implements the equality/inequality test.
    ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
         cond == Equal ? BelowOrEqual : Above);
}
// Branch on whether a boxed value is a magic value (tag == JSVAL_TAG_MAGIC).
void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const ValueOperand &value, Label *label)
{
    branchTestMagic(cond, value.typeReg(), label);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Register &tag, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Address &address, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    // Load the tag from memory into SecondScratchReg, then compare.
    extractTag(address, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}

void
MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const BaseIndex &src, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, SecondScratchReg);
    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
}
// Branch on whether the boxed value in |value| equals the constant |v|.
// Equality requires both the payload word and the type word to match.
void
MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const ValueOperand &value,
                                          const Value &v, Label *label)
{
    // Materialize v's payload (GC pointer or raw 32-bit data) in scratch.
    moveData(v, ScratchRegister);

    if (cond == Equal) {
        // Equal: jump to label only if payload AND type both match.
        Label done;
        ma_b(value.payloadReg(), ScratchRegister, &done, NotEqual, ShortJump);
        {
            ma_b(value.typeReg(), Imm32(getType(v)), label, Equal);
        }
        bind(&done);
    } else {
        // NotEqual: jump to label if either word differs.
        MOZ_ASSERT(cond == NotEqual);
        ma_b(value.payloadReg(), ScratchRegister, label, NotEqual);

        ma_b(value.typeReg(), Imm32(getType(v)), label, NotEqual);
    }
}
// Compare the boxed value stored at |valaddr| against the value in |value|,
// branching to |label| per |cond|.
// NOTE(review): for cond == Equal this branches to label if EITHER word
// matches, which is only correct when the caller's label semantics account
// for it — verify against callers before changing.
void
MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const Address &valaddr,
                                          const ValueOperand &value, Label *label)
{
    MOZ_ASSERT(cond == Equal || cond == NotEqual);

    // Load tag.
    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + TAG_OFFSET));
    branchPtr(cond, ScratchRegister, value.typeReg(), label);

    // Load payload
    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + PAYLOAD_OFFSET));
    branchPtr(cond, ScratchRegister, value.payloadReg(), label);
}
// unboxing code
// For nunbox32 layouts the payload word IS the unboxed int32/boolean/
// string/object pointer, so most unbox operations are a register move or a
// 32-bit load of the payload half.
void
MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand &operand, const Register &dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxInt32(const Address &src, const Register &dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand &operand, const Register &dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxBoolean(const Address &src, const Register &dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest)
{
    MOZ_ASSERT(dest != ScratchFloatReg);
    // A boxed double's payload word is the low half and its type word is
    // the high half of the IEEE-754 bit pattern.
    moveToDoubleLo(operand.payloadReg(), dest);
    moveToDoubleHi(operand.typeReg(), dest);
}

void
MacroAssemblerMIPSCompat::unboxDouble(const Address &src, const FloatRegister &dest)
{
    // Load each 32-bit half through the scratch register.
    ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
    moveToDoubleLo(ScratchRegister, dest);
    ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
    moveToDoubleHi(ScratchRegister, dest);
}

void
MacroAssemblerMIPSCompat::unboxString(const ValueOperand &operand, const Register &dest)
{
    ma_move(dest, operand.payloadReg());
}

void
MacroAssemblerMIPSCompat::unboxString(const Address &src, const Register &dest)
{
    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::unboxObject(const ValueOperand &src, const Register &dest)
{
    ma_move(dest, src.payloadReg());
}
// Unbox a value into either a float register (converting int32 to double
// when needed) or a general-purpose register (payload move).
void
MacroAssemblerMIPSCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
{
    if (dest.isFloat()) {
        // Float destination: an int32 value must be converted to double;
        // a double value is reassembled from its two halves.
        Label notInt32, end;
        branchTestInt32(Assembler::NotEqual, src, &notInt32);
        convertInt32ToDouble(src.payloadReg(), dest.fpu());
        ma_b(&end, ShortJump);
        bind(&notInt32);
        unboxDouble(src, dest.fpu());
        bind(&end);
    } else if (src.payloadReg() != dest.gpr()) {
        ma_move(dest.gpr(), src.payloadReg());
    }
}
// A private value's payload is an opaque pointer stored directly.
void
MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand &src, Register dest)
{
    ma_move(dest, src.payloadReg());
}

// Box a double by splitting its bit pattern: low half -> payload register,
// high half -> type register.
void
MacroAssemblerMIPSCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
{
    moveFromDoubleLo(src, dest.payloadReg());
    moveFromDoubleHi(src, dest.typeReg());
}

// Box a non-double value: move the payload (if not already in place) and
// materialize the type tag for |type| in the type register.
void
MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, const Register &src,
                                       const ValueOperand &dest)
{
    if (src != dest.payloadReg())
        ma_move(dest.payloadReg(), src);
    ma_li(dest.typeReg(), ImmType(type));
}
2462 void
2463 MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
2464 {
2465 convertBoolToInt32(ScratchRegister, operand.payloadReg());
2466 convertInt32ToDouble(ScratchRegister, dest);
2467 }
// Convert the int32 payload of |operand| to a double in |dest|.
void
MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand &operand,
                                             const FloatRegister &dest)
{
    convertInt32ToDouble(operand.payloadReg(), dest);
}
2476 void
2477 MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand &operand,
2478 const FloatRegister &dest)
2479 {
2481 convertBoolToInt32(ScratchRegister, operand.payloadReg());
2482 convertInt32ToFloat32(ScratchRegister, dest);
2483 }
// Convert the int32 payload of |operand| to a float32 in |dest|.
void
MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand &operand,
                                              const FloatRegister &dest)
{
    convertInt32ToFloat32(operand.payloadReg(), dest);
}

// Materialize the float constant |f| in |dest|.
void
MacroAssemblerMIPSCompat::loadConstantFloat32(float f, const FloatRegister &dest)
{
    ma_lis(dest, f);
}
// Load a numeric value (int32 or double) from |src| into |dest| as a
// double, converting if the stored value is an int32.
void
MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address &src, const FloatRegister &dest)
{
    Label notInt32, end;
    // If it's an int, convert it to double.
    ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
    ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
    convertInt32ToDouble(SecondScratchReg, dest);
    ma_b(&end, ShortJump);

    // Not an int, just load as double.
    bind(&notInt32);
    ma_ld(dest, src);
    bind(&end);
}
// Indexed variant of loadInt32OrDouble: the element address is
// base + (index << shift). The scaled address must be recomputed after each
// load because SecondScratchReg doubles as both address and data register.
void
MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
                                            const FloatRegister &dest, int32_t shift)
{
    Label notInt32, end;

    // If it's an int, convert it to double.

    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
    // Since we only have one scratch, we need to stomp over it with the tag.
    load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);

    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
    load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
    convertInt32ToDouble(SecondScratchReg, dest);
    ma_b(&end, ShortJump);

    // Not an int, just load as double.
    bind(&notInt32);
    // First, recompute the offset that had been stored in the scratch register
    // since the scratch register was overwritten loading in the type.
    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
    loadDouble(Address(SecondScratchReg, 0), dest);
    bind(&end);
}
// Materialize the double constant |dp| in |dest|.
void
MacroAssemblerMIPSCompat::loadConstantDouble(double dp, const FloatRegister &dest)
{
    ma_lid(dest, dp);
}

// Branch on the truthiness of an int32 value: truthy iff payload != 0.
void
MacroAssemblerMIPSCompat::branchTestInt32Truthy(bool b, const ValueOperand &value, Label *label)
{
    // and-ing the payload with itself just copies it into the scratch
    // register so the branch can test it against itself for (non-)zero.
    ma_and(ScratchRegister, value.payloadReg(), value.payloadReg());
    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
}
// Branch on the truthiness of a string value: truthy iff its length is
// non-zero (length bits extracted from the lengthAndFlags word).
void
MacroAssemblerMIPSCompat::branchTestStringTruthy(bool b, const ValueOperand &value, Label *label)
{
    Register string = value.payloadReg();
    // Mask selecting only the length bits of lengthAndFlags.
    size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
    ma_lw(SecondScratchReg, Address(string, JSString::offsetOfLengthAndFlags()));

    // Use SecondScratchReg because ma_and will clobber ScratchRegister
    ma_and(ScratchRegister, SecondScratchReg, Imm32(mask));
    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
}

// Branch on the truthiness of a double: truthy iff value != 0.0, with NaN
// treated as falsy (hence DoubleEqualOrUnordered on the negated test).
void
MacroAssemblerMIPSCompat::branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label)
{
    ma_lid(ScratchFloatReg, 0.0);
    DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
    ma_bc1d(value, ScratchFloatReg, label, cond);
}

// Branch on the truthiness of a boolean: truthy iff payload != 0.
void
MacroAssemblerMIPSCompat::branchTestBooleanTruthy(bool b, const ValueOperand &operand,
                                                  Label *label)
{
    ma_b(operand.payloadReg(), operand.payloadReg(), label, b ? NonZero : Zero);
}
// Load the object payload stored at |address| into |scratch| and return it.
Register
MacroAssemblerMIPSCompat::extractObject(const Address &address, Register scratch)
{
    ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
    return scratch;
}

// Load the type tag stored at |address| into |scratch| and return it.
Register
MacroAssemblerMIPSCompat::extractTag(const Address &address, Register scratch)
{
    ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
    return scratch;
}

// Indexed variant: compute the scaled element address into |scratch| first,
// then load the tag through it.
Register
MacroAssemblerMIPSCompat::extractTag(const BaseIndex &address, Register scratch)
{
    computeScaledAddress(address, scratch);
    return extractTag(Address(scratch, address.offset), scratch);
}
// Return the 32-bit type tag of the constant |val| via the jsval_layout
// view of its bits.
uint32_t
MacroAssemblerMIPSCompat::getType(const Value &val)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    return jv.s.tag;
}

// Materialize the payload word of the constant |val| in |data|. GC things
// are loaded as ImmGCPtr so the GC can trace/patch them; everything else
// is the raw 32-bit payload.
void
MacroAssemblerMIPSCompat::moveData(const Value &val, Register data)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    if (val.isMarkable())
        ma_li(data, ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
    else
        ma_li(data, Imm32(jv.s.payload.i32));
}

// Materialize the constant |val| into separate type and payload registers.
void
MacroAssemblerMIPSCompat::moveValue(const Value &val, Register type, Register data)
{
    MOZ_ASSERT(type != data);
    ma_li(type, Imm32(getType(val)));
    moveData(val, data);
}
void
MacroAssemblerMIPSCompat::moveValue(const Value &val, const ValueOperand &dest)
{
    moveValue(val, dest.typeReg(), dest.payloadReg());
}
// Emit a patchable long jump to |label| and return the code offset of the
// jump so it can be repatched later.
CodeOffsetJump
MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel *label)
{
    // Only one branch per label.
    MOZ_ASSERT(!label->used());
    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;

    BufferOffset bo = nextOffset();
    label->use(bo.getOffset());
    // Register as a long jump so the target address gets fixed up, then emit
    // a patchable load of the destination followed by an indirect jump.
    addLongJump(bo);
    ma_liPatchable(ScratchRegister, Imm32(dest));
    as_jr(ScratchRegister);
    as_nop();   // branch delay slot
    return CodeOffsetJump(bo.getOffset());
}
2651 /////////////////////////////////////////////////////////////////
2652 // X86/X64-common/ARM/MIPS interface.
2653 /////////////////////////////////////////////////////////////////
// Store a boxed value (type + payload words) to the given destination.
void
MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
{
    storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}

void
MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex &dest)
{
    // Resolve the scaled address first, then fall through to the Address
    // overload.
    computeScaledAddress(dest, SecondScratchReg);
    storeValue(val, Address(SecondScratchReg, dest.offset));
}

void
MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    // Make sure that ma_sw doesn't clobber ScratchRegister
    // If the offset doesn't fit in a signed 16-bit immediate, fold it into
    // the base address up front using SecondScratchReg.
    int32_t offset = dest.offset;
    if (!Imm16::isInSignedRange(offset)) {
        ma_li(SecondScratchReg, Imm32(offset));
        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }

    storeValue(type, reg, Address(ScratchRegister, offset));
}

void
MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address &dest)
{
    ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
    ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
}

void
MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
{
    // SecondScratchReg holds the materialized tag, so the base must differ.
    MOZ_ASSERT(dest.base != SecondScratchReg);

    ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}

void
MacroAssemblerMIPSCompat::storeValue(const Value &val, Address dest)
{
    MOZ_ASSERT(dest.base != SecondScratchReg);

    // Store tag first, then payload, reusing SecondScratchReg for both.
    ma_li(SecondScratchReg, Imm32(getType(val)));
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
    moveData(val, SecondScratchReg);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}

void
MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest)
{
    computeScaledAddress(dest, ScratchRegister);

    // Make sure that ma_sw doesn't clobber ScratchRegister
    int32_t offset = dest.offset;
    if (!Imm16::isInSignedRange(offset)) {
        ma_li(SecondScratchReg, Imm32(offset));
        as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
        offset = 0;
    }
    storeValue(val, Address(ScratchRegister, offset));
}
// Load a boxed value (type + payload words) from the given source.
void
MacroAssemblerMIPSCompat::loadValue(const BaseIndex &addr, ValueOperand val)
{
    computeScaledAddress(addr, SecondScratchReg);
    loadValue(Address(SecondScratchReg, addr.offset), val);
}

void
MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
{
    // Ensure that loading the payload does not erase the pointer to the
    // Value in memory.
    if (src.base != val.payloadReg()) {
        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
    } else {
        // Base aliases the payload register: load the type first so the
        // base pointer is still intact for the second load.
        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
    }
}
// Box |payload| with the tag for |type| into the register pair |dest|.
void
MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
    // Writing the tag first must not clobber the payload source.
    MOZ_ASSERT(payload != dest.typeReg());
    ma_li(dest.typeReg(), ImmType(type));
    if (payload != dest.payloadReg())
        ma_move(dest.payloadReg(), payload);
}

// Push a boxed value onto the stack (sizeof(Value) == 8 bytes: tag+payload).
void
MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
{
    // Allocate stack slots for type and payload. One for each.
    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
    // Store type and payload.
    storeValue(val, Address(StackPointer, 0));
}

// Push the boxed value stored at |addr| onto the stack, copying it word by
// word through the scratch register.
void
MacroAssemblerMIPSCompat::pushValue(const Address &addr)
{
    // Allocate stack slots for type and payload. One for each.
    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
    // Store type and payload.
    ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
    ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
    ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
    ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
}

// Pop a boxed value from the stack into |val|.
void
MacroAssemblerMIPSCompat::popValue(ValueOperand val)
{
    // Load payload and type.
    as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
    as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
    // Free stack.
    as_addiu(StackPointer, StackPointer, sizeof(Value));
}
// Store only the payload word of the constant |val| at |dest|; the tag is
// left untouched.
void
MacroAssemblerMIPSCompat::storePayload(const Value &val, Address dest)
{
    moveData(val, SecondScratchReg);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
}
2794 void
2795 MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
2796 {
2797 ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
2798 return;
2799 }
// Indexed storePayload: element address is base + (index << shift).
void
MacroAssemblerMIPSCompat::storePayload(const Value &val, Register base, Register index,
                                       int32_t shift)
{
    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);

    moveData(val, ScratchRegister);

    as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}

void
MacroAssemblerMIPSCompat::storePayload(Register src, Register base, Register index, int32_t shift)
{
    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
    as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
}

// Store only the type tag word at the destination; payload is untouched.
void
MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
{
    ma_li(SecondScratchReg, tag);
    ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
}

void
MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift)
{
    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
    ma_li(ScratchRegister, tag);
    as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}
// Record the current stack pointer in the runtime's ionTop slot so the
// exit frame can be found when walking the stack.
void
MacroAssemblerMIPSCompat::linkExitFrame()
{
    uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
    movePtr(ImmPtr(dest), ScratchRegister);
    ma_sw(StackPointer, Address(ScratchRegister, 0));
}

// Same as linkExitFrame, but for a parallel thread: |pt| points at its
// PerThreadData.
void
MacroAssemblerMIPSCompat::linkParallelExitFrame(const Register &pt)
{
    ma_sw(StackPointer, Address(pt, offsetof(PerThreadData, ionTop)));
}
// This macroinstruction calls the ion code and pushes the return address to
// the stack in the case when stack is aligned.
void
MacroAssemblerMIPS::ma_callIon(const Register r)
{
    // This is a MIPS hack to push return address during jalr delay slot.
    // Reserve two words (keeps 8-byte alignment), then store $ra in the
    // delay slot of the jalr — $ra is already updated by then.
    as_addiu(StackPointer, StackPointer, -2 * sizeof(intptr_t));
    as_jalr(r);
    as_sw(ra, StackPointer, 0);
}

// This macroinstruction calls the ion code and pushes the return address to
// the stack in the case when stack is not aligned.
void
MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
{
    // This is a MIPS hack to push return address during jalr delay slot.
    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
    as_jalr(r);
    as_sw(ra, StackPointer, 0);
}
// Emit a patchable absolute call to |dest| through CallReg.
void
MacroAssemblerMIPS::ma_call(ImmPtr dest)
{
    ma_liPatchable(CallReg, dest);
    as_jalr(CallReg);
    as_nop();   // branch delay slot
}

// Emit a patchable absolute jump to |dest| through the scratch register.
void
MacroAssemblerMIPS::ma_jump(ImmPtr dest)
{
    ma_liPatchable(ScratchRegister, dest);
    as_jr(ScratchRegister);
    as_nop();   // branch delay slot
}

// Emit a trap instruction (used for unreachable/invalid paths).
void
MacroAssemblerMIPSCompat::breakpoint()
{
    as_break(0);
}
// Ensure |source| holds a number and leave it as a double in |dest|:
// doubles are unboxed directly, int32s are converted, anything else
// branches to |failure|.
void
MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand &source, FloatRegister dest,
                                       Label *failure)
{
    Label isDouble, done;
    branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
    branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

    convertInt32ToDouble(source.payloadReg(), dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
}
// Reset per-call ABI bookkeeping before the arguments are passed.
void
MacroAssemblerMIPSCompat::setupABICall(uint32_t args)
{
    MOZ_ASSERT(!inCall_);
    inCall_ = true;
    args_ = args;
    passedArgs_ = 0;

    usedArgSlots_ = 0;
    // Tracks the first argument's type, which decides f12/f14 usage in the
    // O32 float-argument convention (see passABIArg).
    firstArgType = MoveOp::GENERAL;
}

// Caller guarantees the stack is already ABI-aligned.
void
MacroAssemblerMIPSCompat::setupAlignedABICall(uint32_t args)
{
    setupABICall(args);

    dynamicAlignment_ = false;
}

// Stack alignment is unknown: save the old sp in |scratch|, align sp
// dynamically, and stash the old sp on the stack so callWithABIPost can
// restore it.
void
MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, const Register &scratch)
{
    setupABICall(args);
    dynamicAlignment_ = true;

    ma_move(scratch, StackPointer);

    // Force sp to be aligned
    ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
    ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
    as_sw(scratch, StackPointer, 0);
}
// Queue one ABI argument move according to the MIPS O32 calling
// convention: the first one/two FP arguments go in f12/f14, other
// arguments go in integer registers a0-a3 or on the stack; doubles occupy
// two (aligned) slots.
void
MacroAssemblerMIPSCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
{
    ++passedArgs_;
    if (!enoughMemory_)
        return;
    switch (type) {
      case MoveOp::FLOAT32:
        if (!usedArgSlots_) {
            // First argument: goes in f12.
            if (from.floatReg() != f12)
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
            firstArgType = MoveOp::FLOAT32;
        } else if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
                   (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
            // Second argument, and the first was FP too: goes in f14.
            if (from.floatReg() != f14)
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
        } else {
            // Otherwise: integer register if one is left, else the stack.
            Register destReg;
            if (GetIntArgReg(usedArgSlots_, &destReg)) {
                if (from.isGeneralReg() && from.reg() == destReg) {
                    // Nothing to do. Value is in the right register already
                } else {
                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
                }
            } else {
                uint32_t disp = GetArgStackDisp(usedArgSlots_);
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
            }
        }
        usedArgSlots_++;
        break;
      case MoveOp::DOUBLE:
        if (!usedArgSlots_) {
            // First argument: goes in f12 and consumes two slots.
            if (from.floatReg() != f12)
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
            usedArgSlots_ = 2;
            firstArgType = MoveOp::DOUBLE;
        } else if (usedArgSlots_ <= 2) {
            if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
                (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
                // Second FP argument: goes in f14.
                if (from.floatReg() != f14)
                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
            } else {
                // Create two moves so that cycles are found. Move emitter
                // will have special case to handle this.
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a2), type);
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a3), type);
            }
            usedArgSlots_ = 4;
        } else {
            // Align if necessary
            usedArgSlots_ += usedArgSlots_ % 2;

            uint32_t disp = GetArgStackDisp(usedArgSlots_);
            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
            usedArgSlots_ += 2;
        }
        break;
      case MoveOp::GENERAL:
        // Integer argument: integer register if one remains, else stack.
        Register destReg;
        if (GetIntArgReg(usedArgSlots_, &destReg)) {
            if (from.isGeneralReg() && from.reg() == destReg) {
                // Nothing to do. Value is in the right register already
            } else {
                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
            }
        } else {
            uint32_t disp = GetArgStackDisp(usedArgSlots_);
            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
        }
        usedArgSlots_++;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
}
// Convenience wrappers forwarding to the MoveOperand overload.
void
MacroAssemblerMIPSCompat::passABIArg(const Register &reg)
{
    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}

void
MacroAssemblerMIPSCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type)
{
    passABIArg(MoveOperand(freg), type);
}
// Debug-only runtime check that sp is ABI-aligned; traps if not.
void MacroAssemblerMIPSCompat::checkStackAlignment()
{
#ifdef DEBUG
    Label aligned;
    as_andi(ScratchRegister, sp, StackAlignment - 1);
    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
    as_break(MAX_BREAK_CODE);
    bind(&aligned);
#endif
}

// dest = src rounded up to the next multiple of |alignment| (which must be
// a power of two greater than 1).
void
MacroAssemblerMIPSCompat::alignPointerUp(Register src, Register dest, uint32_t alignment)
{
    MOZ_ASSERT(alignment > 1);
    ma_addu(dest, src, Imm32(alignment - 1));
    ma_and(dest, dest, Imm32(~(alignment - 1)));
}
// Reserve and align stack space for an ABI call, save $ra, and emit the
// queued argument moves. |*stackAdjust| receives the total adjustment for
// callWithABIPost to undo.
void
MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust)
{
    MOZ_ASSERT(inCall_);

    // Reserve place for $ra.
    *stackAdjust = sizeof(intptr_t);

    // O32 always reserves at least the four integer-register home slots.
    *stackAdjust += usedArgSlots_ > NumIntArgRegs ?
                    usedArgSlots_ * sizeof(intptr_t) :
                    NumIntArgRegs * sizeof(intptr_t);

    if (dynamicAlignment_) {
        *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
    } else {
        *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment);
    }

    reserveStack(*stackAdjust);

    // Save $ra because call is going to clobber it. Restore it in
    // callWithABIPost. NOTE: This is needed for calls from BaselineIC.
    // Maybe we can do this differently.
    ma_sw(ra, Address(StackPointer, *stackAdjust - sizeof(intptr_t)));

    // Position all arguments.
    {
        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
        if (!enoughMemory_)
            return;

        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }

    checkStackAlignment();
}
// Undo callWithABIPre: restore $ra and the stack pointer after the call.
void
MacroAssemblerMIPSCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    // Restore ra value (as stored in callWithABIPre()).
    ma_lw(ra, Address(StackPointer, stackAdjust - sizeof(intptr_t)));

    if (dynamicAlignment_) {
        // Restore sp value from stack (as stored in setupUnalignedABICall()).
        ma_lw(StackPointer, Address(StackPointer, stackAdjust));
        // Use adjustFrame instead of freeStack because we already restored sp.
        adjustFrame(-stackAdjust);
    } else {
        freeStack(stackAdjust);
    }

    MOZ_ASSERT(inCall_);
    inCall_ = false;
}
// Full ABI call sequences: pre-adjust stack, call, post-adjust stack.
void
MacroAssemblerMIPSCompat::callWithABI(void *fun, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    ma_call(ImmPtr(fun));
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerMIPSCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(imm);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerMIPSCompat::callWithABI(const Address &fun, MoveOp::Type result)
{
    // Load the callee in t9, no instruction between the lw and call
    // should clobber it. Note that we can't use fun.base because it may
    // be one of the IntArg registers clobbered before the call.
    ma_lw(t9, Address(fun.base, fun.offset));
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(t9);
    callWithABIPost(stackAdjust, result);

}
// On exception: carve out a ResumeFromException record on the stack, call
// |handler| with a pointer to it, then jump to the shared exception tail.
void
MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
{
    // Reserve space for exception information.
    int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
    ma_subu(StackPointer, StackPointer, Imm32(size));
    ma_move(a0, StackPointer); // Use a0 since it is a first function argument

    // Ask for an exception handler.
    setupUnalignedABICall(1, a1);
    passABIArg(a0);
    callWithABI(handler);

    JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
    branch(excTail);
}
// Dispatch on the ResumeFromException record left on the stack by the
// exception handler: unwind to the entry frame, resume at a catch/finally
// block, force a return, or bail out to Baseline.
void
MacroAssemblerMIPSCompat::handleFailureWithHandlerTail()
{
    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    // Already clobbered a0, so use it...
    ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, kind)));
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));

    // We're going to be returning by the ion calling convention
    ma_pop(ra);
    as_jr(ra);
    as_nop();   // branch delay slot

    // If we found a catch handler, this must be a baseline frame. Restore
    // state and jump to the catch block.
    bind(&catch_);
    ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, target)));
    ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
    jump(a0);

    // If we found a finally block, this must be a baseline frame. Push
    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
    // exception.
    bind(&finally);
    ValueOperand exception = ValueOperand(a1, a2);
    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

    ma_lw(a0, Address(sp, offsetof(ResumeFromException, target)));
    ma_lw(BaselineFrameReg, Address(sp, offsetof(ResumeFromException, framePointer)));
    ma_lw(sp, Address(sp, offsetof(ResumeFromException, stackPointer)));

    pushValue(BooleanValue(true));
    pushValue(exception);
    jump(a0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the
    // caller.
    bind(&return_);
    ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
              JSReturnOperand);
    ma_move(StackPointer, BaselineFrameReg);
    pop(BaselineFrameReg);
    ret();

    // If we are bailing out to baseline to handle an exception, jump to
    // the bailout tail stub.
    bind(&bailout);
    ma_lw(a2, Address(sp, offsetof(ResumeFromException, bailoutInfo)));
    ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
    ma_lw(a1, Address(sp, offsetof(ResumeFromException, target)));
    jump(a1);
}
// Emit a jump whose offset is recorded so it can be toggled on/off later.
CodeOffsetLabel
MacroAssemblerMIPSCompat::toggledJump(Label *label)
{
    CodeOffsetLabel ret(nextOffset().getOffset());
    ma_b(label);
    return ret;
}

// Emit a call to |target| that can later be toggled between a real call
// (jalr + delay-slot nop) and two nops. The patchable load of the target
// address is always emitted so the sequence has a fixed size.
CodeOffsetLabel
MacroAssemblerMIPSCompat::toggledCall(JitCode *target, bool enabled)
{
    BufferOffset bo = nextOffset();
    CodeOffsetLabel offset(bo.getOffset());
    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
    if (enabled) {
        as_jalr(ScratchRegister);
        as_nop();
    } else {
        as_nop();
        as_nop();
    }
    // The sequence must match the size the toggling code expects.
    MOZ_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize());
    return offset;
}
// Branch to |label| if |ptr| points into the GC nursery. The range check
// (start <= ptr < start + NurserySize) is done as a single unsigned
// compare after biasing by -start.
void
MacroAssemblerMIPSCompat::branchPtrInNurseryRange(Register ptr, Register temp, Label *label)
{
    JS_ASSERT(temp != InvalidReg);
    const Nursery &nursery = GetIonContext()->runtime->gcNursery();

    // ptr and temp may be the same register, in which case we mustn't trash it
    // before we use its contents.
    if (ptr == temp) {
        // Destructive: bias ptr in place. Caller must not need ptr afterwards.
        addPtr(ImmWord(-ptrdiff_t(nursery.start())), ptr);
        branchPtr(Assembler::Below, ptr, Imm32(Nursery::NurserySize), label);
    } else {
        movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp);
        addPtr(ptr, temp);
        branchPtr(Assembler::Below, temp, Imm32(Nursery::NurserySize), label);
    }
}