js/src/jit/mips/Assembler-mips.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips/Assembler-mips.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jscompartment.h"
#include "jsutil.h"

#include "assembler/jit/ExecutableAllocator.h"
#include "gc/Marking.h"
#include "jit/JitCompartment.h"

using mozilla::DebugOnly;

using namespace js;
using namespace js::jit;

ABIArgGenerator::ABIArgGenerator()
  : usedArgSlots_(0),
    firstArgFloat(false),
    current_()
{}

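// NYI: under the MIPS O32 ABI the first four argument words are passed in
// a0-a3 (with f12/f14 used for leading floating point arguments) and the rest
// on the stack; usedArgSlots_ and firstArgFloat appear intended to track that
// state once next() is implemented.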
ABIArg
ABIArgGenerator::next(MIRType type)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
    return ABIArg();
}
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = t0;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = t1;

// Encode a standard register when it is being used as rd, as rs, or as an
// extra register (rt). These should never be called with an InvalidReg.
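// In the MIPS32 instruction encoding, rs occupies bits 25..21, rt bits 20..16,
// rd bits 15..11 and sa bits 10..6; the *Shift constants used below follow
// that layout.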
uint32_t
js::jit::RS(Register r)
{
    JS_ASSERT((r.code() & ~RegMask) == 0);
    return r.code() << RSShift;
}

uint32_t
js::jit::RT(Register r)
{
    JS_ASSERT((r.code() & ~RegMask) == 0);
    return r.code() << RTShift;
}

uint32_t
js::jit::RT(FloatRegister r)
{
    JS_ASSERT(r.code() < FloatRegisters::Total);
    return r.code() << RTShift;
}

uint32_t
js::jit::RD(Register r)
{
    JS_ASSERT((r.code() & ~RegMask) == 0);
    return r.code() << RDShift;
}

uint32_t
js::jit::RD(FloatRegister r)
{
    JS_ASSERT(r.code() < FloatRegisters::Total);
    return r.code() << RDShift;
}

uint32_t
js::jit::SA(uint32_t value)
{
    JS_ASSERT(value < 32);
    return value << SAShift;
}

uint32_t
js::jit::SA(FloatRegister r)
{
    JS_ASSERT(r.code() < FloatRegisters::Total);
    return r.code() << SAShift;
}

Register
js::jit::toRS(Instruction &i)
{
    return Register::FromCode((i.encode() & RSMask) >> RSShift);
}

Register
js::jit::toRT(Instruction &i)
{
    return Register::FromCode((i.encode() & RTMask) >> RTShift);
}

Register
js::jit::toRD(Instruction &i)
{
    return Register::FromCode((i.encode() & RDMask) >> RDShift);
}

Register
js::jit::toR(Instruction &i)
{
    return Register::FromCode(i.encode() & RegMask);
}

void
InstImm::extractImm16(BOffImm16 *dest)
{
    *dest = BOffImm16(*this);
}

// Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch.
void
jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label)
{
    Instruction *inst1 = (Instruction *)jump_.raw();
    Instruction *inst2 = inst1->next();

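    // The jump site is a lui/ori pair; rewriting both 16-bit immediates below
    // retargets it, and the two patched instructions (8 bytes) are then
    // flushed from the instruction cache.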
    Assembler::updateLuiOriValue(inst1, inst2, (uint32_t)label.raw());

    AutoFlushICache::flush(uintptr_t(inst1), 8);
}

void
Assembler::finish()
{
    JS_ASSERT(!isFinished);
    isFinished = true;
}

void
Assembler::executableCopy(uint8_t *buffer)
{
    JS_ASSERT(isFinished);
    m_buffer.executableCopy(buffer);

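    // Long jumps were emitted as lui/ori pairs holding buffer-relative
    // offsets; now that the final address of the code is known, rebase each of
    // them to an absolute address.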
    // Patch all long jumps during code copy.
    for (size_t i = 0; i < longJumps_.length(); i++) {
        Instruction *inst1 = (Instruction *) ((uint32_t)buffer + longJumps_[i]);

        uint32_t value = extractLuiOriValue(inst1, inst1->next());
        updateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
    }

    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}

uint32_t
Assembler::actualOffset(uint32_t off_) const
{
    return off_;
}

uint32_t
Assembler::actualIndex(uint32_t idx_) const
{
    return idx_;
}

uint8_t *
Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_)
{
    return code->raw() + pe_;
}

class RelocationIterator
{
    CompactBufferReader reader_;
    // offset in bytes
    uint32_t offset_;

  public:
    RelocationIterator(CompactBufferReader &reader)
      : reader_(reader)
    { }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
};

uintptr_t
Assembler::getPointer(uint8_t *instPtr)
{
    Instruction *inst = (Instruction*)instPtr;
    return Assembler::extractLuiOriValue(inst, inst->next());
}

static JitCode *
CodeFromJump(Instruction *jump)
{
    uint8_t *target = (uint8_t *)Assembler::extractLuiOriValue(jump, jump->next());
    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        JitCode *child = CodeFromJump((Instruction *)(code->raw() + iter.offset()));
        MarkJitCodeUnbarriered(trc, &child, "rel32");
    }
}

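// Each data relocation records the offset of a lui/ori pair whose immediate
// value is a pointer to a GC thing; tracing re-reads that pointer so the GC
// can mark the referent.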
static void
TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader)
{
    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction *inst = (Instruction*)(buffer + offset);
        void *ptr = (void *)Assembler::extractLuiOriValue(inst, inst->next());

        // No barrier needed since these are constants.
        gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
    }
}

static void
TraceDataRelocations(JSTracer *trc, MIPSBuffer *buffer, CompactBufferReader &reader)
{
    while (reader.more()) {
        BufferOffset bo (reader.readUnsigned());
        MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);

        void *ptr = (void *)Assembler::extractLuiOriValue(iter.cur(), iter.next());

        // No barrier needed since these are constants.
        gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr");
    }
}

void
Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader)
{
    ::TraceDataRelocations(trc, code->raw(), reader);
}

void
Assembler::copyJumpRelocationTable(uint8_t *dest)
{
    if (jumpRelocations_.length())
        memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
}

void
Assembler::copyDataRelocationTable(uint8_t *dest)
{
    if (dataRelocations_.length())
        memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
}

void
Assembler::copyPreBarrierTable(uint8_t *dest)
{
    if (preBarriers_.length())
        memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
}

void
Assembler::trace(JSTracer *trc)
{
    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch &rp = jumps_[i];
        if (rp.kind == Relocation::JITCODE) {
            JitCode *code = JitCode::FromExecutable((uint8_t *)rp.target);
            MarkJitCodeUnbarriered(trc, &code, "masmrel32");
            JS_ASSERT(code == JitCode::FromExecutable((uint8_t *)rp.target));
        }
    }
    if (dataRelocations_.length()) {
        CompactBufferReader reader(dataRelocations_);
        ::TraceDataRelocations(trc, &m_buffer, reader);
    }
}

void
Assembler::processCodeLabels(uint8_t *rawCode)
{
    for (size_t i = 0; i < codeLabels_.length(); i++) {
        CodeLabel label = codeLabels_[i];
        Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset()));
    }
}

void
Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address)
{
    if (label->used()) {
        int32_t src = label->offset();
        do {
            Instruction *inst = (Instruction *) (rawCode + src);
            uint32_t next = Assembler::extractLuiOriValue(inst, inst->next());
            Assembler::updateLuiOriValue(inst, inst->next(), (uint32_t)address);
            src = next;
        } while (src != AbsoluteLabel::INVALID_OFFSET);
    }
    label->bind();
}

Assembler::Condition
Assembler::InvertCondition(Condition cond)
{
    switch (cond) {
      case Equal:
        return NotEqual;
      case NotEqual:
        return Equal;
      case Zero:
        return NonZero;
      case NonZero:
        return Zero;
      case LessThan:
        return GreaterThanOrEqual;
      case LessThanOrEqual:
        return GreaterThan;
      case GreaterThan:
        return LessThanOrEqual;
      case GreaterThanOrEqual:
        return LessThan;
      case Above:
        return BelowOrEqual;
      case AboveOrEqual:
        return Below;
      case Below:
        return AboveOrEqual;
      case BelowOrEqual:
        return Above;
      case Signed:
        return NotSigned;
      case NotSigned:
        return Signed;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected condition");
        return Equal;
    }
}

Assembler::DoubleCondition
Assembler::InvertCondition(DoubleCondition cond)
{
    switch (cond) {
      case DoubleOrdered:
        return DoubleUnordered;
      case DoubleEqual:
        return DoubleNotEqualOrUnordered;
      case DoubleNotEqual:
        return DoubleEqualOrUnordered;
      case DoubleGreaterThan:
        return DoubleLessThanOrEqualOrUnordered;
      case DoubleGreaterThanOrEqual:
        return DoubleLessThanOrUnordered;
      case DoubleLessThan:
        return DoubleGreaterThanOrEqualOrUnordered;
      case DoubleLessThanOrEqual:
        return DoubleGreaterThanOrUnordered;
      case DoubleUnordered:
        return DoubleOrdered;
      case DoubleEqualOrUnordered:
        return DoubleNotEqual;
      case DoubleNotEqualOrUnordered:
        return DoubleEqual;
      case DoubleGreaterThanOrUnordered:
        return DoubleLessThanOrEqual;
      case DoubleGreaterThanOrEqualOrUnordered:
        return DoubleLessThan;
      case DoubleLessThanOrUnordered:
        return DoubleGreaterThanOrEqual;
      case DoubleLessThanOrEqualOrUnordered:
        return DoubleGreaterThan;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected condition");
        return DoubleEqual;
    }
}

BOffImm16::BOffImm16(InstImm inst)
  : data(inst.encode() & Imm16Mask)
{
}

bool
Assembler::oom() const
{
    return m_buffer.oom() ||
           !enoughMemory_ ||
           jumpRelocations_.oom() ||
           dataRelocations_.oom() ||
           preBarriers_.oom();
}

bool
Assembler::addCodeLabel(CodeLabel label)
{
    return codeLabels_.append(label);
}

// Size of the instruction stream, in bytes.
size_t
Assembler::size() const
{
    return m_buffer.size();
}

// Size of the relocation table, in bytes.
size_t
Assembler::jumpRelocationTableBytes() const
{
    return jumpRelocations_.length();
}

size_t
Assembler::dataRelocationTableBytes() const
{
    return dataRelocations_.length();
}

size_t
Assembler::preBarrierTableBytes() const
{
    return preBarriers_.length();
}

// Size of the data table, in bytes.
size_t
Assembler::bytesNeeded() const
{
    return size() +
           jumpRelocationTableBytes() +
           dataRelocationTableBytes() +
           preBarrierTableBytes();
}

// Write a blob of binary into the instruction stream.
BufferOffset
Assembler::writeInst(uint32_t x, uint32_t *dest)
{
    if (dest == nullptr)
        return m_buffer.putInt(x);

    writeInstStatic(x, dest);
    return BufferOffset();
}

void
Assembler::writeInstStatic(uint32_t x, uint32_t *dest)
{
    JS_ASSERT(dest != nullptr);
    *dest = x;
}

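// Pad the instruction stream with nops until the requested alignment is
// reached. MIPS instructions are always 4-byte aligned, so the buffer is
// expected to already be aligned to 4.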
BufferOffset
Assembler::align(int alignment)
{
    BufferOffset ret;
    JS_ASSERT(m_buffer.isAligned(4));
    if (alignment == 8) {
        if (!m_buffer.isAligned(alignment)) {
            BufferOffset tmp = as_nop();
            if (!ret.assigned())
                ret = tmp;
        }
    } else {
        JS_ASSERT((alignment & (alignment - 1)) == 0);
        while (size() & (alignment - 1)) {
            BufferOffset tmp = as_nop();
            if (!ret.assigned())
                ret = tmp;
        }
    }
    return ret;
}

BufferOffset
Assembler::as_nop()
{
    return writeInst(op_special | ff_sll);
}

// Logical operations.
BufferOffset
Assembler::as_and(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
}

BufferOffset
Assembler::as_or(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
}

BufferOffset
Assembler::as_xor(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
}

BufferOffset
Assembler::as_nor(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
}

BufferOffset
Assembler::as_andi(Register rd, Register rs, int32_t j)
{
    JS_ASSERT(Imm16::isInUnsignedRange(j));
    return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
}

BufferOffset
Assembler::as_ori(Register rd, Register rs, int32_t j)
{
    JS_ASSERT(Imm16::isInUnsignedRange(j));
    return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
}

BufferOffset
Assembler::as_xori(Register rd, Register rs, int32_t j)
{
    JS_ASSERT(Imm16::isInUnsignedRange(j));
    return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
}

// Branch and jump instructions
BufferOffset
Assembler::as_bal(BOffImm16 off)
{
    BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
    return bo;
}

InstImm
Assembler::getBranchCode(JumpOrCall jumpOrCall)
{
    if (jumpOrCall == BranchIsCall)
        return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));

    return InstImm(op_beq, zero, zero, BOffImm16(0));
}

InstImm
Assembler::getBranchCode(Register s, Register t, Condition c)
{
    JS_ASSERT(c == Assembler::Equal || c == Assembler::NotEqual);
    return InstImm(c == Assembler::Equal ? op_beq : op_bne, s, t, BOffImm16(0));
}

InstImm
Assembler::getBranchCode(Register s, Condition c)
{
    switch (c) {
      case Assembler::Equal:
      case Assembler::Zero:
      case Assembler::BelowOrEqual:
        return InstImm(op_beq, s, zero, BOffImm16(0));
      case Assembler::NotEqual:
      case Assembler::NonZero:
      case Assembler::Above:
        return InstImm(op_bne, s, zero, BOffImm16(0));
      case Assembler::GreaterThan:
        return InstImm(op_bgtz, s, zero, BOffImm16(0));
      case Assembler::GreaterThanOrEqual:
      case Assembler::NotSigned:
        return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
      case Assembler::LessThan:
      case Assembler::Signed:
        return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
      case Assembler::LessThanOrEqual:
        return InstImm(op_blez, s, zero, BOffImm16(0));
      default:
        MOZ_ASSUME_UNREACHABLE("Condition not supported.");
    }
}

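// Branches on a floating point condition are bc1t/bc1f instructions: within
// the rt field, the upper bits select the condition code (fcc) and the lowest
// bit picks branch-on-true (1) or branch-on-false (0).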
InstImm
Assembler::getBranchCode(FloatTestKind testKind, FPConditionBit fcc)
{
    JS_ASSERT(!(fcc & ~FccMask));
    uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;

    return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
}

BufferOffset
Assembler::as_j(JOffImm26 off)
{
    BufferOffset bo = writeInst(InstJump(op_j, off).encode());
    return bo;
}
BufferOffset
Assembler::as_jal(JOffImm26 off)
{
    BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
    return bo;
}

BufferOffset
Assembler::as_jr(Register rs)
{
    BufferOffset bo = writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
    return bo;
}
BufferOffset
Assembler::as_jalr(Register rs)
{
    BufferOffset bo = writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
    return bo;
}


// Arithmetic instructions
BufferOffset
Assembler::as_addu(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
}

BufferOffset
Assembler::as_addiu(Register rd, Register rs, int32_t j)
{
    JS_ASSERT(Imm16::isInSignedRange(j));
    return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
}

BufferOffset
Assembler::as_subu(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
}

BufferOffset
Assembler::as_mult(Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
}

BufferOffset
Assembler::as_multu(Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
}

BufferOffset
Assembler::as_div(Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
}

BufferOffset
Assembler::as_divu(Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
}

BufferOffset
Assembler::as_mul(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
}

BufferOffset
Assembler::as_lui(Register rd, int32_t j)
{
    JS_ASSERT(Imm16::isInUnsignedRange(j));
    return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
}

// Shift instructions
BufferOffset
Assembler::as_sll(Register rd, Register rt, uint16_t sa)
{
    JS_ASSERT(sa < 32);
    return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
}

BufferOffset
Assembler::as_sllv(Register rd, Register rt, Register rs)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
}

BufferOffset
Assembler::as_srl(Register rd, Register rt, uint16_t sa)
{
    JS_ASSERT(sa < 32);
    return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
}

BufferOffset
Assembler::as_srlv(Register rd, Register rt, Register rs)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
}

BufferOffset
Assembler::as_sra(Register rd, Register rt, uint16_t sa)
{
    JS_ASSERT(sa < 32);
    return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
}

BufferOffset
Assembler::as_srav(Register rd, Register rt, Register rs)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
}

BufferOffset
Assembler::as_rotr(Register rd, Register rt, uint16_t sa)
{
    JS_ASSERT(sa < 32);
    return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
}

BufferOffset
Assembler::as_rotrv(Register rd, Register rt, Register rs)
{
    return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
}

// Load and store instructions
BufferOffset
Assembler::as_lb(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lbu(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lh(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lhu(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lw(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lwl(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_lwr(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_sb(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_sh(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_sw(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_swl(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_swr(Register rd, Register rs, int16_t off)
{
    return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
}

// Move from HI/LO register.
BufferOffset
Assembler::as_mfhi(Register rd)
{
    return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
}

BufferOffset
Assembler::as_mflo(Register rd)
{
    return writeInst(InstReg(op_special, rd, ff_mflo).encode());
}

// Set on less than.
BufferOffset
Assembler::as_slt(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
}

BufferOffset
Assembler::as_sltu(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
}

BufferOffset
Assembler::as_slti(Register rd, Register rs, int32_t j)
{
    JS_ASSERT(Imm16::isInSignedRange(j));
    return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
}

BufferOffset
Assembler::as_sltiu(Register rd, Register rs, uint32_t j)
{
    JS_ASSERT(Imm16::isInUnsignedRange(j));
    return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
}

// Conditional move.
BufferOffset
Assembler::as_movz(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
}

BufferOffset
Assembler::as_movn(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
}

BufferOffset
Assembler::as_movt(Register rd, Register rs, uint16_t cc)
{
    Register rt;
    rt = Register::FromCode((cc & 0x7) << 2 | 1);
    return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
}

BufferOffset
Assembler::as_movf(Register rd, Register rs, uint16_t cc)
{
    Register rt;
    rt = Register::FromCode((cc & 0x7) << 2 | 0);
    return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
}

// Bit twiddling.
BufferOffset
Assembler::as_clz(Register rd, Register rs, Register rt)
{
    return writeInst(InstReg(op_special2, rs, rt, rd, ff_clz).encode());
}

BufferOffset
Assembler::as_ins(Register rt, Register rs, uint16_t pos, uint16_t size)
{
    JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
    Register rd;
    rd = Register::FromCode(pos + size - 1);
    return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
}

BufferOffset
Assembler::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size)
{
    JS_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
    Register rd;
    rd = Register::FromCode(size - 1);
    return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
}

// FP instructions
BufferOffset
Assembler::as_ld(FloatRegister fd, Register base, int32_t off)
{
    JS_ASSERT(Imm16::isInSignedRange(off));
    return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_sd(FloatRegister fd, Register base, int32_t off)
{
    JS_ASSERT(Imm16::isInSignedRange(off));
    return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_ls(FloatRegister fd, Register base, int32_t off)
{
    JS_ASSERT(Imm16::isInSignedRange(off));
    return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_ss(FloatRegister fd, Register base, int32_t off)
{
    JS_ASSERT(Imm16::isInSignedRange(off));
    return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
}

BufferOffset
Assembler::as_movs(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
}

BufferOffset
Assembler::as_movd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
}

BufferOffset
Assembler::as_mtc1(Register rt, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
}

BufferOffset
Assembler::as_mfc1(Register rt, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
}

// FP convert instructions
BufferOffset
Assembler::as_ceilws(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
}

BufferOffset
Assembler::as_floorws(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
}

BufferOffset
Assembler::as_roundws(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
}

BufferOffset
Assembler::as_truncws(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
}

BufferOffset
Assembler::as_ceilwd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
}

BufferOffset
Assembler::as_floorwd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
}

BufferOffset
Assembler::as_roundwd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
}

BufferOffset
Assembler::as_truncwd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
}

BufferOffset
Assembler::as_cvtds(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
}

BufferOffset
Assembler::as_cvtdw(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
}

BufferOffset
Assembler::as_cvtsd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
}

BufferOffset
Assembler::as_cvtsw(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
}

BufferOffset
Assembler::as_cvtwd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
}

BufferOffset
Assembler::as_cvtws(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
}

// FP arithmetic instructions
BufferOffset
Assembler::as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
}

BufferOffset
Assembler::as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
}

BufferOffset
Assembler::as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
}

BufferOffset
Assembler::as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
}

BufferOffset
Assembler::as_abss(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
}

BufferOffset
Assembler::as_absd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
}

BufferOffset
Assembler::as_negd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
}

BufferOffset
Assembler::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
}

BufferOffset
Assembler::as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
}

BufferOffset
Assembler::as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
}

BufferOffset
Assembler::as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
{
    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
}

BufferOffset
Assembler::as_sqrts(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
}

BufferOffset
Assembler::as_sqrtd(FloatRegister fd, FloatRegister fs)
{
    return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
}

// FP compare instructions
BufferOffset
Assembler::as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
}

BufferOffset
Assembler::as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
}

BufferOffset
Assembler::as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
}

BufferOffset
Assembler::as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
}

BufferOffset
Assembler::as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
}

BufferOffset
Assembler::as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
}

BufferOffset
Assembler::as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
}

BufferOffset
Assembler::as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
{
    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
}


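// An unbound label's uses form a linked list threaded through the code: the
// first word of each use is the branch instruction itself and the following
// word stores the buffer offset of the next use (or INVALID_OFFSET at the end
// of the chain).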
void
Assembler::bind(Label *label, BufferOffset boff)
{
    // If our caller didn't give us an explicit target to bind to,
    // then we want to bind to the location of the next instruction.
    BufferOffset dest = boff.assigned() ? boff : nextOffset();
    if (label->used()) {
        int32_t next;

        // A used label holds a link to the branch that uses it.
        BufferOffset b(label);
        do {
            Instruction *inst = editSrc(b);

            // The second word holds a pointer to the next branch in the
            // label's chain.
            next = inst[1].encode();
            bind(reinterpret_cast<InstImm *>(inst), b.getOffset(), dest.getOffset());

            b = BufferOffset(next);
        } while (next != LabelBase::INVALID_OFFSET);
    }
    label->bind(dest.getOffset());
}

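// Branches are emitted with enough trailing slots that they can be patched in
// place once the target is known: a target in BOffImm16 range keeps the short
// branch form, while an out-of-range target is rewritten as a lui/ori of the
// address followed by a jr/jalr through ScratchRegister.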
void
Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
{
    int32_t offset = target - branch;
    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

    // If the encoded offset is 4, then the jump must be short.
    if (BOffImm16(inst[0]).decode() == 4) {
        JS_ASSERT(BOffImm16::isInRange(offset));
        inst[0].setBOffImm16(BOffImm16(offset));
        inst[1].makeNop();
        return;
    }
    if (BOffImm16::isInRange(offset)) {
        bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
                            inst[0].encode() != inst_beq.encode());

        inst[0].setBOffImm16(BOffImm16(offset));
        inst[1].makeNop();

        // Skip the trailing nops in conditional branches.
        if (conditional) {
            inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void *))).encode();
            // There are 2 nops after this.
        }
        return;
    }

    if (inst[0].encode() == inst_bgezal.encode()) {
        // Handle long call.
        addLongJump(BufferOffset(branch));
        writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
        inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
        // There is 1 nop after this.
    } else if (inst[0].encode() == inst_beq.encode()) {
        // Handle long unconditional jump.
        addLongJump(BufferOffset(branch));
        writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
        inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
        // There is 1 nop after this.
    } else {
        // Handle long conditional jump.
        inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void *)));
        // No need for a "nop" here because we can clobber scratch.
        addLongJump(BufferOffset(branch + sizeof(void *)));
        writeLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
        inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
        // There is 1 nop after this.
    }
}

void
Assembler::bind(RepatchLabel *label)
{
    BufferOffset dest = nextOffset();
    if (label->used()) {
        // If the label has a use, then change this use to refer to
        // the bound label.
        BufferOffset b(label->offset());
        Instruction *inst1 = editSrc(b);
        Instruction *inst2 = inst1->next();

        updateLuiOriValue(inst1, inst2, dest.getOffset());
    }
    label->bind(dest.getOffset());
}

void
Assembler::retarget(Label *label, Label *target)
{
    if (label->used()) {
        if (target->bound()) {
            bind(label, BufferOffset(target));
        } else if (target->used()) {
            // The target is not bound but used. Prepend label's branch list
            // onto target's.
            int32_t next;
            BufferOffset labelBranchOffset(label);

            // Find the head of the use chain for label.
            do {
                Instruction *inst = editSrc(labelBranchOffset);

                // The second word holds a pointer to the next branch in the chain.
                next = inst[1].encode();
                labelBranchOffset = BufferOffset(next);
            } while (next != LabelBase::INVALID_OFFSET);

            // Then patch the head of label's use chain to the tail of
            // target's use chain, prepending the entire use chain of target.
            Instruction *inst = editSrc(labelBranchOffset);
            int32_t prev = target->use(label->offset());
            inst[1].setData(prev);
        } else {
            // The target is unbound and unused. We can just take the head of
            // the list hanging off of label, and dump that into target.
            DebugOnly<uint32_t> prev = target->use(label->offset());
            JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
        }
    }
    label->reset();
}

void dbg_break() {}
static int stopBKPT = -1;
void
Assembler::as_break(uint32_t code)
{
    JS_ASSERT(code <= MAX_BREAK_CODE);
    writeInst(op_special | code << RTShift | ff_break);
}

uint32_t
Assembler::patchWrite_NearCallSize()
{
    return 4 * sizeof(uint32_t);
}

void
Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
    Instruction *inst = (Instruction *) start.raw();
    uint8_t *dest = toCall.raw();

    // Overwrite whatever instruction used to be here with a call.
    // Always use long jump for two reasons:
    // - Jump has to be the same size because of patchWrite_NearCallSize.
    // - Return address has to be at the end of replaced block.
    // Short jump wouldn't be more efficient.
    writeLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
    inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
    inst[3] = InstNOP();

    // Ensure everyone sees the code that was just written into memory.
    AutoFlushICache::flush(uintptr_t(inst), patchWrite_NearCallSize());
}

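// 32-bit constants and code pointers are materialized as a two-instruction
// sequence: lui loads the upper 16 bits and ori fills in the lower 16 bits.
// The helpers below read, rewrite and emit that pattern.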
uint32_t
Assembler::extractLuiOriValue(Instruction *inst0, Instruction *inst1)
{
    InstImm *i0 = (InstImm *) inst0;
    InstImm *i1 = (InstImm *) inst1;
    JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    uint32_t value = i0->extractImm16Value() << 16;
    value = value | i1->extractImm16Value();
    return value;
}

void
Assembler::updateLuiOriValue(Instruction *inst0, Instruction *inst1, uint32_t value)
{
    JS_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    JS_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    ((InstImm *) inst0)->setImm16(Imm16::upper(Imm32(value)));
    ((InstImm *) inst1)->setImm16(Imm16::lower(Imm32(value)));
}

void
Assembler::writeLuiOriInstructions(Instruction *inst0, Instruction *inst1,
                                   Register reg, uint32_t value)
{
    *inst0 = InstImm(op_lui, zero, reg, Imm16::upper(Imm32(value)));
    *inst1 = InstImm(op_ori, reg, reg, Imm16::lower(Imm32(value)));
}

void
Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                   PatchedImmPtr expectedValue)
{
    Instruction *inst = (Instruction *) label.raw();

    // Extract old Value
    DebugOnly<uint32_t> value = Assembler::extractLuiOriValue(&inst[0], &inst[1]);
    JS_ASSERT(value == uint32_t(expectedValue.value));

    // Replace with new value
    Assembler::updateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));

    AutoFlushICache::flush(uintptr_t(inst), 8);
}

void
Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue)
{
    patchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                            PatchedImmPtr(expectedValue.value));
}

// This just stomps over memory with 32 bits of raw data. Its purpose is to
// overwrite the call of JITed code with 32 bits worth of an offset. This is
// only meant to function on code that has been invalidated, so it should be
// totally safe. Since that instruction will never be executed again, an
// ICache flush should not be necessary.
void
Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
{
    // Raw is going to be the return address.
    uint32_t *raw = (uint32_t*)label.raw();
    // Overwrite the 4 bytes before the return address, which will
    // end up being the call instruction.
    *(raw - 1) = imm.value;
}

uint8_t *
Assembler::nextInstruction(uint8_t *inst_, uint32_t *count)
{
    Instruction *inst = reinterpret_cast<Instruction*>(inst_);
    if (count != nullptr)
        *count += sizeof(Instruction);
    return reinterpret_cast<uint8_t*>(inst->next());
}

// Since there are no constant pools in the MIPS implementation, this is simple.
Instruction *
Instruction::next()
{
    return this + 1;
}

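// Flip the sense of a branch and give it a small offset that skips over the
// long-jump sequence emitted after it; used by bind() when a conditional
// branch's target is out of BOffImm16 range.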
InstImm
Assembler::invertBranch(InstImm branch, BOffImm16 skipOffset)
{
    uint32_t rt = 0;
    Opcode op = (Opcode) (branch.extractOpcode() << OpcodeShift);
    switch (op) {
      case op_beq:
        branch.setBOffImm16(skipOffset);
        branch.setOpcode(op_bne);
        return branch;
      case op_bne:
        branch.setBOffImm16(skipOffset);
        branch.setOpcode(op_beq);
        return branch;
      case op_bgtz:
        branch.setBOffImm16(skipOffset);
        branch.setOpcode(op_blez);
        return branch;
      case op_blez:
        branch.setBOffImm16(skipOffset);
        branch.setOpcode(op_bgtz);
        return branch;
      case op_regimm:
        branch.setBOffImm16(skipOffset);
        rt = branch.extractRT();
        if (rt == (rt_bltz >> RTShift)) {
            branch.setRT(rt_bgez);
            return branch;
        }
        if (rt == (rt_bgez >> RTShift)) {
            branch.setRT(rt_bltz);
            return branch;
        }

        MOZ_ASSUME_UNREACHABLE("Error creating long branch.");
        return branch;

      case op_cop1:
        JS_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);

        branch.setBOffImm16(skipOffset);
        rt = branch.extractRT();
        if (rt & 0x1)
            branch.setRT((RTField) ((rt & ~0x1) << RTShift));
        else
            branch.setRT((RTField) ((rt | 0x1) << RTShift));
        return branch;
    }

    MOZ_ASSUME_UNREACHABLE("Error creating long branch.");
    return branch;
}

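// A toggled jump site alternates between an always-taken "beq $zero, $zero,
// offset" and an "andi $zero, $zero, offset", which has no architectural
// effect; only the opcode field changes, so the branch offset is preserved
// across toggles.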
void
Assembler::ToggleToJmp(CodeLocationLabel inst_)
{
    InstImm *inst = (InstImm *)inst_.raw();

    JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
    // We converted beq to andi, so now we restore it.
    inst->setOpcode(op_beq);

    AutoFlushICache::flush(uintptr_t(inst), 4);
}

void
Assembler::ToggleToCmp(CodeLocationLabel inst_)
{
    InstImm *inst = (InstImm *)inst_.raw();

    // toggledJump is always used for short jumps.
    JS_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
    // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset".
    inst->setOpcode(op_andi);

    AutoFlushICache::flush(uintptr_t(inst), 4);
}

void
Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
{
    Instruction *inst = (Instruction *)inst_.raw();
    InstImm *i0 = (InstImm *) inst;
    InstImm *i1 = (InstImm *) i0->next();
    Instruction *i2 = (Instruction *) i1->next();

    JS_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    JS_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    if (enabled) {
        InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
        *i2 = jalr;
    } else {
        InstNOP nop;
        *i2 = nop;
    }

    AutoFlushICache::flush(uintptr_t(i2), 4);
}

void
Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
