/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/MacroAssembler-arm.h"

#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jit/arm/Simulator-arm.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/IonFrames.h"
#include "jit/MoveEmitter.h"

using namespace js;
using namespace jit;

using mozilla::Abs;
using mozilla::BitwiseCast;

bool
isValueDTRDCandidate(ValueOperand &val)
{
    // In order to be used for a DTRD memory function, the two target
    // registers need to be a) adjacent, with the tag larger than the payload,
    // and b) aligned to a multiple of two.
    if (val.typeReg().code() != (val.payloadReg().code() + 1))
        return false;
    if ((val.payloadReg().code() & 1) != 0)
        return false;
    return true;
}
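
// Example (illustrative register choices, not from any particular caller):
// the pair {payload: r0, tag: r1} qualifies, since r1 == r0 + 1 and r0 is
// even; {payload: r1, tag: r2} fails the alignment test because r1 is odd.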

void
MacroAssemblerARM::convertBoolToInt32(Register source, Register dest)
{
    // Note that C++ bool is only 1 byte, so zero extend it to clear the
    // higher-order bits.
    ma_and(Imm32(0xff), source, dest);
}

void
MacroAssemblerARM::convertInt32ToDouble(const Register &src, const FloatRegister &dest_)
{
    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
    as_vcvt(dest, dest.sintOverlay());
}

void
MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest)
{
    ma_vldr(Operand(src), ScratchFloatReg);
    as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay());
}

void
MacroAssemblerARM::convertUInt32ToDouble(const Register &src, const FloatRegister &dest_)
{
    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(dest, dest.uintOverlay());
}

void
MacroAssemblerARM::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest_)
{
    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
}

void
MacroAssemblerARM::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest,
                                          Condition c)
{
    as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
}

// There are two options for implementing emitTruncateDouble:
//
// 1) Convert the floating point value to an integer; if it did not fit, then
//    it was clamped to INT_MIN/INT_MAX, and we can test for that.
//    NOTE: if the value really was supposed to be INT_MAX or INT_MIN then the
//    test gives the wrong answer.
//
// 2) Convert the floating point value to an integer; if it did not fit, then
//    the conversion set one or two bits in the FPSCR. Check those.
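//
// Option 1 is what branchTruncateDouble below uses. Worked example
// (illustrative values): vcvt of 1.0e12 overflows int32 and saturates to
// 0x7fffffff, so comparing the result against 0x7fffffff and 0x80000000
// catches the overflow; a genuine input of exactly 2147483647.0 would be
// misclassified, as the NOTE above warns.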
void
MacroAssemblerARM::branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    ma_vcvt_F64_I32(src, ScratchFloatReg);
    ma_vxfer(ScratchFloatReg, dest);
    ma_cmp(dest, Imm32(0x7fffffff));
    ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    ma_b(fail, Assembler::Equal);
}

// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerARM::convertDoubleToInt32(const FloatRegister &src, const Register &dest,
                                        Label *fail, bool negativeZeroCheck)
{
    // Convert the floating point value to an integer. If it did not fit, then
    // when we convert it *back* to a double, it will have a different value,
    // which we can test.
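    //
    // Worked example (illustrative): for src == 3.5, vcvt yields 3, and
    // converting 3 back gives 3.0; since 3.0 != 3.5, the vcmp below reports
    // not-equal and we branch to fail. For src == 3.0 the round trip is exact
    // and the conversion is accepted.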
    ma_vcvt_F64_I32(src, ScratchFloatReg);
    // Move the value into the dest register.
    ma_vxfer(ScratchFloatReg, dest);
    ma_vcvt_I32_F64(ScratchFloatReg, ScratchFloatReg);
    ma_vcmp(src, ScratchFloatReg);
    as_vmrs(pc);
    ma_b(fail, Assembler::VFP_NotEqualOrUnordered);

    if (negativeZeroCheck) {
        ma_cmp(dest, Imm32(0));
        // Test and bail for -0.0, when the integer result is 0. Move the top
        // word of the double into the output reg; if it is non-zero, then the
        // original value was -0.0.
        as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
        ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
        ma_b(fail, Assembler::Equal);
    }
}

// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerARM::convertFloat32ToInt32(const FloatRegister &src, const Register &dest,
                                         Label *fail, bool negativeZeroCheck)
{
    // Convert the floating point value to an integer. If it did not fit, then
    // when we convert it *back* to a float, it will have a different value,
    // which we can test.
    ma_vcvt_F32_I32(src, ScratchFloatReg);
    // Move the value into the dest register.
    ma_vxfer(ScratchFloatReg, dest);
    ma_vcvt_I32_F32(ScratchFloatReg, ScratchFloatReg);
    ma_vcmp_f32(src, ScratchFloatReg);
    as_vmrs(pc);
    ma_b(fail, Assembler::VFP_NotEqualOrUnordered);

    if (negativeZeroCheck) {
        ma_cmp(dest, Imm32(0));
        // Test and bail for -0.0, when the integer result is 0. Move the
        // float into the output reg; if it is non-zero, then the original
        // value was -0.0.
        as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore,
                 Assembler::Equal, 0);
        ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
        ma_b(fail, Assembler::Equal);
    }
}

void
MacroAssemblerARM::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest)
{
    as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay());
}

void
MacroAssemblerARM::branchTruncateFloat32(const FloatRegister &src, const Register &dest,
                                         Label *fail)
{
    ma_vcvt_F32_I32(src, ScratchFloatReg);
    ma_vxfer(ScratchFloatReg, dest);
    ma_cmp(dest, Imm32(0x7fffffff));
    ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    ma_b(fail, Assembler::Equal);
}

void
MacroAssemblerARM::convertInt32ToFloat32(const Register &src, const FloatRegister &dest_)
{
    // Direct conversions aren't possible.
    VFPRegister dest = VFPRegister(dest_).singleOverlay();
    as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
    as_vcvt(dest, dest.sintOverlay());
}

void
MacroAssemblerARM::convertInt32ToFloat32(const Address &src, FloatRegister dest)
{
    ma_vldr(Operand(src), ScratchFloatReg);
    as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay());
}

void
MacroAssemblerARM::addDouble(FloatRegister src, FloatRegister dest)
{
    ma_vadd(dest, src, dest);
}

void
MacroAssemblerARM::subDouble(FloatRegister src, FloatRegister dest)
{
    ma_vsub(dest, src, dest);
}

void
MacroAssemblerARM::mulDouble(FloatRegister src, FloatRegister dest)
{
    ma_vmul(dest, src, dest);
}

void
MacroAssemblerARM::divDouble(FloatRegister src, FloatRegister dest)
{
    ma_vdiv(dest, src, dest);
}

void
MacroAssemblerARM::negateDouble(FloatRegister reg)
{
    ma_vneg(reg, reg);
}

void
MacroAssemblerARM::inc64(AbsoluteAddress dest)
{
    ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);

    ma_mov(Imm32((int32_t)dest.addr), ScratchRegister);
    ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(0)), r0, r1);

    ma_add(Imm32(1), r0, SetCond);
    ma_adc(Imm32(0), r1, NoSetCond);

    ma_strd(r0, r1, EDtrAddr(ScratchRegister, EDtrOffImm(0)));

    ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
}

bool
MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                           SetCond_ sc, Condition c)
{
    if ((sc == SetCond && !condsAreSafe(op)) || !can_dbl(op))
        return false;
    ALUOp interop = getDestVariant(op);
    Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value);
    if (both.fst.invalid)
        return false;
    // For the most part, there is no good reason to set the condition codes
    // for the first instruction. We can do better things if the second
    // instruction doesn't have a dest, such as checking for overflow by doing
    // the first operation and skipping the second operation if the first
    // operation overflowed. This preserves the overflow condition code.
    // Unfortunately, it is horribly brittle.
    as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
    as_alu(dest, ScratchRegister, both.snd, op, sc, c);
    return true;
}
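
// As an illustrative example of the two-instruction form: the immediate
// 0x00ff00ff has no single imm8m encoding, but it splits into 0x00ff0000 and
// 0x000000ff, each an 8-bit value rotated by an even amount, so (assuming
// encodeTwoImms picks that split) |add dest, src, #0x00ff00ff| becomes:
//     add scratch, src, #0x00ff0000
//     add dest, scratch, #0x000000ff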

void
MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                          ALUOp op, SetCond_ sc, Condition c)
{
    // As it turns out, if you ask for a compare-like instruction you
    // *probably* want it to set the condition codes.
    if (dest == InvalidReg)
        JS_ASSERT(sc == SetCond);

    // The operator gives us the ability to determine how this can be used.
    Imm8 imm8 = Imm8(imm.value);
    // ONE INSTRUCTION:
    // If we can encode it using an imm8m, then do so.
    if (!imm8.invalid) {
        as_alu(dest, src1, imm8, op, sc, c);
        return;
    }
    // ONE INSTRUCTION, NEGATED:
    Imm32 negImm = imm;
    Register negDest;
    ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
    Imm8 negImm8 = Imm8(negImm.value);
    // |add r1, r2, -15| can be replaced with |sub r1, r2, 15|. For bonus
    // points, dest can be replaced (nearly always invalid => ScratchRegister).
    // This is useful if we wish to negate tst: tst has an invalid (aka unused)
    // dest, but its negation, bic, *requires* a dest. We can accommodate, but
    // it will need to clobber *something*, and the scratch register isn't
    // being used, so...
    if (negOp != op_invalid && !negImm8.invalid) {
        as_alu(negDest, src1, negImm8, negOp, sc, c);
        return;
    }

    if (hasMOVWT()) {
        // If the operation is a move-a-like then we can try to use movw to
        // move the bits into the destination. Otherwise, we'll need to fall
        // back on a multi-instruction format :(
        // movw/movt don't set condition codes, so don't hold your breath.
        if (sc == NoSetCond && (op == op_mov || op == op_mvn)) {
            // ARMv7 supports movw/movt. movw zero-extends its 16 bit
            // argument, so we can set the register this way. movt leaves the
            // bottom 16 bits intact, so on its own it is unsuitable for
            // loading a constant whose low bits are not already correct.
            if (op == op_mov && ((imm.value & ~0xffff) == 0)) {
                JS_ASSERT(src1 == InvalidReg);
                as_movw(dest, (uint16_t)imm.value, c);
                return;
            }

            // If they asked for a |mvn rfoo, imm| where ~imm fits into 16
            // bits, then do it.
            if (op == op_mvn && (((~imm.value) & ~0xffff) == 0)) {
                JS_ASSERT(src1 == InvalidReg);
                as_movw(dest, (uint16_t)~imm.value, c);
                return;
            }

            // TODO: constant dedup may enable us to add dest, r0, 23 *if* we
            // are attempting to load a constant that looks similar to one
            // that already exists.
            // If it can't be done with a single movw, then we *need* to use
            // two instructions. Since this must be some sort of a move
            // operation, we can just use a movw/movt pair and get the whole
            // thing done in two moves. This does not work for ops like add,
            // since we'd need to do: movw tmp; movt tmp; add dest, tmp, src1.
            if (op == op_mvn)
                imm.value = ~imm.value;
            as_movw(dest, imm.value & 0xffff, c);
            as_movt(dest, (imm.value >> 16) & 0xffff, c);
            return;
        }
        // If we weren't doing a movalike, a 16 bit immediate will require 2
        // instructions. With the same amount of space and less time, we can
        // do two 8 bit operations, reusing the dest register, e.g.
        //     movw tmp, 0xffff; add dest, src, tmp ror 4
        // vs.
        //     add dest, src, 0xff0; add dest, dest, 0xf000000f
        // It turns out that there are some immediates that we miss with the
        // second approach. A sample value is |add dest, src, 0x1fffe|: this
        // can be done with |movw tmp, 0xffff; add dest, src, tmp lsl 1|, but
        // since imm8m's only get even rotate amounts, we cannot encode it as
        // two imm8m's. I'll try to encode as two imm8's first, since they are
        // faster. Both operations should take 1 cycle, whereas
        // |add dest, tmp ror 4| takes two cycles to execute.
    }

    // Either a) this isn't ARMv7, or b) this isn't a move. Start by
    // attempting to generate a two instruction form. Some things cannot be
    // made into two-inst forms correctly, namely |adds dest, src, 0xffff|.
    // Since we want the condition codes (and don't know which ones will be
    // checked), we need to assume that the overflow flag will be checked, and
    // |add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff| is not guaranteed
    // to set the overflow flag the same as the (theoretical) one instruction
    // variant.
    if (alu_dbl(src1, imm, dest, op, sc, c))
        return;

    // And try with its negative.
    if (negOp != op_invalid &&
        alu_dbl(src1, negImm, negDest, negOp, sc, c))
        return;

    // Well, damn. We can use two 16 bit mov's and then do the op, or we can
    // do a single load from a pool and then the op.
    if (hasMOVWT()) {
        // Try to load the immediate into a scratch register, then use that.
        as_movw(ScratchRegister, imm.value & 0xffff, c);
        if ((imm.value >> 16) != 0)
            as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
    } else {
        // Going to have to use a load. If the operation is a move, then just
        // move it into the destination register.
        if (op == op_mov) {
            as_Imm32Pool(dest, imm.value, c);
            return;
        } else {
            // If this isn't just going into a register, then stick it in a
            // temp, and then proceed.
            as_Imm32Pool(ScratchRegister, imm.value, c);
        }
    }
    as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
}
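
// Illustrative example of the fallback paths: ma_mov(Imm32(0x12345678), dest)
// cannot be encoded as an imm8m, so on ARMv7 it lowers to
//     movw dest, #0x5678
//     movt dest, #0x1234
// while on pre-ARMv7 cores the constant is loaded from a literal pool instead.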

void
MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
                          SetCond_ sc, Assembler::Condition c)
{
    JS_ASSERT(op2.getTag() == Operand::OP2);
    as_alu(dest, src1, op2.toOp2(), op, sc, c);
}

void
MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
                          SetCond_ sc, Condition c)
{
    as_alu(dest, src1, op2, op, sc, c);
}

void
MacroAssemblerARM::ma_nop()
{
    as_nop();
}

Instruction *
NextInst(Instruction *i)
{
    if (i == nullptr)
        return nullptr;
    return i->next();
}

void
MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c,
                                   RelocStyle rs, Instruction *i)
{
    int32_t imm = imm_.value;
    if (i) {
        // Make sure the current instruction is not an artificial guard
        // inserted by the assembler buffer. The InstructionIterator already
        // does this and handles edge cases, so just asking an iterator for
        // its current instruction should be enough to make sure we don't
        // accidentally inspect an artificial guard.
        i = InstructionIterator(i).cur();
    }
    switch (rs) {
      case L_MOVWT:
        as_movw(dest, Imm16(imm & 0xffff), c, i);
        // i can be nullptr here. That just means "insert in the next in
        // sequence." NextInst is special cased to not do anything when it is
        // passed nullptr, so two consecutive instructions will be inserted.
        i = NextInst(i);
        as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
        break;
      case L_LDR:
        if (i == nullptr)
            as_Imm32Pool(dest, imm, c);
        else
            as_WritePoolEntry(i, c, imm);
        break;
    }
}

void
MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest,
                                   Assembler::Condition c, RelocStyle rs, Instruction *i)
{
    return ma_movPatchable(Imm32(int32_t(imm.value)), dest, c, rs, i);
}

void
MacroAssemblerARM::ma_mov(Register src, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    if (sc == SetCond || dest != src)
        as_mov(dest, O2Reg(src), sc, c);
}

void
MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
}

void
MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c);
}

void
MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest)
{
    // Unlike on x86/x64, the data relocation has to be written before the
    // instruction that recovers the pointer, not after it.
    writeDataRelocation(ptr);
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    ma_movPatchable(Imm32(ptr.value), dest, Always, rs);
}

// Shifts (just a move with a shifting op2), with immediate shift amounts.
void
MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
{
    as_mov(dst, lsl(src, shift.value));
}
void
MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
{
    as_mov(dst, lsr(src, shift.value));
}
void
MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst)
{
    as_mov(dst, asr(src, shift.value));
}
void
MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst)
{
    as_mov(dst, ror(src, shift.value));
}
void
MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
{
    as_mov(dst, rol(src, shift.value));
}
// Shifts (just a move with a shifting op2), with register shift amounts.
void
MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
{
    as_mov(dst, lsl(src, shift));
}
void
MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
{
    as_mov(dst, lsr(src, shift));
}
void
MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst)
{
    as_mov(dst, asr(src, shift));
}
void
MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst)
{
    as_mov(dst, ror(src, shift));
}
void
MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
{
    ma_rsb(shift, Imm32(32), ScratchRegister);
    as_mov(dst, ror(src, ScratchRegister));
}

// Move not (dest <- ~src).
void
MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
}

void
MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
    as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
}

// Negate (dest <- -src), src is a register, rather than a general op2.
void
MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
    as_rsb(dest, src1, Imm8(0), sc, c);
}

// And.
void
MacroAssemblerARM::ma_and(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_and(dest, src, dest, sc, c);
}
void
MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    as_and(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_alu(dest, imm, dest, op_and, sc, c);
}
void
MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    ma_alu(src1, imm, dest, op_and, sc, c);
}

// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
void
MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_alu(dest, imm, dest, op_bic, sc, c);
}

// Exclusive or.
void
MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_eor(dest, src, dest, sc, c);
}
void
MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    as_eor(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_alu(dest, imm, dest, op_eor, sc, c);
}
void
MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    ma_alu(src1, imm, dest, op_eor, sc, c);
}

// Or.
void
MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_orr(dest, src, dest, sc, c);
}
void
MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    as_orr(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    ma_alu(dest, imm, dest, op_orr, sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    ma_alu(src1, imm, dest, op_orr, sc, c);
}

// Arithmetic-based ops.
// Add with carry.
void
MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_adc, sc, c);
}
void
MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, dest, O2Reg(src), op_adc, sc, c);
}
void
MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, src1, O2Reg(src2), op_adc, sc, c);
}

// Add.
void
MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_add, sc, c);
}

void
MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, O2Reg(src1), dest, op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, src1, O2Reg(src2), op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, op, dest, op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, op, dest, op_add, sc, c);
}

// Subtract with carry.
void
MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_sbc, sc, c);
}
void
MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c);
}
void
MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c);
}

// Subtract.
void
MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, Operand(src1), dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, Operand(src2), dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, op, dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, op, dest, op_sub, sc, c);
}

// Reverse subtract.
void
MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_rsb, sc, c);
}
void
MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, dest, O2Reg(src1), op_rsb, sc, c);
}
void
MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c);
}
void
MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(src1, op2, dest, op_rsb, sc, c);
}

// Reverse subtract with carry.
void
MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    ma_alu(dest, imm, dest, op_rsc, sc, c);
}
void
MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c);
}
void
MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c);
}

// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would).
void
MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
{
    ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c);
}
void
MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
{
    as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c);
}
void
MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
{
    MOZ_ASSUME_UNREACHABLE("Feature NYI");
}

// Compare (src1 - src2).
void
MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
{
    ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c);
}

void
MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c)
{
    ma_cmp(src1, Imm32(ptr.value), c);
}

void
MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c)
{
    ma_mov(ptr, ScratchRegister);
    ma_cmp(src1, ScratchRegister, c);
}
void
MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c)
{
    switch (op.getTag()) {
      case Operand::OP2:
        as_cmp(src1, op.toOp2(), c);
        break;
      case Operand::MEM:
        ma_ldr(op, ScratchRegister);
        as_cmp(src1, O2Reg(ScratchRegister), c);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("trying to compare FP and integer registers");
    }
}
void
MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
{
    as_cmp(src1, O2Reg(src2), c);
}

// Test for equality, (src1 ^ src2).
void
MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
{
    ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c);
}
void
MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c)
{
    as_teq(src1, O2Reg(src2), c);
}
void
MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
{
    as_teq(src1, op.toOp2(), c);
}

// Test (src1 & src2).
void
MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
{
    ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c);
}
void
MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
{
    as_tst(src1, O2Reg(src2), c);
}
void
MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
{
    as_tst(src1, op.toOp2(), c);
}

void
MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest)
{
    as_mul(dest, src1, src2);
}
void
MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest)
{
    ma_mov(imm, ScratchRegister);
    as_mul(dest, src1, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond)
{
    // TODO: this operation is illegal on armv6 and earlier
    // if src2 == ScratchRegister or src2 == dest.
    if (cond == Equal || cond == NotEqual) {
        as_smull(ScratchRegister, dest, src1, src2, SetCond);
        return cond;
    }
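
    // The product overflows int32 iff the high 32 bits of the 64-bit result
    // (left in ScratchRegister by smull) differ from the sign extension of
    // the low 32 bits, i.e. from asr(dest, 31).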
    if (cond == Overflow) {
        as_smull(ScratchRegister, dest, src1, src2);
        as_cmp(ScratchRegister, asr(dest, 31));
        return NotEqual;
    }

    MOZ_ASSUME_UNREACHABLE("Condition NYI");
}

Assembler::Condition
MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond)
{
    ma_mov(imm, ScratchRegister);
    if (cond == Equal || cond == NotEqual) {
        as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCond);
        return cond;
    }

    if (cond == Overflow) {
        as_smull(ScratchRegister, dest, ScratchRegister, src1);
        as_cmp(ScratchRegister, asr(dest, 31));
        return NotEqual;
    }

    MOZ_ASSUME_UNREACHABLE("Condition NYI");
}

void
MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                               int32_t shift)
{
    // MATH:
    // We wish to compute x % ((1 << y) - 1) for a known constant, y.
    // First, let b = (1 << y) and C = (1 << y) - 1, then think of the 32 bit
    // dividend as a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n.
    // Now, since both addition and multiplication commute with modulus:
    //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
    //            (c_0 % C) + (c_1 % C)*(b % C) + (c_2 % C)*(b^2 % C) ...
    // Now, since b == C + 1, b % C == 1 and b^n % C == 1, so the whole thing
    // simplifies to:
    //   (c_0 + c_1 + c_2 + ... + c_n) % C
    // Each c_n can easily be computed by a shift/bitextract, and the modulus
    // can be maintained by simply subtracting C whenever the number gets over C.
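    //
    // Worked example (illustrative): shift == 2, so C == 3. For x == 14
    // (0b1110), the base-4 digits are c_0 == 2 and c_1 == 3. The loop
    // accumulates 2, then 2 + 3 == 5; the trial subtraction 5 - 3 == 2 is
    // non-negative, so the accumulator becomes 2, which is indeed 14 % 3.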
    int32_t mask = (1 << shift) - 1;
    Label head;

    // Register usage:
    // hold: holds -1 if the value was negative, 1 otherwise.
    // tmp: holds the remaining bits that have not been processed.
    // secondScratchReg_ (lr): serves as a temporary location to store
    //   extracted bits into, as well as holding the trial subtraction.
    // dest: the accumulator (and holds the final result).

    // Move the whole value into tmp, setting the condition codes so we can
    // muck with them later.
    //
    // Note that we cannot use ScratchRegister in place of tmp here, as ma_and
    // below on certain architectures moves the mask into ScratchRegister
    // before performing the bitwise and.
    as_mov(tmp, O2Reg(src), SetCond);
    // Zero out the dest.
    ma_mov(Imm32(0), dest);
    // Set the hold appropriately.
    ma_mov(Imm32(1), hold);
    ma_mov(Imm32(-1), hold, NoSetCond, Signed);
    ma_rsb(Imm32(0), tmp, SetCond, Signed);
    // Begin the main loop.
    bind(&head);

    // Extract the bottom bits into lr.
    ma_and(Imm32(mask), tmp, secondScratchReg_);
    // Add those bits to the accumulator.
    ma_add(secondScratchReg_, dest, dest);
    // Do a trial subtraction. This is the same operation as cmp, but we store
    // the dest.
    ma_sub(dest, Imm32(mask), secondScratchReg_, SetCond);
    // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
    ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned);
    // Get rid of the bits that we extracted before, and set the condition codes.
    as_mov(tmp, lsr(tmp, shift), SetCond);
    // If the shift produced zero, finish; otherwise, continue in the loop.
    ma_b(&head, NonZero);
    // Check the hold to see if we need to negate the result. Hold can only be
    // 1 or -1, so this will never set the 0 flag.
    ma_cmp(hold, Imm32(0));
    // If the hold was negative, negate the result to be in line with what JS
    // wants. This will set the condition codes if we try to negate.
    ma_rsb(Imm32(0), dest, SetCond, Signed);
    // Since the Zero flag is not set by the compare, we can *only* set the
    // Zero flag in the rsb, so Zero is set iff we negated zero (e.g. the
    // result of the computation was -0.0).
}
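
// Compute the remainder via divide then multiply-subtract: ScratchRegister
// gets num / div, and mls then forms dest = num - (num / div) * div. These
// helpers assume the hardware integer division instructions (sdiv/udiv) are
// available.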
void
MacroAssemblerARM::ma_smod(Register num, Register div, Register dest)
{
    as_sdiv(ScratchRegister, num, div);
    as_mls(dest, num, ScratchRegister, div);
}

void
MacroAssemblerARM::ma_umod(Register num, Register div, Register dest)
{
    as_udiv(ScratchRegister, num, div);
    as_mls(dest, num, ScratchRegister, div);
}

// Division.
void
MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond)
{
    as_sdiv(dest, num, div, cond);
}

void
MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
{
    as_udiv(dest, num, div, cond);
}

// Memory.
// Shortcut for when we know we're transferring 32 bits of data.
void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
                          Index mode, Assembler::Condition cc)
{
    ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc);
}

void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
                          Index mode, Assembler::Condition cc)
{
    MOZ_ASSUME_UNREACHABLE("Feature NYI");
}

void
MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc)
{
    as_dtr(IsStore, 32, mode, rt, addr, cc);
}

void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc)
{
    ma_dataTransferN(ls, 32, true,
                     Register::FromCode(addr.base()), Imm32(addr.disp()),
                     rt, mode, cc);
}

void
MacroAssemblerARM::ma_str(Register rt, const Operand &addr, Index mode, Condition cc)
{
    ma_dtr(IsStore, rt, addr, mode, cc);
}
void
MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode,
                           Condition cc)
{
    JS_ASSERT((rt.code() & 1) == 0);
    JS_ASSERT(rt2.value.code() == rt.code() + 1);
    as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
}

void
MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc)
{
    as_dtr(IsLoad, 32, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldr(const Operand &addr, Register rt, Index mode, Condition cc)
{
    ma_dtr(IsLoad, rt, addr, mode, cc);
}

void
MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc)
{
    as_dtr(IsLoad, 8, mode, rt, addr, cc);
}

void
MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
}

void
MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2,
                           Index mode, Condition cc)
{
    JS_ASSERT((rt.code() & 1) == 0);
    JS_ASSERT(rt2.value.code() == rt.code() + 1);
    as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc)
{
    as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
}

void
MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
{
    as_dtr(IsStore, 8, mode, rt, addr, cc);
}

// Specialty for moving N bits of data, where n == 8, 16, 32, or 64.
BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                    Register rn, Register rm, Register rt,
                                    Index mode, Assembler::Condition cc, unsigned shiftAmount)
{
    if (size == 32 || (size == 8 && !IsSigned)) {
        return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc);
    } else {
        if (shiftAmount != 0) {
            JS_ASSERT(rn != ScratchRegister);
            JS_ASSERT(rt != ScratchRegister);
            ma_lsl(Imm32(shiftAmount), rm, ScratchRegister);
            rm = ScratchRegister;
        }
        return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc);
    }
}

BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                    Register rn, Imm32 offset, Register rt,
                                    Index mode, Assembler::Condition cc)
{
    int off = offset.value;
    // We can encode this as a standard ldr... MAKE IT SO.
    if (size == 32 || (size == 8 && !IsSigned)) {
        if (off < 4096 && off > -4096) {
            // This encodes as a single instruction. Emulating mode's behavior
            // in a multi-instruction sequence is not necessary.
            return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
        }

        // We cannot encode this offset in a single ldr. For mode == Offset,
        // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
        // This does not work for mode == PreIndex or mode == PostIndex.
        // PreIndex is simple: just do the add into the base register first,
        // then do a PreIndex'ed load. PostIndexed loads can be tricky.
        // Normally, doing the load with an index of 0, then doing an add
        // would work, but if the destination is the PC, you don't get to
        // execute the instruction after the branch, which will lead to the
        // base register not being updated correctly. Explicitly handle this
        // case, without doing anything fancy, then handle all of the other
        // cases.

        // mode == Offset
        //  add   scratch, base, offset_hi
        //  ldr   dest, [scratch, +offset_lo]
        //
        // mode == PreIndex
        //  add   base, base, offset_hi
        //  ldr   dest, [base, +offset_lo]!
        //
        // mode == PostIndex, dest == pc
        //  ldr   scratch, [base]
        //  add   base, base, offset_hi
        //  add   base, base, offset_lo
        //  mov   dest, scratch
        // PostIndex with the pc as the destination needs to be handled
        // specially, since in the code below, the write into 'dest' is going
        // to alter the control flow, so the following instruction would never
        // get emitted.
        //
        // mode == PostIndex, dest != pc
        //  ldr   dest, [base], offset_lo
        //  add   base, base, offset_hi

        if (rt == pc && mode == PostIndex && ls == IsLoad) {
            ma_mov(rn, ScratchRegister);
            ma_alu(rn, offset, rn, op_add);
            return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
        }

        int bottom = off & 0xfff;
        int neg_bottom = 0x1000 - bottom;
        // For a regular offset, base == ScratchRegister does what we want.
        // Modify the scratch register, leaving the actual base unscathed.
        Register base = ScratchRegister;
        // For the preindex case, we want to just re-use rn as the base
        // register, so when the base register is updated *before* the load,
        // rn is updated.
        if (mode == PreIndex)
            base = rn;
        JS_ASSERT(mode != PostIndex);
        // At this point, both off - bottom and off + neg_bottom will be
        // reasonable-ish quantities.
        //
        // Note a neg_bottom of 0x1000 cannot be encoded as an immediate
        // negative offset in the instruction, and this occurs when bottom is
        // zero, so this case is guarded against below.
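        //
        // Worked example (illustrative): off == 0x1234 gives bottom == 0x234
        // and off - bottom == 0x1000, which is encodable as an imm8m, so we
        // can emit |add scratch, base, #0x1000; ldr dest, [scratch, +0x234]|.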
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
            }
            sub_off = Imm8(-(off + neg_bottom)); // sub_off = -neg_bottom - off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
            }
        } else {
            Operand2 sub_off = Imm8(off - bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
            }
            sub_off = Imm8(off + neg_bottom); // sub_off = neg_bottom + off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
            }
        }
        ma_mov(offset, ScratchRegister);
        return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
    } else {
        // Attempt to use the extended load/store instructions.
        if (off < 256 && off > -256)
            return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);

        // We cannot encode this offset in a single extldr. Try to encode it as
        // |add scratch, base, imm; extldr dest, [scratch, +offset]|.
        int bottom = off & 0xff;
        int neg_bottom = 0x100 - bottom;
        // At this point, both off - bottom and off + neg_bottom will be
        // reasonable-ish quantities.
        //
        // Note a neg_bottom of 0x100 cannot be encoded as an immediate
        // negative offset in the instruction, and this occurs when bottom is
        // zero, so this case is guarded against below.
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                 cc);
            }
            sub_off = Imm8(-(off + neg_bottom)); // sub_off = -neg_bottom - off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                 cc);
            }
        } else {
            Operand2 sub_off = Imm8(off - bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                 cc);
            }
            sub_off = Imm8(off + neg_bottom); // sub_off = neg_bottom + off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                 cc);
            }
        }
        ma_mov(offset, ScratchRegister);
        return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
    }
}

void
MacroAssemblerARM::ma_pop(Register r)
{
    ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex);
    if (r == pc)
        m_buffer.markGuard();
}
void
MacroAssemblerARM::ma_push(Register r)
{
    // Pushing sp is not well defined: use two instructions.
    if (r == sp) {
        ma_mov(sp, ScratchRegister);
        r = ScratchRegister;
    }
    ma_dtr(IsStore, sp, Imm32(-4), r, PreIndex);
}

void
MacroAssemblerARM::ma_vpop(VFPRegister r)
{
    startFloatTransferM(IsLoad, sp, IA, WriteBack);
    transferFloatReg(r);
    finishFloatTransfer();
}
void
MacroAssemblerARM::ma_vpush(VFPRegister r)
{
    startFloatTransferM(IsStore, sp, DB, WriteBack);
    transferFloatReg(r);
    finishFloatTransfer();
}

// Branches when done from within arm-specific code.
BufferOffset
MacroAssemblerARM::ma_b(Label *dest, Assembler::Condition c, bool isPatchable)
{
    return as_b(dest, c, isPatchable);
}

void
MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c)
{
    as_bx(dest, c);
}

static Assembler::RelocBranchStyle
b_type()
{
    return Assembler::B_LDR;
}

void
MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Condition c)
{
    // We know the absolute address of the target, but not our final location
    // (with a relocating GC, we *can't* know our final location). For now,
    // be conservative and load the target with an absolute address.
    uint32_t trg = (uint32_t)target;
    switch (b_type()) {
      case Assembler::B_MOVWT:
        as_movw(ScratchRegister, Imm16(trg & 0xffff), c);
        as_movt(ScratchRegister, Imm16(trg >> 16), c);
        // This is going to get the branch predictor pissed off.
        as_bx(ScratchRegister, c);
        break;
      case Assembler::B_LDR_BX:
        as_Imm32Pool(ScratchRegister, trg, c);
        as_bx(ScratchRegister, c);
        break;
      case Assembler::B_LDR:
        as_Imm32Pool(pc, trg, c);
        if (c == Always)
            m_buffer.markGuard();
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Other methods of generating traceable jumps NYI");
    }
}

// This is almost NEVER necessary: we'll basically never be calling a label,
// except possibly in the crazy bailout-table case.
void
MacroAssemblerARM::ma_bl(Label *dest, Assembler::Condition c)
{
    as_bl(dest, c);
}

void
MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c)
{
    as_blx(reg, c);
}

// VFP/ALU.
void
MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
}

void
MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
            VFPRegister(src2).singleOverlay());
}

void
MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
}

void
MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
            VFPRegister(src2).singleOverlay());
}

void
MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
}

void
MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
            VFPRegister(src2).singleOverlay());
}

void
MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2));
}

void
MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst)
{
    as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(),
            VFPRegister(src2).singleOverlay());
}

void
MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vmov(dest, src, cc);
}

void
MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
}

void
MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vneg(dest, src, cc);
}

void
MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
}

void
MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vabs(dest, src, cc);
}

void
MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
}

void
MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vsqrt(dest, src, cc);
}

void
MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
}

static inline uint32_t
DoubleHighWord(const double value)
{
    return static_cast<uint32_t>(BitwiseCast<uint64_t>(value) >> 32);
}

static inline uint32_t
DoubleLowWord(const double value)
{
    return BitwiseCast<uint64_t>(value) & uint32_t(0xffffffff);
}
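
// For example, 1.0 has the IEEE-754 bit pattern 0x3ff0000000000000, so
// DoubleHighWord(1.0) == 0x3ff00000 and DoubleLowWord(1.0) == 0.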

void
MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc)
{
    if (hasVFPv3()) {
        if (DoubleLowWord(value) == 0) {
            if (DoubleHighWord(value) == 0) {
                // To zero a register, load 1.0, then execute dN <- dN - dN.
                as_vimm(dest, VFPImm::one, cc);
                as_vsub(dest, dest, dest, cc);
                return;
            }

            VFPImm enc(DoubleHighWord(value));
            if (enc.isValid()) {
                as_vimm(dest, enc, cc);
                return;
            }
        }
    }
    // Fall back to putting the value in a pool.
    as_FImm64Pool(dest, value, cc);
}
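
// Illustrative examples: ma_vimm(0.5, ...) emits a single vmov-immediate on
// VFPv3, since 0.5 has a zero low word and a VFPImm-encodable high word,
// whereas ma_vimm(0.1, ...) falls back to the constant pool because 0.1's
// mantissa bits spill into the low word.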

static inline uint32_t
Float32Word(const float value)
{
    return BitwiseCast<uint32_t>(value);
}

void
MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc)
{
    VFPRegister vd = VFPRegister(dest).singleOverlay();
    if (hasVFPv3()) {
        if (Float32Word(value) == 0) {
            // To zero a register, load 1.0, then execute sN <- sN - sN
            as_vimm(vd, VFPImm::one, cc);
            as_vsub(vd, vd, vd, cc);
            return;
        }

        // Note that the vimm immediate float32 instruction encoding differs
        // from the vimm immediate double encoding, but this difference matches
        // the difference in the floating point formats, so it is possible to
        // convert the float32 to a double and then use the double encoding
        // paths. It is still necessary to first check that the double low word
        // is zero, because some float32 numbers set these bits and this cannot
        // be ignored.
        double doubleValue = value;
        if (DoubleLowWord(doubleValue) == 0) {
            VFPImm enc(DoubleHighWord(doubleValue));
            if (enc.isValid()) {
                as_vimm(vd, enc, cc);
                return;
            }
        }
    }
    // Fall back to putting the value in a pool.
    as_FImm32Pool(vd, value, cc);
}
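
// Why the low-word check matters: widening a float32 to a double is exact,
// and the 23-bit float32 fraction lands in bits 51..29 of the double, so up
// to three of those bits (31..29) fall into the double's low word. For
// example, 1.25f widens with a zero low word, but 1.1f does not.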

void
MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc)
{
    as_vcmp(VFPRegister(src1), VFPRegister(src2), cc);
}
void
MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc)
{
    as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc);
}
void
MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc)
{
    as_vcmpz(VFPRegister(src1), cc);
}
void
MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc)
{
    as_vcmpz(VFPRegister(src1).singleOverlay(), cc);
}

void
MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister dest, FloatRegister src, Condition cc)
{
    as_vcvt(VFPRegister(dest), VFPRegister(src).sintOverlay(), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister dest, FloatRegister src, Condition cc)
{
    as_vcvt(VFPRegister(dest), VFPRegister(src).uintOverlay(), false, cc);
}

void
MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc)
{
    as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister dest, FloatRegister src, Condition cc)
{
    as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc);
}
void
MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister dest, FloatRegister src, Condition cc)
{
    as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc);
}

void
MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest, Condition cc)
{
    as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc);
}

void
MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc)
{
    as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc);
}

void
MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc)
{
    as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
}

void
MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest, Condition cc)
{
    as_vxfer(dest, InvalidReg, src, FloatToCore, cc);
}

void
MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc)
{
    as_vxfer(dest1, dest2, src, FloatToCore, cc);
}

BufferOffset
MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc)
{
    int off = addr.disp();
    JS_ASSERT((off & 3) == 0);
    Register base = Register::FromCode(addr.base());
    if (off > -1024 && off < 1024)
        return as_vdtr(ls, rt, addr.toVFPAddr(), cc);

    // We cannot encode this offset in a single ldr. Try to encode it as an
    // add scratch, base, imm; ldr dest, [scratch, +offset].
    int bottom = off & (0xff << 2);
    int neg_bottom = (0x100 << 2) - bottom;
    // At this point, both off - bottom and off + neg_bottom are multiples of
    // 0x400, which stand a better chance of fitting in an 8-bit rotated
    // immediate.
    //
    // Note a neg_bottom of 0x400 cannot be encoded as an immediate negative
    // offset in the instruction, and this occurs when bottom is zero, so this
    // case is guarded against below.
    if (off < 0) {
        Operand2 sub_off = Imm8(-(off - bottom)); // sub_off = bottom - off
        if (!sub_off.invalid) {
            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom
            return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
        }
        sub_off = Imm8(-(off + neg_bottom)); // sub_off = -neg_bottom - off
        if (!sub_off.invalid && bottom != 0) {
            JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0
            as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
            return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
        }
    } else {
        Operand2 sub_off = Imm8(off - bottom); // sub_off = off - bottom
        if (!sub_off.invalid) {
            as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom
            return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc);
        }
        sub_off = Imm8(off + neg_bottom); // sub_off = neg_bottom + off
        if (!sub_off.invalid && bottom != 0) {
            JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0
            as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
            return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc);
        }
    }
    ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc);
    return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc);
}
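
// Worked example: a load at [base, #0x1238]. The VFP immediate offset is
// limited to +/-1020 (8 bits scaled by 4), so 0x1238 is out of range. Here
// bottom = 0x1238 & 0x3fc = 0x238 and off - bottom = 0x1000, which does fit
// an 8-bit rotated immediate, so the emitted sequence is:
//     add scratch, base, #0x1000
//     vldr dN, [scratch, #0x238]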

BufferOffset
MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc)
{
    return as_vdtr(IsLoad, dest, addr, cc);
}
BufferOffset
MacroAssemblerARM::ma_vldr(const Operand &addr, VFPRegister dest, Condition cc)
{
    return ma_vdtr(IsLoad, addr, dest, cc);
}
BufferOffset
MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
{
    as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
    return ma_vldr(Operand(ScratchRegister, 0), src, cc);
}

BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc)
{
    return as_vdtr(IsStore, src, addr, cc);
}

BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, const Operand &addr, Condition cc)
{
    return ma_vdtr(IsStore, addr, src, cc);
}
BufferOffset
MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
{
    as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc);
    return ma_vstr(src, Operand(ScratchRegister, 0), cc);
}

bool
MacroAssemblerARMCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset)
{
    DebugOnly<uint32_t> initialDepth = framePushed();
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);

    Push(Imm32(descriptor)); // descriptor_

    enterNoPool();
    DebugOnly<uint32_t> offsetBeforePush = currentOffset();
    Push(pc); // actually pushes $pc + 8.

    // Consume an additional 4 bytes. The start of the next instruction will
    // then be 8 bytes after the instruction for Push(pc); this offset can
    // therefore be fed to the safepoint.
    ma_nop();
    uint32_t pseudoReturnOffset = currentOffset();
    leaveNoPool();

    JS_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size());
    JS_ASSERT(pseudoReturnOffset - offsetBeforePush == 8);

    *offset = pseudoReturnOffset;
    return true;
}
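
// To spell out the arithmetic above: on ARM, reading pc yields the address of
// the current instruction plus 8. If the push sits at offset X, it stores
// X + 8; the nop occupies [X + 4, X + 8), so pseudoReturnOffset == X + 8,
// matching the value that was pushed, as the second assertion checks.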

bool
MacroAssemblerARMCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
{
    DebugOnly<uint32_t> initialDepth = framePushed();
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);

    Push(Imm32(descriptor)); // descriptor_
    Push(ImmPtr(fakeReturnAddr));

    return true;
}

void
MacroAssemblerARMCompat::callWithExitFrame(JitCode *target)
{
    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
    Push(Imm32(descriptor)); // descriptor

    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
    ma_callIonHalfPush(ScratchRegister);
}

void
MacroAssemblerARMCompat::callWithExitFrame(JitCode *target, Register dynStack)
{
    ma_add(Imm32(framePushed()), dynStack);
    makeFrameDescriptor(dynStack, JitFrame_IonJS);
    Push(dynStack); // descriptor

    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs);
    ma_callIonHalfPush(ScratchRegister);
}

void
MacroAssemblerARMCompat::callIon(const Register &callee)
{
    JS_ASSERT((framePushed() & 3) == 0);
    if ((framePushed() & 7) == 4) {
        ma_callIonHalfPush(callee);
    } else {
        adjustFrame(sizeof(void*));
        ma_callIon(callee);
    }
}
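
// Alignment bookkeeping for the call above: the callee expects an 8-byte
// aligned sp. When framePushed() % 8 == 4, pushing the 4-byte return address
// restores alignment, so the half-push variant suffices; when the frame is
// already 8-aligned, ma_callIon is used together with an extra alignment
// word (the adjustFrame(sizeof(void*))) so the invariant still holds on
// entry. (This assumes ma_callIonHalfPush pushes exactly one word, which is
// how it is used throughout this file.)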

void
MacroAssemblerARMCompat::reserveStack(uint32_t amount)
{
    if (amount)
        ma_sub(Imm32(amount), sp);
    adjustFrame(amount);
}
void
MacroAssemblerARMCompat::freeStack(uint32_t amount)
{
    JS_ASSERT(amount <= framePushed_);
    if (amount)
        ma_add(Imm32(amount), sp);
    adjustFrame(-amount);
}
void
MacroAssemblerARMCompat::freeStack(Register amount)
{
    ma_add(amount, sp);
}

void
MacroAssembler::PushRegsInMask(RegisterSet set)
{
    int32_t diffF = set.fpus().size() * sizeof(double);
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);

    if (set.gprs().size() > 1) {
        adjustFrame(diffG);
        startDataTransferM(IsStore, StackPointer, DB, WriteBack);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            transferReg(*iter);
        }
        finishDataTransfer();
    } else {
        reserveStack(diffG);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            storePtr(*iter, Address(StackPointer, diffG));
        }
    }
    JS_ASSERT(diffG == 0);

    adjustFrame(diffF);
    diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
    JS_ASSERT(diffF == 0);
}
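
// Design note: when two or more GPRs are saved, a single stmdb with writeback
// (the DB/WriteBack transfer started above) is cheaper than a run of str
// instructions; a lone register is stored directly instead. VFP registers go
// through transferMultipleByRuns because vstm can only transfer a block of
// consecutively numbered registers, so the set is split into contiguous runs.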

void
MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore)
{
    int32_t diffG = set.gprs().size() * sizeof(intptr_t);
    int32_t diffF = set.fpus().size() * sizeof(double);
    const int32_t reservedG = diffG;
    const int32_t reservedF = diffF;

    // ARM can load multiple registers at once, but only if we want back all
    // the registers we previously saved to the stack.
    if (ignore.empty(true)) {
        diffF -= transferMultipleByRuns(set.fpus(), IsLoad, StackPointer, IA);
        adjustFrame(-reservedF);
    } else {
        for (FloatRegisterBackwardIterator iter(set.fpus()); iter.more(); iter++) {
            diffF -= sizeof(double);
            if (!ignore.has(*iter))
                loadDouble(Address(StackPointer, diffF), *iter);
        }
        freeStack(reservedF);
    }
    JS_ASSERT(diffF == 0);

    if (set.gprs().size() > 1 && ignore.empty(false)) {
        startDataTransferM(IsLoad, StackPointer, IA, WriteBack);
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            transferReg(*iter);
        }
        finishDataTransfer();
        adjustFrame(-reservedG);
    } else {
        for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
            diffG -= sizeof(intptr_t);
            if (!ignore.has(*iter))
                loadPtr(Address(StackPointer, diffG), *iter);
        }
        freeStack(reservedG);
    }
    JS_ASSERT(diffG == 0);
}

void
MacroAssemblerARMCompat::add32(Register src, Register dest)
{
    ma_add(src, dest, SetCond);
}

void
MacroAssemblerARMCompat::add32(Imm32 imm, Register dest)
{
    ma_add(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest)
{
    ma_eor(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::add32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_add(imm, ScratchRegister, SetCond);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::sub32(Imm32 imm, Register dest)
{
    ma_sub(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::sub32(Register src, Register dest)
{
    ma_sub(src, dest, SetCond);
}

void
MacroAssemblerARMCompat::and32(Imm32 imm, Register dest)
{
    ma_and(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::addPtr(Register src, Register dest)
{
    ma_add(src, dest);
}

void
MacroAssemblerARMCompat::addPtr(const Address &src, Register dest)
{
    load32(src, ScratchRegister);
    ma_add(ScratchRegister, dest, SetCond);
}

void
MacroAssemblerARMCompat::not32(Register reg)
{
    ma_mvn(reg, reg);
}

void
MacroAssemblerARMCompat::and32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_and(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::or32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_orr(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::xorPtr(Imm32 imm, Register dest)
{
    ma_eor(imm, dest);
}

void
MacroAssemblerARMCompat::xorPtr(Register src, Register dest)
{
    ma_eor(src, dest);
}

void
MacroAssemblerARMCompat::orPtr(Imm32 imm, Register dest)
{
    ma_orr(imm, dest);
}

void
MacroAssemblerARMCompat::orPtr(Register src, Register dest)
{
    ma_orr(src, dest);
}

void
MacroAssemblerARMCompat::andPtr(Imm32 imm, Register dest)
{
    ma_and(imm, dest);
}

void
MacroAssemblerARMCompat::andPtr(Register src, Register dest)
{
    ma_and(src, dest);
}

void
MacroAssemblerARMCompat::move32(const Imm32 &imm, const Register &dest)
{
    ma_mov(imm, dest);
}

void
MacroAssemblerARMCompat::move32(const Register &src, const Register &dest) {
    ma_mov(src, dest);
}

void
MacroAssemblerARMCompat::movePtr(const Register &src, const Register &dest)
{
    ma_mov(src, dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmWord &imm, const Register &dest)
{
    ma_mov(Imm32(imm.value), dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
{
    ma_mov(imm, dest);
}
void
MacroAssemblerARMCompat::movePtr(const ImmPtr &imm, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
void
MacroAssemblerARMCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
{
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    enoughMemory_ &= append(AsmJSAbsoluteLink(nextOffset().getOffset(), imm.kind()));
    ma_movPatchable(Imm32(-1), dest, Always, rs);
}
void
MacroAssemblerARMCompat::load8ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
}

void
MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
{
    Register base = src.base;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;

    if (src.offset != 0) {
        ma_mov(base, ScratchRegister);
        base = ScratchRegister;
        ma_add(base, Imm32(src.offset), base);
    }
    ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
}

void
MacroAssemblerARMCompat::load8SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest);
}

void
MacroAssemblerARMCompat::load8SignExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;

    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

void
MacroAssemblerARMCompat::load16ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest);
}

void
MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;

    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

void
MacroAssemblerARMCompat::load16SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest);
}

void
MacroAssemblerARMCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
{
    Register index = src.index;

    // ARMv7 does not have LSL on an index register with an extended load.
    if (src.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (src.offset != 0) {
        if (index != ScratchRegister) {
            ma_mov(index, ScratchRegister);
            index = ScratchRegister;
        }
        ma_add(Imm32(src.offset), index);
    }
    ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest);
}

void
MacroAssemblerARMCompat::load32(const Address &address, const Register &dest)
{
    loadPtr(address, dest);
}

void
MacroAssemblerARMCompat::load32(const BaseIndex &address, const Register &dest)
{
    loadPtr(address, dest);
}

void
MacroAssemblerARMCompat::load32(const AbsoluteAddress &address, const Register &dest)
{
    loadPtr(address, dest);
}
void
MacroAssemblerARMCompat::loadPtr(const Address &address, const Register &dest)
{
    ma_ldr(Operand(address), dest);
}

void
MacroAssemblerARMCompat::loadPtr(const BaseIndex &src, const Register &dest)
{
    Register base = src.base;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;

    if (src.offset != 0) {
        ma_mov(base, ScratchRegister);
        base = ScratchRegister;
        ma_add(Imm32(src.offset), base);
    }
    ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest);
}
void
MacroAssemblerARMCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(address.addr)), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}
void
MacroAssemblerARMCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
{
    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}

Operand payloadOf(const Address &address) {
    return Operand(address.base, address.offset);
}
Operand tagOf(const Address &address) {
    return Operand(address.base, address.offset + 4);
}
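
// These helpers encode the nunbox32 layout used throughout this file: a
// js::Value is two words, with the payload at offset 0 and the type tag at
// offset 4 (little-endian ARM). For example, the int32 value 7 is stored as
// payload word 0x00000007 followed by the JSVAL_TAG_INT32 tag word.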

void
MacroAssemblerARMCompat::loadPrivate(const Address &address, const Register &dest)
{
    ma_ldr(payloadOf(address), dest);
}

void
MacroAssemblerARMCompat::loadDouble(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), dest);
}

void
MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index
    // modes, so just add the index, then handle the offset as normal.
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), dest);
}

void
MacroAssemblerARMCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
{
    VFPRegister rt = dest;
    ma_vldr(Operand(address), rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}

void
MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index
    // modes, so just add the index, then handle the offset as normal.
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    VFPRegister rt = dest;
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}

void
MacroAssemblerARMCompat::loadFloat32(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), VFPRegister(dest).singleOverlay());
}

void
MacroAssemblerARMCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
{
    // VFP instructions don't even support register Base + register Index
    // modes, so just add the index, then handle the offset as normal.
    Register base = src.base;
    Register index = src.index;
    uint32_t scale = Imm32::ShiftOf(src.scale).value;
    int32_t offset = src.offset;
    as_add(ScratchRegister, base, lsl(index, scale));

    ma_vldr(Operand(ScratchRegister, offset), VFPRegister(dest).singleOverlay());
}

void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store8(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src);
}

void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, dest);
}

void
MacroAssemblerARMCompat::store8(const Register &src, const BaseIndex &dest)
{
    Register base = dest.base;
    uint32_t scale = Imm32::ShiftOf(dest.scale).value;

    if (dest.offset != 0) {
        ma_add(base, Imm32(dest.offset), ScratchRegister);
        base = ScratchRegister;
    }
    ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
}

void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store16(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src);
}

void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, dest);
}
void
MacroAssemblerARMCompat::store16(const Register &src, const BaseIndex &address)
{
    Register index = address.index;

    // ARMv7 does not have LSL on an index register with an extended store.
    if (address.scale != TimesOne) {
        ma_lsl(Imm32::ShiftOf(address.scale), index, ScratchRegister);
        index = ScratchRegister;
    }

    if (address.offset != 0) {
        ma_add(index, Imm32(address.offset), ScratchRegister);
        index = ScratchRegister;
    }
    ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index)));
}
void
MacroAssemblerARMCompat::store32(const Register &src, const AbsoluteAddress &address)
{
    storePtr(src, address);
}

void
MacroAssemblerARMCompat::store32(const Register &src, const Address &address)
{
    storePtr(src, address);
}

void
MacroAssemblerARMCompat::store32(const Imm32 &src, const Address &address)
{
    move32(src, secondScratchReg_);
    storePtr(secondScratchReg_, address);
}

void
MacroAssemblerARMCompat::store32(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store32(secondScratchReg_, dest);
}

void
MacroAssemblerARMCompat::store32(const Register &src, const BaseIndex &dest)
{
    Register base = dest.base;
    uint32_t scale = Imm32::ShiftOf(dest.scale).value;

    if (dest.offset != 0) {
        ma_add(base, Imm32(dest.offset), ScratchRegister);
        base = ScratchRegister;
    }
    ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale)));
}

void
MacroAssemblerARMCompat::storePtr(ImmWord imm, const Address &address)
{
    movePtr(imm, ScratchRegister);
    storePtr(ScratchRegister, address);
}

void
MacroAssemblerARMCompat::storePtr(ImmPtr imm, const Address &address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}

void
MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const Address &address)
{
    movePtr(imm, ScratchRegister);
    storePtr(ScratchRegister, address);
}

void
MacroAssemblerARMCompat::storePtr(Register src, const Address &address)
{
    ma_str(src, Operand(address));
}

void
MacroAssemblerARMCompat::storePtr(const Register &src, const AbsoluteAddress &dest)
{
    movePtr(ImmWord(uintptr_t(dest.addr)), ScratchRegister);
    storePtr(src, Address(ScratchRegister, 0x0));
}

// Note: this function clobbers the input register.
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    JS_ASSERT(input != ScratchFloatReg);
    ma_vimm(0.5, ScratchFloatReg);
    if (hasVFPv3()) {
        Label notSplit;
        ma_vadd(input, ScratchFloatReg, ScratchFloatReg);
        // Convert the double into an unsigned fixed point value with 24 bits
        // of precision. The resulting number will look like 0xII.DDDDDD.
        as_vcvtFixed(ScratchFloatReg, false, 24, true);
        // Move the fixed point value into an integer register.
        as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
        // See if this value *might* have been an exact integer after adding
        // 0.5. This tests the 1/2 through 1/16,777,216th places, but 0.5
        // needs to be tested out to the 1/140,737,488,355,328th place.
        ma_tst(output, Imm32(0x00ffffff));
        // Convert to a uint8 by shifting out all of the fraction bits.
        ma_lsr(Imm32(24), output, output);
        // If any of the bottom 24 bits were non-zero, then we're good, since
        // this number can't be exactly XX.0.
        ma_b(&notSplit, NonZero);
        as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore);
        ma_cmp(ScratchRegister, Imm32(0));
        // If the lower 32 bits of the double were 0, then this was an exact
        // number, and it should be even.
        ma_bic(Imm32(1), output, NoSetCond, Zero);
        bind(&notSplit);
    } else {
        Label outOfRange;
        ma_vcmpz(input);
        // Do the add, in place so we can reference it later.
        ma_vadd(input, ScratchFloatReg, input);
        // Do the conversion to an integer.
        as_vcvt(VFPRegister(ScratchFloatReg).uintOverlay(), VFPRegister(input));
        // Copy the converted value out.
        as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
        as_vmrs(pc);
        ma_mov(Imm32(0), output, NoSetCond, Overflow); // NaN => 0
        ma_b(&outOfRange, Overflow); // NaN
        ma_cmp(output, Imm32(0xff));
        ma_mov(Imm32(0xff), output, NoSetCond, Above);
        ma_b(&outOfRange, Above);
        // Convert it back to see if we got the same value back.
        as_vcvt(ScratchFloatReg, VFPRegister(ScratchFloatReg).uintOverlay());
        // Do the check.
        as_vcmp(ScratchFloatReg, input);
        as_vmrs(pc);
        ma_bic(Imm32(1), output, NoSetCond, Zero);
        bind(&outOfRange);
    }
}
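
// Worked example of the round-half-to-even fixup above: clamping 2.5 adds
// 0.5 to get 3.0, whose 24 fraction bits are all zero, so the tie-breaking
// path runs and the bic clears bit 0, yielding 2. Clamping 3.5 gives 4.0,
// which is already even, so the bic is a no-op and the result stays 4. A
// non-tie like 2.6 becomes roughly 3.1, has non-zero fraction bits, and
// skips the fixup entirely, truncating to 3.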

void
MacroAssemblerARMCompat::cmp32(const Register &lhs, const Imm32 &rhs)
{
    JS_ASSERT(lhs != ScratchRegister);
    ma_cmp(lhs, rhs);
}

void
MacroAssemblerARMCompat::cmp32(const Operand &lhs, const Register &rhs)
{
    ma_cmp(lhs.toReg(), rhs);
}

void
MacroAssemblerARMCompat::cmp32(const Operand &lhs, const Imm32 &rhs)
{
    JS_ASSERT(lhs.toReg() != ScratchRegister);
    ma_cmp(lhs.toReg(), rhs);
}

void
MacroAssemblerARMCompat::cmp32(const Register &lhs, const Register &rhs)
{
    ma_cmp(lhs, rhs);
}

void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmWord &rhs)
{
    JS_ASSERT(lhs != ScratchRegister);
    ma_cmp(lhs, Imm32(rhs.value));
}

void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmPtr &rhs)
{
    return cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
}

void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Register &rhs)
{
    ma_cmp(lhs, rhs);
}

void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmGCPtr &rhs)
{
    ma_cmp(lhs, rhs);
}

void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Imm32 &rhs)
{
    ma_cmp(lhs, rhs);
}

void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const Register &rhs)
{
    loadPtr(lhs, ScratchRegister);
    cmpPtr(ScratchRegister, rhs);
}

void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmWord &rhs)
{
    loadPtr(lhs, secondScratchReg_);
    ma_cmp(secondScratchReg_, Imm32(rhs.value));
}

void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmPtr &rhs)
{
    cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
}

void
MacroAssemblerARMCompat::setStackArg(const Register &reg, uint32_t arg)
{
    ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg);
}

void
MacroAssemblerARMCompat::subPtr(Imm32 imm, const Register dest)
{
    ma_sub(imm, dest);
}

void
MacroAssemblerARMCompat::subPtr(const Address &addr, const Register dest)
{
    loadPtr(addr, ScratchRegister);
    ma_sub(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::subPtr(const Register &src, const Register &dest)
{
    ma_sub(src, dest);
}

void
MacroAssemblerARMCompat::subPtr(const Register &src, const Address &dest)
{
    loadPtr(dest, ScratchRegister);
    ma_sub(src, ScratchRegister);
    storePtr(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::addPtr(Imm32 imm, const Register dest)
{
    ma_add(imm, dest);
}

void
MacroAssemblerARMCompat::addPtr(Imm32 imm, const Address &dest)
{
    loadPtr(dest, ScratchRegister);
    addPtr(imm, ScratchRegister);
    storePtr(ScratchRegister, dest);
}

void
MacroAssemblerARMCompat::compareDouble(FloatRegister lhs, FloatRegister rhs)
{
    // Compare the doubles, setting vector status flags.
    if (rhs == InvalidFloatReg)
        ma_vcmpz(lhs);
    else
        ma_vcmp(lhs, rhs);

    // Move vector status bits to normal status flags.
    as_vmrs(pc);
}

void
MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs,
                                      const FloatRegister &rhs, Label *label)
{
    compareDouble(lhs, rhs);

    if (cond == DoubleNotEqual) {
        // Force the unordered cases not to jump.
        Label unordered;
        ma_b(&unordered, VFP_Unordered);
        ma_b(label, VFP_NotEqualOrUnordered);
        bind(&unordered);
        return;
    }

    if (cond == DoubleEqualOrUnordered) {
        ma_b(label, VFP_Unordered);
        ma_b(label, VFP_Equal);
        return;
    }

    ma_b(label, ConditionFromDoubleCondition(cond));
}
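
// NaN handling note: when either operand is NaN, the VFP compare reports
// "unordered". DoubleNotEqual is the ordered form of !=, so it must not
// branch on NaN; the only VFP condition covering != also covers unordered,
// hence the guard branch above that skips it. DoubleEqualOrUnordered is the
// dual case: it must branch on NaN as well, so it takes two branches.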

void
MacroAssemblerARMCompat::compareFloat(FloatRegister lhs, FloatRegister rhs)
{
    // Compare the floats, setting vector status flags.
    if (rhs == InvalidFloatReg)
        as_vcmpz(VFPRegister(lhs).singleOverlay());
    else
        as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay());

    // Move vector status bits to normal status flags.
    as_vmrs(pc);
}

void
MacroAssemblerARMCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs,
                                     const FloatRegister &rhs, Label *label)
{
    compareFloat(lhs, rhs);

    if (cond == DoubleNotEqual) {
        // Force the unordered cases not to jump.
        Label unordered;
        ma_b(&unordered, VFP_Unordered);
        ma_b(label, VFP_NotEqualOrUnordered);
        bind(&unordered);
        return;
    }

    if (cond == DoubleEqualOrUnordered) {
        ma_b(label, VFP_Unordered);
        ma_b(label, VFP_Equal);
        return;
    }

    ma_b(label, ConditionFromDoubleCondition(cond));
}

Assembler::Condition
MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const ValueOperand &value)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand &value)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN));
    return cond;
}
Assembler::Condition
MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand &value)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR));
    return actual;
}

Assembler::Condition
MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const ValueOperand &value)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const ValueOperand &value)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testString(Assembler::Condition cond, const ValueOperand &value)
{
    return testString(cond, value.typeReg());
}

Assembler::Condition
MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const ValueOperand &value)
{
    return testObject(cond, value.typeReg());
}

Assembler::Condition
MacroAssemblerARMCompat::testNumber(Assembler::Condition cond, const ValueOperand &value)
{
    return testNumber(cond, value.typeReg());
}

Assembler::Condition
MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const ValueOperand &value)
{
    return testMagic(cond, value.typeReg());
}

Assembler::Condition
MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const ValueOperand &value)
{
    return testPrimitive(cond, value.typeReg());
}

// Register-based tests.
Assembler::Condition
MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_INT32));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const Register &tag) {
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_NULL));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const Register &tag) {
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testString(Assembler::Condition cond, const Register &tag) {
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_STRING));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
    return cond == Equal ? Below : AboveOrEqual;
}
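
// The set tests here (testDouble, testNumber, testPrimitive, testGCThing)
// lean on the ordering of the nunbox32 tag constants: every double is stored
// as a raw IEEE-754 pattern whose high word compares unsigned-below
// JSVAL_TAG_CLEAR, and the non-double tags sit in a fixed sequence above it,
// ending with JSVAL_TAG_OBJECT. That lets "is it a number?" or "is it a
// primitive?" compile to a single cmp plus one unsigned condition instead of
// a chain of equality checks.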

Assembler::Condition
MacroAssemblerARMCompat::testGCThing(Assembler::Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
    return cond == Equal ? AboveOrEqual : Below;
}

Assembler::Condition
MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testDouble(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testDouble(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testBoolean(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testBoolean(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testNull(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testNull(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testUndefined(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testUndefined(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testString(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testString(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testObject(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testObject(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testNumber(Condition cond, const Address &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    return testNumber(cond, ScratchRegister);
}

Assembler::Condition
MacroAssemblerARMCompat::testDouble(Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR));
    return actual;
}

Assembler::Condition
MacroAssemblerARMCompat::testNumber(Condition cond, const Register &tag)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    ma_cmp(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET));
    return cond == Equal ? BelowOrEqual : Above;
}

Assembler::Condition
MacroAssemblerARMCompat::testUndefined(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_UNDEFINED));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testNull(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_NULL));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testBoolean(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_BOOLEAN));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testString(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_STRING));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testInt32(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testObject(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_OBJECT));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testDouble(Condition cond, const BaseIndex &src)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
    extractTag(src, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_CLEAR));
    return actual;
}

Assembler::Condition
MacroAssemblerARMCompat::testMagic(Condition cond, const BaseIndex &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC));
    return cond;
}

Assembler::Condition
MacroAssemblerARMCompat::testGCThing(Condition cond, const BaseIndex &address)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);
    extractTag(address, ScratchRegister);
    ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET));
    return cond == Equal ? AboveOrEqual : Below;
}

void
MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand &value, const Value &v,
                                         Label *label)
{
    // If cond == NotEqual, branch when a.payload != b.payload || a.tag != b.tag.
    // If the payloads are equal, compare the tags. If the payloads are not
    // equal, short circuit true (NotEqual).
    //
    // If cond == Equal, branch when a.payload == b.payload && a.tag == b.tag.
    // If the payloads are equal, compare the tags. If the payloads are not
    // equal, short circuit false (NotEqual).
    jsval_layout jv = JSVAL_TO_IMPL(v);
    if (v.isMarkable())
        ma_cmp(value.payloadReg(), ImmGCPtr(reinterpret_cast<gc::Cell *>(v.toGCThing())));
    else
        ma_cmp(value.payloadReg(), Imm32(jv.s.payload.i32));
    ma_cmp(value.typeReg(), Imm32(jv.s.tag), Equal);
    ma_b(label, cond);
}

void
MacroAssemblerARMCompat::branchTestValue(Condition cond, const Address &valaddr,
                                         const ValueOperand &value, Label *label)
{
    JS_ASSERT(cond == Equal || cond == NotEqual);

    // Check payload before tag, since payload is more likely to differ.
    if (cond == NotEqual) {
        ma_ldr(payloadOf(valaddr), ScratchRegister);
        branchPtr(NotEqual, ScratchRegister, value.payloadReg(), label);

        ma_ldr(tagOf(valaddr), ScratchRegister);
        branchPtr(NotEqual, ScratchRegister, value.typeReg(), label);
    } else {
        Label fallthrough;

        ma_ldr(payloadOf(valaddr), ScratchRegister);
        branchPtr(NotEqual, ScratchRegister, value.payloadReg(), &fallthrough);

        ma_ldr(tagOf(valaddr), ScratchRegister);
        branchPtr(Equal, ScratchRegister, value.typeReg(), label);

        bind(&fallthrough);
    }
}
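
// The ValueOperand variant above uses ARM conditional execution rather than a
// branch: the second ma_cmp is predicated on Equal, so the tag compare only
// runs when the payload compare already matched. If the payloads differed,
// the flags from the first compare survive, and they already encode NotEqual,
// which is exactly the result both the Equal and NotEqual callers need.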

// Unboxing code.
void
MacroAssemblerARMCompat::unboxInt32(const ValueOperand &operand, const Register &dest)
{
    ma_mov(operand.payloadReg(), dest);
}

void
MacroAssemblerARMCompat::unboxInt32(const Address &src, const Register &dest)
{
    ma_ldr(payloadOf(src), dest);
}

void
MacroAssemblerARMCompat::unboxBoolean(const ValueOperand &operand, const Register &dest)
{
    ma_mov(operand.payloadReg(), dest);
}

void
MacroAssemblerARMCompat::unboxBoolean(const Address &src, const Register &dest)
{
    ma_ldr(payloadOf(src), dest);
}

void
MacroAssemblerARMCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest)
{
    JS_ASSERT(dest != ScratchFloatReg);
    as_vxfer(operand.payloadReg(), operand.typeReg(),
             VFPRegister(dest), CoreToFloat);
}

void
MacroAssemblerARMCompat::unboxDouble(const Address &src, const FloatRegister &dest)
{
    ma_vldr(Operand(src), dest);
}

void
MacroAssemblerARMCompat::unboxString(const ValueOperand &operand, const Register &dest)
{
    ma_mov(operand.payloadReg(), dest);
}

void
MacroAssemblerARMCompat::unboxString(const Address &src, const Register &dest)
{
    ma_ldr(payloadOf(src), dest);
}

void
MacroAssemblerARMCompat::unboxObject(const ValueOperand &src, const Register &dest)
{
    ma_mov(src.payloadReg(), dest);
}

void
MacroAssemblerARMCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
{
    if (dest.isFloat()) {
        Label notInt32, end;
        branchTestInt32(Assembler::NotEqual, src, &notInt32);
        convertInt32ToDouble(src.payloadReg(), dest.fpu());
        ma_b(&end);
        bind(&notInt32);
        unboxDouble(src, dest.fpu());
        bind(&end);
    } else if (src.payloadReg() != dest.gpr()) {
        as_mov(dest.gpr(), O2Reg(src.payloadReg()));
    }
}

void
MacroAssemblerARMCompat::unboxPrivate(const ValueOperand &src, Register dest)
{
    ma_mov(src.payloadReg(), dest);
}

void
MacroAssemblerARMCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
{
    as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore);
}

void
MacroAssemblerARMCompat::boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest) {
    if (src != dest.payloadReg())
        ma_mov(src, dest.payloadReg());
    ma_mov(ImmType(type), dest.typeReg());
}

void
MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
{
    VFPRegister d = VFPRegister(dest);
    ma_vimm(1.0, dest);
    ma_cmp(operand.payloadReg(), Imm32(0));
    // If the source is 0, then subtract the dest from itself, producing 0.
    as_vsub(d, d, d, Equal);
}
|
3133 |
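// The bool-to-float conversions above and below avoid a branch by exploiting
// ARM conditional execution: dest is unconditionally set to 1.0, and the
// vsub is predicated on the Equal condition, so it only executes (zeroing
// dest) when the boolean payload compared equal to 0.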
|
void
MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
{
    // Transfer the integral value to a floating point register.
    VFPRegister vfpdest = VFPRegister(dest);
    as_vxfer(operand.payloadReg(), InvalidReg,
             vfpdest.sintOverlay(), CoreToFloat);
    // Convert the value to a double.
    as_vcvt(vfpdest, vfpdest.sintOverlay());
}

void
MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest)
{
    VFPRegister d = VFPRegister(dest).singleOverlay();
    ma_vimm_f32(1.0, dest);
    ma_cmp(operand.payloadReg(), Imm32(0));
    // If the source is 0, then subtract the dest from itself, producing 0.
    as_vsub(d, d, d, Equal);
}

void
MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest)
{
    // Transfer the integral value to a floating point register.
    VFPRegister vfpdest = VFPRegister(dest).singleOverlay();
    as_vxfer(operand.payloadReg(), InvalidReg,
             vfpdest.sintOverlay(), CoreToFloat);
    // Convert the value to a float.
    as_vcvt(vfpdest, vfpdest.sintOverlay());
}

void
MacroAssemblerARMCompat::loadConstantFloat32(float f, const FloatRegister &dest)
{
    ma_vimm_f32(f, dest);
}

|
void
MacroAssemblerARMCompat::loadInt32OrDouble(const Operand &src, const FloatRegister &dest)
{
    Label notInt32, end;
    // If it's an int, convert it to double.
    ma_ldr(ToType(src), ScratchRegister);
    branchTestInt32(Assembler::NotEqual, ScratchRegister, &notInt32);
    ma_ldr(ToPayload(src), ScratchRegister);
    convertInt32ToDouble(ScratchRegister, dest);
    ma_b(&end);

    // Not an int, just load as double.
    bind(&notInt32);
    ma_vldr(src, dest);
    bind(&end);
}

void
MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, const FloatRegister &dest, int32_t shift)
{
    Label notInt32, end;

    JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);

    // If it's an int, convert it to double.
    ma_alu(base, lsl(index, shift), ScratchRegister, op_add);

    // Since we only have one scratch register, we need to stomp over it with
    // the tag.
    ma_ldr(Address(ScratchRegister, NUNBOX32_TYPE_OFFSET), ScratchRegister);
    branchTestInt32(Assembler::NotEqual, ScratchRegister, &notInt32);

    // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset is provided.
    ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), ScratchRegister);
    convertInt32ToDouble(ScratchRegister, dest);
    ma_b(&end);

    // Not an int, just load as double.
    bind(&notInt32);
    // Recompute the address that had been stored in the scratch register,
    // since the scratch register was clobbered loading the type.
    ma_alu(base, lsl(index, shift), ScratchRegister, op_add);
    ma_vldr(Address(ScratchRegister, 0), dest);
    bind(&end);
}

|
void
MacroAssemblerARMCompat::loadConstantDouble(double dp, const FloatRegister &dest)
{
    as_FImm64Pool(dest, dp);
}

// Treat the value as a boolean, and set the condition codes accordingly.

|
Assembler::Condition
MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand &operand)
{
    ma_tst(operand.payloadReg(), operand.payloadReg());
    return truthy ? NonZero : Zero;
}

Assembler::Condition
MacroAssemblerARMCompat::testBooleanTruthy(bool truthy, const ValueOperand &operand)
{
    ma_tst(operand.payloadReg(), operand.payloadReg());
    return truthy ? NonZero : Zero;
}

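// testDoubleTruthy compares the input against 0.0 and copies the VFP status
// flags into APSR with vmrs. The compare of r0 with itself that follows is
// predicated on Overflow, the unordered (NaN) case: when the comparison was
// unordered it forces the Zero flag to be set, so NaN correctly tests as
// falsy.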
|
Assembler::Condition
MacroAssemblerARMCompat::testDoubleTruthy(bool truthy, const FloatRegister &reg)
{
    as_vcmpz(VFPRegister(reg));
    as_vmrs(pc);
    as_cmp(r0, O2Reg(r0), Overflow);
    return truthy ? NonZero : Zero;
}

|
Register
MacroAssemblerARMCompat::extractObject(const Address &address, Register scratch)
{
    ma_ldr(payloadOf(address), scratch);
    return scratch;
}

Register
MacroAssemblerARMCompat::extractTag(const Address &address, Register scratch)
{
    ma_ldr(tagOf(address), scratch);
    return scratch;
}

Register
MacroAssemblerARMCompat::extractTag(const BaseIndex &address, Register scratch)
{
    ma_alu(address.base, lsl(address.index, address.scale), scratch, op_add, NoSetCond);
    return extractTag(Address(scratch, address.offset), scratch);
}

void
MacroAssemblerARMCompat::moveValue(const Value &val, Register type, Register data)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    ma_mov(Imm32(jv.s.tag), type);
    if (val.isMarkable())
        ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), data);
    else
        ma_mov(Imm32(jv.s.payload.i32), data);
}
void
MacroAssemblerARMCompat::moveValue(const Value &val, const ValueOperand &dest)
{
    moveValue(val, dest.typeReg(), dest.payloadReg());
}

|
/////////////////////////////////////////////////////////////////
// X86/X64-common (ARM too now) interface.
/////////////////////////////////////////////////////////////////
void
MacroAssemblerARMCompat::storeValue(ValueOperand val, Operand dst)
{
    ma_str(val.payloadReg(), ToPayload(dst));
    ma_str(val.typeReg(), ToType(dst));
}

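// The STRD/LDRD forms used below store or load a Value's two words in a
// single instruction, but only when the register pair is eligible (see
// isValueDTRDCandidate) and the immediate offset fits in the extended
// addressing mode's 8-bit range, hence the Abs(offset) <= 255 guards.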
|
void
MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex &dest)
{
    if (isValueDTRDCandidate(val) && Abs(dest.offset) <= 255) {
        Register tmpIdx;
        if (dest.offset == 0) {
            if (dest.scale == TimesOne) {
                tmpIdx = dest.index;
            } else {
                ma_lsl(Imm32(dest.scale), dest.index, ScratchRegister);
                tmpIdx = ScratchRegister;
            }
            ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx)));
        } else {
            ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
            ma_strd(val.payloadReg(), val.typeReg(),
                    EDtrAddr(ScratchRegister, EDtrOffImm(dest.offset)));
        }
    } else {
        ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add);
        storeValue(val, Address(ScratchRegister, dest.offset));
    }
}

|
void
MacroAssemblerARMCompat::loadValue(const BaseIndex &addr, ValueOperand val)
{
    if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) {
        Register tmpIdx;
        if (addr.offset == 0) {
            if (addr.scale == TimesOne) {
                tmpIdx = addr.index;
            } else {
                ma_lsl(Imm32(addr.scale), addr.index, ScratchRegister);
                tmpIdx = ScratchRegister;
            }
            ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg());
        } else {
            ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add);
            ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(addr.offset)),
                    val.payloadReg(), val.typeReg());
        }
    } else {
        ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add);
        loadValue(Address(ScratchRegister, addr.offset), val);
    }
}

|
void
MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val)
{
    Operand srcOp = Operand(src);
    Operand payload = ToPayload(srcOp);
    Operand type = ToType(srcOp);
    // TODO: copy this code into a generic function that acts on all sequences
    // of memory accesses.
    if (isValueDTRDCandidate(val)) {
        // If the value we want is in two consecutive registers starting with
        // an even register, they can be combined as a single ldrd.
        int offset = srcOp.disp();
        if (offset < 256 && offset > -256) {
            ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())),
                    val.payloadReg(), val.typeReg());
            return;
        }
    }
    // If the payload register is lower-numbered than the type register, we
    // may be able to use an LDM instruction.

    if (val.payloadReg().code() < val.typeReg().code()) {
        if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) {
            // Each of the four values -8, -4, 0 and 4 corresponds exactly to
            // one of LDM{DB, DA, IA, IB}.
            DTMMode mode;
            switch (srcOp.disp()) {
              case -8:
                mode = DB;
                break;
              case -4:
                mode = DA;
                break;
              case 0:
                mode = IA;
                break;
              case 4:
                mode = IB;
                break;
              default:
                MOZ_ASSUME_UNREACHABLE("Bogus Offset for LoadValue as DTM");
            }
            startDataTransferM(IsLoad, Register::FromCode(srcOp.base()), mode);
            transferReg(val.payloadReg());
            transferReg(val.typeReg());
            finishDataTransfer();
            return;
        }
    }
    // Ensure that loading the payload does not erase the pointer to the
    // Value in memory.
    if (Register::FromCode(type.base()) != val.payloadReg()) {
        ma_ldr(payload, val.payloadReg());
        ma_ldr(type, val.typeReg());
    } else {
        ma_ldr(type, val.typeReg());
        ma_ldr(payload, val.payloadReg());
    }
}

|
void
MacroAssemblerARMCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
    JS_ASSERT(dest.typeReg() != dest.payloadReg());
    if (payload != dest.payloadReg())
        ma_mov(payload, dest.payloadReg());
    ma_mov(ImmType(type), dest.typeReg());
}

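// The stack grows downwards, so pushing the type tag first and the payload
// second leaves the pair in memory in (payload, tag) order, matching the
// nunbox32 layout of a Value; popValue pops in the reverse order.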
|
void
MacroAssemblerARMCompat::pushValue(ValueOperand val) {
    ma_push(val.typeReg());
    ma_push(val.payloadReg());
}
void
MacroAssemblerARMCompat::pushValue(const Address &addr)
{
    JS_ASSERT(addr.base != StackPointer);
    Operand srcOp = Operand(addr);
    Operand payload = ToPayload(srcOp);
    Operand type = ToType(srcOp);

    ma_ldr(type, ScratchRegister);
    ma_push(ScratchRegister);
    ma_ldr(payload, ScratchRegister);
    ma_push(ScratchRegister);
}

void
MacroAssemblerARMCompat::popValue(ValueOperand val) {
    ma_pop(val.payloadReg());
    ma_pop(val.typeReg());
}
|
void
MacroAssemblerARMCompat::storePayload(const Value &val, Operand dest)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    if (val.isMarkable())
        ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), secondScratchReg_);
    else
        ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_);
    ma_str(secondScratchReg_, ToPayload(dest));
}
void
MacroAssemblerARMCompat::storePayload(Register src, Operand dest)
{
    if (dest.getTag() == Operand::MEM) {
        ma_str(src, ToPayload(dest));
        return;
    }
    MOZ_ASSUME_UNREACHABLE("why do we do all of these things?");
}

void
MacroAssemblerARMCompat::storePayload(const Value &val, Register base, Register index, int32_t shift)
{
    jsval_layout jv = JSVAL_TO_IMPL(val);
    if (val.isMarkable())
        ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister);
    else
        ma_mov(Imm32(jv.s.payload.i32), ScratchRegister);
    JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
    // If NUNBOX32_PAYLOAD_OFFSET were not zero, the memory operand
    // [base + index << shift + imm] could not be encoded into a single
    // instruction, and could not be folded into the as_dtr call.
    as_dtr(IsStore, 32, Offset, ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
}
void
MacroAssemblerARMCompat::storePayload(Register src, Register base, Register index, int32_t shift)
{
    JS_ASSERT((shift < 32) && (shift >= 0));
    // If NUNBOX32_PAYLOAD_OFFSET were not zero, the memory operand
    // [base + index << shift + imm] could not be encoded into a single
    // instruction, and could not be folded into the as_dtr call.
    JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0);
    // Technically, shift > -32 can be handled by changing LSL to ASR, but
    // that should never come up, and this is one less code path to get wrong.
    as_dtr(IsStore, 32, Offset, src, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
}

|
void
MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) {
    if (dest.getTag() == Operand::MEM) {
        ma_mov(tag, secondScratchReg_);
        ma_str(secondScratchReg_, ToType(dest));
        return;
    }

    MOZ_ASSUME_UNREACHABLE("why do we do all of these things?");
}

void
MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift) {
    JS_ASSERT(base != ScratchRegister);
    JS_ASSERT(index != ScratchRegister);
    // The tag needs to be stored at base + (index << shift) + 4, the type
    // offset. ARM cannot encode that in a single memory operand, so a temp
    // register would be required. However, the scratch register is presently
    // in use holding the immediate that is being stored into said memory
    // location. Work around this by adjusting the base so the valid
    // [base + index << shift] form can be used, then restore it.
    ma_add(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
    ma_mov(tag, ScratchRegister);
    ma_str(ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift)));
    ma_sub(base, Imm32(NUNBOX32_TYPE_OFFSET), base);
}

|
void
MacroAssemblerARMCompat::linkExitFrame() {
    uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
    movePtr(ImmPtr(dest), ScratchRegister);
    ma_str(StackPointer, Operand(ScratchRegister, 0));
}

void
MacroAssemblerARMCompat::linkParallelExitFrame(const Register &pt)
{
    ma_str(StackPointer, Operand(pt, offsetof(PerThreadData, ionTop)));
}

|
// ARM says that all reads of pc will return 8 higher than the address of the
// currently executing instruction. This means we are correctly storing the
// address of the instruction after the call in the register.
// Also, Ion is breaking the ARM EABI here (sort of). The ARM EABI says that a
// function call should move the pc into the link register, then branch to the
// function, and *sp is data that is owned by the caller, not the callee. The
// Ion ABI says *sp should be the address that we will return to when leaving
// this function.
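// Note that each of the call sequences below relies on the pc-read semantics
// described above: a store of pc issued immediately before the blx writes
// the address of the instruction following the blx, which is exactly the
// return address. This only holds if no constant pool is emitted between the
// store and the branch, which is what the AutoForbidPools guard ensures.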
|
void
MacroAssemblerARM::ma_callIon(const Register r)
{
    // When the stack is 8 byte aligned, we want to decrement sp by 8 and
    // write pc + 8 into the new *sp. When we return from this call, sp will
    // be its present value minus 4.
    AutoForbidPools afp(this);
    as_dtr(IsStore, 32, PreIndex, pc, DTRAddr(sp, DtrOffImm(-8)));
    as_blx(r);
}
void
MacroAssemblerARM::ma_callIonNoPush(const Register r)
{
    // Since we just write the return address into the stack, which is popped
    // on return, the net effect is removing 4 bytes from the stack.
    AutoForbidPools afp(this);
    as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
    as_blx(r);
}

void
MacroAssemblerARM::ma_callIonHalfPush(const Register r)
{
    // The stack is unaligned by 4 bytes. We push the pc to the stack to
    // align the stack before the call; when we return, the pc is popped and
    // the stack is restored to its unaligned state.
    AutoForbidPools afp(this);
    ma_push(pc);
    as_blx(r);
}

|
void
MacroAssemblerARM::ma_call(ImmPtr dest)
{
    RelocStyle rs;
    if (hasMOVWT())
        rs = L_MOVWT;
    else
        rs = L_LDR;

    ma_movPatchable(dest, CallReg, Always, rs);
    as_blx(CallReg);
}

void
MacroAssemblerARM::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes)
{
    // Note: this function stores the return address to sp[0]. The caller
    // must anticipate this by pushing additional space on the stack. The ABI
    // does not provide space for a return address, so this function may only
    // be called if no arguments are passed.
    JS_ASSERT(stackArgBytes == 0);
    AutoForbidPools afp(this);
    as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0)));
    as_blx(r);
}

|
void
MacroAssemblerARMCompat::breakpoint()
{
    as_bkpt();
}

void
MacroAssemblerARMCompat::ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure)
{
    Label isDouble, done;
    branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
    branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

    convertInt32ToDouble(source.payloadReg(), dest);
    jump(&done);

    bind(&isDouble);
    unboxDouble(source, dest);

    bind(&done);
}

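// There is no conditional BKPT instruction, so the conditional breakpoint
// below is faked with a faulting load: under the given condition code it
// loads from [r12 - r12], in effect dereferencing address 0, which traps.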
|
void
MacroAssemblerARMCompat::breakpoint(Condition cc)
{
    ma_ldr(DTRAddr(r12, DtrRegImmShift(r12, LSL, 0, IsDown)), r12, Offset, cc);
}

|
void
MacroAssemblerARMCompat::setupABICall(uint32_t args)
{
    JS_ASSERT(!inCall_);
    inCall_ = true;
    args_ = args;
    passedArgs_ = 0;
    passedArgTypes_ = 0;
    usedIntSlots_ = 0;
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
    usedFloatSlots_ = 0;
    usedFloat32_ = false;
    padding_ = 0;
#endif
    floatArgsInGPR[0] = MoveOperand();
    floatArgsInGPR[1] = MoveOperand();
    floatArgsInGPRValid[0] = false;
    floatArgsInGPRValid[1] = false;
}

void
MacroAssemblerARMCompat::setupAlignedABICall(uint32_t args)
{
    setupABICall(args);

    dynamicAlignment_ = false;
}

void
MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, const Register &scratch)
{
    setupABICall(args);
    dynamicAlignment_ = true;

    ma_mov(sp, scratch);

    // Force sp to be aligned.
    ma_and(Imm32(~(StackAlignment - 1)), sp, sp);
    ma_push(scratch);
}

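// Rounding sp down with sp &= ~(StackAlignment - 1) works because
// StackAlignment is a power of two. For example, with StackAlignment == 8,
// an incoming sp of 0x1004 is masked down to 0x1000; the original sp is then
// pushed so callWithABIPost can restore it after the call.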
|
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
void
MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type type)
{
    MoveOperand to;
    ++passedArgs_;
    if (!enoughMemory_)
        return;
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE: {
        // N.B. this isn't a limitation of the ABI, it is a limitation of the
        // compiler right now. There isn't a good way to handle odd-numbered
        // single registers, so everything goes to hell when we try. The
        // current fix is to never use more than one float in a function
        // call. A fix is coming along with complete float32 support, in bug
        // 957504.
        JS_ASSERT(!usedFloat32_);
        if (type == MoveOp::FLOAT32)
            usedFloat32_ = true;
        FloatRegister fr;
        if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) {
            if (from.isFloatReg() && from.floatReg() == fr) {
                // Nothing to do; the value is in the right register already.
                usedFloatSlots_++;
                if (type == MoveOp::FLOAT32)
                    passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
                else
                    passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
                return;
            }
            to = MoveOperand(fr);
        } else {
            // Only if the integer registers have started spilling do we need
            // to take the stack slot's alignment into account.
            uint32_t disp = INT_MAX;
            if (type == MoveOp::FLOAT32)
                disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
            else
                disp = GetDoubleArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
            to = MoveOperand(sp, disp);
        }
        usedFloatSlots_++;
        if (type == MoveOp::FLOAT32)
            passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
        else
            passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
        break;
      }
      case MoveOp::GENERAL: {
        Register r;
        if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) {
            if (from.isGeneralReg() && from.reg() == r) {
                // Nothing to do; the value is in the right register already.
                usedIntSlots_++;
                passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
                return;
            }
            to = MoveOperand(r);
        } else {
            uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_);
            to = MoveOperand(sp, disp);
        }
        usedIntSlots_++;
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
        break;
      }
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }

    enoughMemory_ = moveResolver_.addMove(from, to, type);
}
#endif

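// In the soft-float ABI, floating point arguments travel in the integer
// argument slots. Doubles must be 8-byte aligned, so the slot counter is
// rounded up to an even value first: for example, passing (int32_t, double)
// consumes r0, skips r1, and places the double in the r2/r3 pair.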
|
#if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
void
MacroAssemblerARMCompat::passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type)
{
    MoveOperand to;
    uint32_t increment = 1;
    bool useResolver = true;
    ++passedArgs_;
    switch (type) {
      case MoveOp::DOUBLE:
        // Double arguments need to be rounded up to the nearest doubleword
        // boundary, even when they are passed in registers!
        usedIntSlots_ = (usedIntSlots_ + 1) & ~1;
        increment = 2;
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double;
        break;
      case MoveOp::FLOAT32:
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32;
        break;
      case MoveOp::GENERAL:
        passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General;
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }

    Register destReg;
    MoveOperand dest;
    if (GetIntArgReg(usedIntSlots_, 0, &destReg)) {
        if (type == MoveOp::DOUBLE || type == MoveOp::FLOAT32) {
            floatArgsInGPR[destReg.code() >> 1] = from;
            floatArgsInGPRValid[destReg.code() >> 1] = true;
            useResolver = false;
        } else if (from.isGeneralReg() && from.reg() == destReg) {
            // No need to move anything.
            useResolver = false;
        } else {
            dest = MoveOperand(destReg);
        }
    } else {
        uint32_t disp = GetArgStackDisp(usedIntSlots_);
        dest = MoveOperand(sp, disp);
    }

    if (useResolver)
        enoughMemory_ = enoughMemory_ && moveResolver_.addMove(from, dest, type);
    usedIntSlots_ += increment;
}
#endif

|
void
MacroAssemblerARMCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
{
#if defined(JS_ARM_SIMULATOR)
    if (useHardFpABI())
        MacroAssemblerARMCompat::passHardFpABIArg(from, type);
    else
        MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
#elif defined(JS_CODEGEN_ARM_HARDFP)
    MacroAssemblerARMCompat::passHardFpABIArg(from, type);
#else
    MacroAssemblerARMCompat::passSoftFpABIArg(from, type);
#endif
}

void
MacroAssemblerARMCompat::passABIArg(const Register &reg)
{
    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}

void
MacroAssemblerARMCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type)
{
    passABIArg(MoveOperand(freg), type);
}

void MacroAssemblerARMCompat::checkStackAlignment()
{
#ifdef DEBUG
    ma_tst(sp, Imm32(StackAlignment - 1));
    breakpoint(NonZero);
#endif
}

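// callWithABIPre computes how many bytes of outgoing stack the call needs:
// one word per integer slot beyond the four integer argument registers,
// plus (for hard-float) two words per float slot beyond the VFP argument
// registers, plus padding to reach StackAlignment. As a sketch: 6 integer
// arguments on a soft-float build give (6 - 4) * 4 = 8 bytes before the
// alignment correction is added.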
|
void
MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust)
{
    JS_ASSERT(inCall_);

    *stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t);
#if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR)
    if (useHardFpABI())
        *stackAdjust += 2 * ((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t);
#endif
    if (!dynamicAlignment_) {
        *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment);
    } else {
        // The sizeof(intptr_t) accounts for the saved stack pointer pushed
        // by setupUnalignedABICall.
        *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment);
    }

    reserveStack(*stackAdjust);

    // Position all arguments.
    {
        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
        if (!enoughMemory_)
            return;

        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }
    for (int i = 0; i < 2; i++) {
        if (floatArgsInGPRValid[i]) {
            MoveOperand from = floatArgsInGPR[i];
            Register to0 = Register::FromCode(i * 2), to1 = Register::FromCode(i * 2 + 1);

            if (from.isFloatReg()) {
                ma_vxfer(VFPRegister(from.floatReg()), to0, to1);
            } else {
                JS_ASSERT(from.isMemory());
                // Note: We can safely use the MoveOperand's displacement
                // here, even if the base is SP: MoveEmitter::toOperand
                // adjusts SP-relative operands by the difference between the
                // current stack usage and stackAdjust, which emitter.finish()
                // resets to 0.
                //
                // Warning: if the offset isn't within [-255,+255] then this
                // will assert-fail (or, if non-debug, load the wrong words).
                // Nothing uses such an offset at the time of this writing.
                ma_ldrd(EDtrAddr(from.base(), EDtrOffImm(from.disp())), to0, to1);
            }
        }
    }
    checkStackAlignment();

    // Save the lr register if we need to preserve it.
    if (secondScratchReg_ != lr)
        ma_mov(lr, secondScratchReg_);
}

|
void
MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    if (secondScratchReg_ != lr)
        ma_mov(secondScratchReg_, lr);

    switch (result) {
      case MoveOp::DOUBLE:
        if (!useHardFpABI()) {
            // Move double from r0/r1 to ReturnFloatReg.
            as_vxfer(r0, r1, ReturnFloatReg, CoreToFloat);
            break;
        }
        // Else fall through: with the hard-float ABI the result is already
        // in a VFP register, so there is nothing to do.
      case MoveOp::FLOAT32:
        if (!useHardFpABI()) {
            // Move float32 from r0 to ReturnFloatReg.
            as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), CoreToFloat);
            break;
        }
      case MoveOp::GENERAL:
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected callWithABI result");
    }

    freeStack(stackAdjust);

    if (dynamicAlignment_) {
        // x86 supports pop esp; on ARM that isn't well defined, so just do
        // it manually.
        as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0)));
    }

    JS_ASSERT(inCall_);
    inCall_ = false;
}

|
#if defined(DEBUG) && defined(JS_ARM_SIMULATOR)
static void
AssertValidABIFunctionType(uint32_t passedArgTypes)
{
    switch (passedArgTypes) {
      case Args_General0:
      case Args_General1:
      case Args_General2:
      case Args_General3:
      case Args_General4:
      case Args_General5:
      case Args_General6:
      case Args_General7:
      case Args_General8:
      case Args_Double_None:
      case Args_Int_Double:
      case Args_Float32_Float32:
      case Args_Double_Double:
      case Args_Double_Int:
      case Args_Double_DoubleInt:
      case Args_Double_DoubleDouble:
      case Args_Double_IntDouble:
      case Args_Int_IntDouble:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected type");
    }
}
#endif

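// Under the simulator, the accumulated passedArgTypes_ (an ArgType_Shift-wide
// encoding of each argument's kind, with the return type appended below) is
// used to build an ABIFunctionType so the native call can be redirected
// through the simulator's trampoline.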
|
void
MacroAssemblerARMCompat::callWithABI(void *fun, MoveOp::Type result)
{
#ifdef JS_ARM_SIMULATOR
    MOZ_ASSERT(passedArgs_ <= 15);
    passedArgTypes_ <<= ArgType_Shift;
    switch (result) {
      case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break;
      case MoveOp::DOUBLE:  passedArgTypes_ |= ArgType_Double;  break;
      case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break;
      default: MOZ_ASSUME_UNREACHABLE("Invalid return type");
    }
#ifdef DEBUG
    AssertValidABIFunctionType(passedArgTypes_);
#endif
    ABIFunctionType type = ABIFunctionType(passedArgTypes_);
    fun = Simulator::RedirectNativeFunction(fun, type);
#endif

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    ma_call(ImmPtr(fun));
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(imm);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerARMCompat::callWithABI(const Address &fun, MoveOp::Type result)
{
    // Load the callee in r12; no instruction between the ldr and the call
    // should clobber it. Note that we can't use fun.base because it may be
    // one of the IntArg registers clobbered before the call.
    ma_ldr(fun, r12);
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(r12);
    callWithABIPost(stackAdjust, result);
}

|
void
MacroAssemblerARMCompat::handleFailureWithHandler(void *handler)
{
    // Reserve space for exception information.
    int size = (sizeof(ResumeFromException) + 7) & ~7;
    ma_sub(Imm32(size), sp);
    ma_mov(sp, r0);

    // Ask for an exception handler.
    setupUnalignedABICall(1, r1);
    passABIArg(r0);
    callWithABI(handler);

    JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
    branch(excTail);
}

|
void
MacroAssemblerARMCompat::handleFailureWithHandlerTail()
{
    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    ma_ldr(Operand(sp, offsetof(ResumeFromException, kind)), r0);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);

    // We're going to be returning by the Ion calling convention, which pops
    // the return address off the stack (for now: ldr pc, [sp], #+4, emitted
    // just below).
    as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));

    // If we found a catch handler, this must be a baseline frame. Restore
    // state and jump to the catch block.
    bind(&catch_);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
    jump(r0);

    // If we found a finally block, this must be a baseline frame. Push two
    // values expected by JSOP_RETSUB: BooleanValue(true) and the exception.
    bind(&finally);
    ValueOperand exception = ValueOperand(r1, r2);
    loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception);

    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);

    pushValue(BooleanValue(true));
    pushValue(exception);
    jump(r0);

    // Only used in debug mode. Return BaselineFrame->returnValue() to the
    // caller.
    bind(&return_);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp);
    loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
    ma_mov(r11, sp);
    pop(r11);
    ret();

    // If we are bailing out to baseline to handle an exception, jump to the
    // bailout tail stub.
    bind(&bailout);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2);
    ma_mov(Imm32(BAILOUT_RETURN_OK), r0);
    ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r1);
    jump(r1);
}

|
Assembler::Condition
MacroAssemblerARMCompat::testStringTruthy(bool truthy, const ValueOperand &value)
{
    Register string = value.payloadReg();

    size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
    ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLengthAndFlags()), ScratchRegister);
    // Bit-clear into the scratch register: BIC performs the operation
    // dest <- src1 & ~src2, and there is no flag-setting variant that avoids
    // writing the result somewhere, so the scratch register is sacrificed.
    // This leaves only the length bits, so the Zero flag is set exactly when
    // the string is empty.
    ma_bic(Imm32(~mask), ScratchRegister, SetCond);
    return truthy ? Assembler::NonZero : Assembler::Zero;
}

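// floor() truncates via an unsigned conversion, then patches up the sign
// cases. A worked example of the negative path (a sketch): for an input of
// -3.5, the value is negated to 3.5 and truncated to 3; since 3.0 != 3.5,
// the result is bumped to 4 and then negated, yielding floor(-3.5) == -4.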
|
void
MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label *bail)
{
    Label handleZero;
    Label handleNeg;
    Label fin;
    compareDouble(input, InvalidFloatReg);
    ma_b(&handleZero, Assembler::Equal);
    ma_b(&handleNeg, Assembler::Signed);
    // NaN is always a bail condition, just bail directly.
    ma_b(bail, Assembler::Overflow);

    // The argument is a positive number; truncation is the path to glory.
    // Since it is known to be > 0.0, explicitly convert to the larger
    // unsigned range, so that a value which truncates to INT_MAX is
    // distinguishable from an argument that clamps to INT_MAX.
    ma_vcvt_F64_U32(input, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_mov(output, output, SetCond);
    ma_b(bail, Signed);
    ma_b(&fin);

    bind(&handleZero);
    // Move the top word of the double into the output reg; if it is
    // non-zero, then the original value was -0.0.
    as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
    ma_cmp(output, Imm32(0));
    ma_b(bail, NonZero);
    ma_b(&fin);

    bind(&handleNeg);
    // Negative case: negate, then start dancing.
    ma_vneg(input, input);
    ma_vcvt_F64_U32(input, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg);
    compareDouble(ScratchFloatReg, input);
    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1 the
    // result will still be a negative number.
    ma_rsb(output, Imm32(0), output, SetCond);
    // Flip the negated input back to its original value.
    ma_vneg(input, input);
    // If the result looks non-negative, then this value didn't actually fit
    // into the int range and special handling is required. Zero is also
    // caught by this case, but the floor of a negative number should never
    // be zero.
    ma_b(bail, NotSigned);

    bind(&fin);
}

|
void
MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bail)
{
    Label handleZero;
    Label handleNeg;
    Label fin;
    compareFloat(input, InvalidFloatReg);
    ma_b(&handleZero, Assembler::Equal);
    ma_b(&handleNeg, Assembler::Signed);
    // NaN is always a bail condition, just bail directly.
    ma_b(bail, Assembler::Overflow);

    // The argument is a positive number; truncation is the path to glory.
    // Since it is known to be > 0.0, explicitly convert to the larger
    // unsigned range, so that a value which truncates to INT_MAX is
    // distinguishable from an argument that clamps to INT_MAX.
    ma_vcvt_F32_U32(input, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_mov(output, output, SetCond);
    ma_b(bail, Signed);
    ma_b(&fin);

    bind(&handleZero);
    // Move the float into the output reg; if it is non-zero, then the
    // original value was -0.0.
    as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0);
    ma_cmp(output, Imm32(0));
    ma_b(bail, NonZero);
    ma_b(&fin);

    bind(&handleNeg);
    // Negative case: negate, then start dancing.
    ma_vneg_f32(input, input);
    ma_vcvt_F32_U32(input, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg);
    compareFloat(ScratchFloatReg, input);
    ma_add(output, Imm32(1), output, NoSetCond, NotEqual);
    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1 the
    // result will still be a negative number.
    ma_rsb(output, Imm32(0), output, SetCond);
    // Flip the negated input back to its original value.
    ma_vneg_f32(input, input);
    // If the result looks non-negative, then this value didn't actually fit
    // into the int range and special handling is required. Zero is also
    // caught by this case, but the floor of a negative number should never
    // be zero.
    ma_b(bail, NotSigned);

    bind(&fin);
}

|
CodeOffsetLabel
MacroAssemblerARMCompat::toggledJump(Label *label)
{
    // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp().

    BufferOffset b = ma_b(label, Always, true);
    CodeOffsetLabel ret(b.getOffset());
    return ret;
}

CodeOffsetLabel
MacroAssemblerARMCompat::toggledCall(JitCode *target, bool enabled)
{
    BufferOffset bo = nextOffset();
    CodeOffsetLabel offset(bo.getOffset());
    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
    ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, hasMOVWT() ? L_MOVWT : L_LDR);
    if (enabled)
        ma_blx(ScratchRegister);
    else
        ma_nop();
    JS_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize());
    return offset;
}

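// round() computes round-half-up by adding 0.5 to the absolute value and
// truncating, then fixing up the sign and the exact-halfway cases. A worked
// example of the negative path (a sketch): for -2.5, tmp = |-2.5| + 0.5 =
// 3.0, which truncates to 3; since the conversion back gives exactly tmp,
// the halfway correction subtracts 1, and negation yields round(-2.5) == -2.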
|
void
MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail, FloatRegister tmp)
{
    Label handleZero;
    Label handleNeg;
    Label fin;
    // Do a compare based on the original value, then do most other things
    // based on the adjusted (biased) value.
    ma_vcmpz(input);
    // Adding 0.5 is technically incorrect!
    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
    // positive numbers.
    ma_vimm(0.5, ScratchFloatReg);
    // Since we already know the sign bit, flip all numbers to be positive,
    // stored in tmp.
    ma_vabs(input, tmp);
    // Add 0.5, storing the result into tmp.
    ma_vadd(ScratchFloatReg, tmp, tmp);
    as_vmrs(pc);
    ma_b(&handleZero, Assembler::Equal);
    ma_b(&handleNeg, Assembler::Signed);
    // NaN is always a bail condition, just bail directly.
    ma_b(bail, Assembler::Overflow);

    // The argument is a positive number; truncation is the path to glory.
    // Since it is known to be > 0.0, explicitly convert to the larger
    // unsigned range, so that a value which truncates to INT_MAX is
    // distinguishable from an argument that clamps to INT_MAX.
    ma_vcvt_F64_U32(tmp, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_mov(output, output, SetCond);
    ma_b(bail, Signed);
    ma_b(&fin);

    bind(&handleZero);
    // Move the top word of the double into the output reg; if it is
    // non-zero, then the original value was -0.0.
    as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
    ma_cmp(output, Imm32(0));
    ma_b(bail, NonZero);
    ma_b(&fin);

    bind(&handleNeg);
    // Negative case: negate, then start dancing. This number may be
    // positive, since we added 0.5.
    ma_vcvt_F64_U32(tmp, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);

    // -output is now a correctly rounded value, unless the original value
    // was exactly halfway between two integers, in which case it was rounded
    // away from zero when it should have been rounded towards +Infinity.
    ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg);
    compareDouble(ScratchFloatReg, tmp);
    ma_sub(output, Imm32(1), output, NoSetCond, Equal);
    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1 the
    // result will still be a negative number.
    ma_rsb(output, Imm32(0), output, SetCond);

    // If the result looks non-negative, then this value didn't actually fit
    // into the int range and special handling is required; or it was zero,
    // which means the result is actually -0.0, which also requires special
    // handling.
    ma_b(bail, NotSigned);

    bind(&fin);
}

|
void
MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bail, FloatRegister tmp)
{
    Label handleZero;
    Label handleNeg;
    Label fin;
    // Do a compare based on the original value, then do most other things
    // based on the adjusted (biased) value.
    ma_vcmpz_f32(input);
    // Adding 0.5 is technically incorrect!
    // We want to add 0.5 to negative numbers, and 0.49999999999999999 to
    // positive numbers.
    ma_vimm_f32(0.5f, ScratchFloatReg);
    // Since we already know the sign bit, flip all numbers to be positive,
    // stored in tmp.
    ma_vabs_f32(input, tmp);
    // Add 0.5, storing the result into tmp.
    ma_vadd_f32(ScratchFloatReg, tmp, tmp);
    as_vmrs(pc);
    ma_b(&handleZero, Assembler::Equal);
    ma_b(&handleNeg, Assembler::Signed);
    // NaN is always a bail condition, just bail directly.
    ma_b(bail, Assembler::Overflow);

    // The argument is a positive number; truncation is the path to glory.
    // Since it is known to be > 0.0, explicitly convert to the larger
    // unsigned range, so that a value which truncates to INT_MAX is
    // distinguishable from an argument that clamps to INT_MAX.
    ma_vcvt_F32_U32(tmp, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);
    ma_mov(output, output, SetCond);
    ma_b(bail, Signed);
    ma_b(&fin);

    bind(&handleZero);
    // Move the top word of the double into the output reg; if it is
    // non-zero, then the original value was -0.0.
    as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1);
    ma_cmp(output, Imm32(0));
    ma_b(bail, NonZero);
    ma_b(&fin);

    bind(&handleNeg);
    // Negative case: negate, then start dancing. This number may be
    // positive, since we added 0.5.
    ma_vcvt_F32_U32(tmp, ScratchFloatReg);
    ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output);

    // -output is now a correctly rounded value, unless the original value
    // was exactly halfway between two integers, in which case it was rounded
    // away from zero when it should have been rounded towards +Infinity.
    ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg);
    compareFloat(ScratchFloatReg, tmp);
    ma_sub(output, Imm32(1), output, NoSetCond, Equal);
    // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1 the
    // result will still be a negative number.
    ma_rsb(output, Imm32(0), output, SetCond);

    // If the result looks non-negative, then this value didn't actually fit
    // into the int range and special handling is required; or it was zero,
    // which means the result is actually -0.0, which also requires special
    // handling.
    ma_b(bail, NotSigned);

    bind(&fin);
}

|
CodeOffsetJump
MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond)
{
    ARMBuffer::PoolEntry pe;
    BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond);
    // Fill in a new CodeOffset with both the load and the pool entry that
    // the instruction loads from.
    CodeOffsetJump ret(bo.getOffset(), pe.encode());
    return ret;
}

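// The nursery check below uses a single unsigned comparison: subtracting the
// first chunk's number from ptr >> ChunkShift maps nursery pointers onto
// [0, NumNurseryChunks), so anything outside the nursery wraps around to a
// large unsigned value and fails the Below test.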
|
#ifdef JSGC_GENERATIONAL

void
MacroAssemblerARMCompat::branchPtrInNurseryRange(Register ptr, Register temp, Label *label)
{
    JS_ASSERT(ptr != temp);
    JS_ASSERT(ptr != secondScratchReg_);

    const Nursery &nursery = GetIonContext()->runtime->gcNursery();
    uintptr_t startChunk = nursery.start() >> Nursery::ChunkShift;

    ma_mov(Imm32(startChunk), secondScratchReg_);
    as_rsb(secondScratchReg_, secondScratchReg_, lsr(ptr, Nursery::ChunkShift));
    branch32(Assembler::Below, secondScratchReg_, Imm32(Nursery::NumNurseryChunks), label);
}

void
MacroAssemblerARMCompat::branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label)
{
    Label done;

    branchTestObject(Assembler::NotEqual, value, &done);
    branchPtrInNurseryRange(value.payloadReg(), temp, label);

    bind(&done);
}

#endif