|
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
|
2 * vim: set ts=8 sts=4 et sw=4 tw=99: |
|
3 * This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #include "jit/arm/CodeGenerator-arm.h" |
|
8 |
|
9 #include "mozilla/MathAlgorithms.h" |
|
10 |
|
11 #include "jscntxt.h" |
|
12 #include "jscompartment.h" |
|
13 #include "jsnum.h" |
|
14 |
|
15 #include "jit/CodeGenerator.h" |
|
16 #include "jit/IonFrames.h" |
|
17 #include "jit/JitCompartment.h" |
|
18 #include "jit/MIR.h" |
|
19 #include "jit/MIRGraph.h" |
|
20 #include "vm/Shape.h" |
|
21 #include "vm/TraceLogging.h" |
|
22 |
|
23 #include "jsscriptinlines.h" |
|
24 |
|
25 #include "jit/shared/CodeGenerator-shared-inl.h" |
|
26 |
|
27 using namespace js; |
|
28 using namespace js::jit; |
|
29 |
|
30 using mozilla::FloorLog2; |
|
31 using mozilla::NegativeInfinity; |
|
32 using JS::GenericNaN; |
|
33 |
|
// shared
// Trivial constructor: all state lives in CodeGeneratorShared; the ARM
// backend adds no per-instance members of its own.
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm)
{
}
|
39 |
|
// Emit the prologue for ordinary (non-asm.js) Ion code: reserve the static
// frame and verify stack alignment. asm.js code uses generateAsmJSPrologue
// instead.
bool
CodeGeneratorARM::generatePrologue()
{
    JS_ASSERT(!gen->compilingAsmJS());

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameSize());
    masm.checkStackAlignment();
    return true;
}
|
50 |
|
// Emit the asm.js prologue: push the return address, optionally perform the
// stack-overflow check, then reserve the frame. The ordering of these steps
// is significant (see comment below).
bool
CodeGeneratorARM::generateAsmJSPrologue(Label *stackOverflowLabel)
{
    JS_ASSERT(gen->compilingAsmJS());

    masm.Push(lr);

    // The asm.js over-recursed handler wants to be able to assume that SP
    // points to the return address, so perform the check after pushing lr but
    // before pushing frameDepth.
    if (!omitOverRecursedCheck()) {
        masm.branchPtr(Assembler::AboveOrEqual,
                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
                       StackPointer,
                       stackOverflowLabel);
    }

    // Note that this automatically sets MacroAssembler::framePushed().
    masm.reserveStack(frameDepth_);
    masm.checkStackAlignment();
    return true;
}
|
73 |
|
// Emit the common epilogue: bind the shared return label, emit optional
// trace-logger stop events, free the frame, and return by popping the saved
// return address directly into pc.
bool
CodeGeneratorARM::generateEpilogue()
{
    masm.bind(&returnLabel_);

#ifdef JS_TRACE_LOGGING
    // Trace logging only applies to sequential (non-asm.js) Ion code.
    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
        if (!emitTracelogStopEvent(TraceLogger::IonMonkey))
            return false;
        if (!emitTracelogScriptStop())
            return false;
    }
#endif

    if (gen->compilingAsmJS()) {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameDepth_);
        masm.Pop(pc);
        JS_ASSERT(masm.framePushed() == 0);
        //masm.as_bkpt();
    } else {
        // Pop the stack we allocated at the start of the function.
        masm.freeStack(frameSize());
        JS_ASSERT(masm.framePushed() == 0);
        masm.ma_pop(pc);
    }
    // Flush any pending constant pool before the code ends.
    masm.dumpPool();
    return true;
}
|
103 |
|
104 void |
|
105 CodeGeneratorARM::emitBranch(Assembler::Condition cond, MBasicBlock *mirTrue, MBasicBlock *mirFalse) |
|
106 { |
|
107 if (isNextBlock(mirFalse->lir())) { |
|
108 jumpToBlock(mirTrue, cond); |
|
109 } else { |
|
110 jumpToBlock(mirFalse, Assembler::InvertCondition(cond)); |
|
111 jumpToBlock(mirTrue); |
|
112 } |
|
113 } |
|
114 |
|
115 |
|
// Visitor dispatch: route this out-of-line path to the code generator's
// bailout emitter.
bool
OutOfLineBailout::accept(CodeGeneratorARM *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}
|
121 |
|
// Branch on an int32 truth test. The three cases below exploit block layout
// so that at most one conditional branch (plus at most one unconditional
// branch) is emitted.
bool
CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch *test)
{
    const LAllocation *opd = test->getOperand(0);
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();

    // Test the operand: compare against zero to set the flags.
    masm.ma_cmp(ToRegister(opd), Imm32(0));

    if (isNextBlock(ifFalse->lir())) {
        // False block falls through; branch to the true block when non-zero.
        jumpToBlock(ifTrue, Assembler::NonZero);
    } else if (isNextBlock(ifTrue->lir())) {
        // True block falls through; branch to the false block when zero.
        jumpToBlock(ifFalse, Assembler::Zero);
    } else {
        // Neither falls through: conditional branch plus unconditional jump.
        jumpToBlock(ifFalse, Assembler::Zero);
        jumpToBlock(ifTrue);
    }
    return true;
}
|
142 |
|
143 bool |
|
144 CodeGeneratorARM::visitCompare(LCompare *comp) |
|
145 { |
|
146 Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop()); |
|
147 const LAllocation *left = comp->getOperand(0); |
|
148 const LAllocation *right = comp->getOperand(1); |
|
149 const LDefinition *def = comp->getDef(0); |
|
150 |
|
151 if (right->isConstant()) |
|
152 masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right))); |
|
153 else |
|
154 masm.ma_cmp(ToRegister(left), ToOperand(right)); |
|
155 masm.ma_mov(Imm32(0), ToRegister(def)); |
|
156 masm.ma_mov(Imm32(1), ToRegister(def), NoSetCond, cond); |
|
157 return true; |
|
158 } |
|
159 |
|
160 bool |
|
161 CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch *comp) |
|
162 { |
|
163 Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->compareType(), comp->jsop()); |
|
164 if (comp->right()->isConstant()) |
|
165 masm.ma_cmp(ToRegister(comp->left()), Imm32(ToInt32(comp->right()))); |
|
166 else |
|
167 masm.ma_cmp(ToRegister(comp->left()), ToOperand(comp->right())); |
|
168 emitBranch(cond, comp->ifTrue(), comp->ifFalse()); |
|
169 return true; |
|
170 |
|
171 } |
|
172 |
|
// Emit out-of-line code, then append the shared deoptimization tail that all
// non-table-based bailouts branch to.
bool
CodeGeneratorARM::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    if (deoptLabel_.used()) {
        // All non-table-based bailouts will go here.
        masm.bind(&deoptLabel_);

        // Push the frame size, so the handler can recover the IonScript.
        // lr is dead at this point (we are off the normal return path), so
        // it can be clobbered to carry the frame size.
        masm.ma_mov(Imm32(frameSize()), lr);

        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
        masm.branch(handler);
    }

    return true;
}
|
192 |
|
// Emit a conditional bailout: when `condition` holds at runtime, leave JIT
// code through either the bailout table (preferred) or a lazily generated
// out-of-line bailout stub.
bool
CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.ma_b(ool->entry(), condition);
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }
    // Record the snapshot so the bailout handler can reconstruct the frame.
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // Fast path: branch directly into the pre-generated bailout table entry.
    if (assignBailoutId(snapshot)) {
        uint8_t *code = deoptTable_->raw() + snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE;
        masm.ma_b(code, Relocation::HARDCODED, condition);
        return true;
    }

    // We could not use a jump table, either because all bailout IDs were
    // reserved, or a jump table is not optimal for this frame size or
    // platform. Whatever, we will generate a lazy bailout.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool))
        return false;

    masm.ma_b(ool->entry(), condition);

    return true;
}
|
// Retarget an already-emitted (but unbound) branch at a bailout stub. Unlike
// bailoutIf, this path never uses the bailout table, since the branch was
// emitted before the bailout decision was made.
bool
CodeGeneratorARM::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    // Propagate a prior macro-assembler failure.
    if (masm.bailed())
        return false;
    JS_ASSERT(label->used());
    JS_ASSERT(!label->bound());

    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {

      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineAbortPar *ool = oolAbortPar(ParallelBailoutUnsupported,
                                             snapshot->mir()->block(),
                                             snapshot->mir()->pc());
        masm.retarget(label, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    }

    // Record the snapshot so the bailout handler can reconstruct the frame.
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // On ARM we don't use a bailout table.
    OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool)) {
        return false;
    }

    masm.retarget(label, ool->entry());

    return true;
}
|
282 |
|
// Unconditional bailout: emit a branch to a fresh label and let bailoutFrom
// retarget it at the out-of-line bailout stub.
bool
CodeGeneratorARM::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.ma_b(&label);
    return bailoutFrom(&label, snapshot);
}
|
290 |
|
// Emit the out-of-line bailout stub: push the snapshot offset (twice, to
// fill both BailoutStack fields and keep the stack aligned) and branch to
// the shared deoptimization tail bound in generateOutOfLineCode.
bool
CodeGeneratorARM::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    masm.ma_mov(Imm32(ool->snapshot()->snapshotOffset()), ScratchRegister);
    masm.ma_push(ScratchRegister); // BailoutStack::padding_
    masm.ma_push(ScratchRegister); // BailoutStack::snapshotOffset_
    masm.ma_b(&deoptLabel_);
    return true;
}
|
300 |
|
// Compute min or max of two doubles with full JS semantics: NaN propagates,
// and -0 vs +0 is resolved correctly (min(-0, 0) == -0, max(-0, 0) == 0),
// which an ordinary flag-based compare cannot distinguish.
bool
CodeGeneratorARM::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
    FloatRegister output = ToFloatRegister(ins->output());

    // The register allocator pins first == output so the result can be
    // computed in place.
    JS_ASSERT(first == output);

    // For max we return `second` when first <= second; for min when >=.
    Assembler::Condition cond = ins->mir()->isMax()
                                ? Assembler::VFP_LessThanOrEqual
                                : Assembler::VFP_GreaterThanOrEqual;
    Label nan, equal, returnSecond, done;

    masm.compareDouble(first, second);
    masm.ma_b(&nan, Assembler::VFP_Unordered); // first or second is NaN, result is NaN.
    masm.ma_b(&equal, Assembler::VFP_Equal); // make sure we handle -0 and 0 right.
    masm.ma_b(&returnSecond, cond);
    masm.ma_b(&done);

    // Check for zero.
    masm.bind(&equal);
    masm.compareDouble(first, InvalidFloatReg);
    masm.ma_b(&done, Assembler::VFP_NotEqualOrUnordered); // first wasn't 0 or -0, so just return it.
    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
        masm.ma_vadd(second, first, first); // -0 + -0 = -0 and -0 + 0 = 0.
    } else {
        // min: negate, add via subtraction, negate back, so that a -0 on
        // either side yields -0.
        masm.ma_vneg(first, first);
        masm.ma_vsub(first, second, first);
        masm.ma_vneg(first, first);
    }
    masm.ma_b(&done);

    masm.bind(&nan);
    masm.loadConstantDouble(GenericNaN(), output);
    masm.ma_b(&done);

    masm.bind(&returnSecond);
    masm.ma_vmov(second, output);

    masm.bind(&done);
    return true;
}
|
345 |
|
346 bool |
|
347 CodeGeneratorARM::visitAbsD(LAbsD *ins) |
|
348 { |
|
349 FloatRegister input = ToFloatRegister(ins->input()); |
|
350 JS_ASSERT(input == ToFloatRegister(ins->output())); |
|
351 masm.ma_vabs(input, input); |
|
352 return true; |
|
353 } |
|
354 |
|
355 bool |
|
356 CodeGeneratorARM::visitAbsF(LAbsF *ins) |
|
357 { |
|
358 FloatRegister input = ToFloatRegister(ins->input()); |
|
359 JS_ASSERT(input == ToFloatRegister(ins->output())); |
|
360 masm.ma_vabs_f32(input, input); |
|
361 return true; |
|
362 } |
|
363 |
|
364 bool |
|
365 CodeGeneratorARM::visitSqrtD(LSqrtD *ins) |
|
366 { |
|
367 FloatRegister input = ToFloatRegister(ins->input()); |
|
368 FloatRegister output = ToFloatRegister(ins->output()); |
|
369 masm.ma_vsqrt(input, output); |
|
370 return true; |
|
371 } |
|
372 |
|
373 bool |
|
374 CodeGeneratorARM::visitSqrtF(LSqrtF *ins) |
|
375 { |
|
376 FloatRegister input = ToFloatRegister(ins->input()); |
|
377 FloatRegister output = ToFloatRegister(ins->output()); |
|
378 masm.ma_vsqrt_f32(input, output); |
|
379 return true; |
|
380 } |
|
381 |
|
382 bool |
|
383 CodeGeneratorARM::visitAddI(LAddI *ins) |
|
384 { |
|
385 const LAllocation *lhs = ins->getOperand(0); |
|
386 const LAllocation *rhs = ins->getOperand(1); |
|
387 const LDefinition *dest = ins->getDef(0); |
|
388 |
|
389 if (rhs->isConstant()) |
|
390 masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond); |
|
391 else |
|
392 masm.ma_add(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond); |
|
393 |
|
394 if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot())) |
|
395 return false; |
|
396 |
|
397 return true; |
|
398 } |
|
399 |
|
400 bool |
|
401 CodeGeneratorARM::visitSubI(LSubI *ins) |
|
402 { |
|
403 const LAllocation *lhs = ins->getOperand(0); |
|
404 const LAllocation *rhs = ins->getOperand(1); |
|
405 const LDefinition *dest = ins->getDef(0); |
|
406 |
|
407 if (rhs->isConstant()) |
|
408 masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCond); |
|
409 else |
|
410 masm.ma_sub(ToRegister(lhs), ToOperand(rhs), ToRegister(dest), SetCond); |
|
411 |
|
412 if (ins->snapshot() && !bailoutIf(Assembler::Overflow, ins->snapshot())) |
|
413 return false; |
|
414 return true; |
|
415 } |
|
416 |
|
// Int32 multiplication, with strength reduction for constant right-hand
// sides (negation, shifts, shift+add) and bailouts for overflow and for
// results that should be -0 (which cannot be represented as an int32).
bool
CodeGeneratorARM::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    MMul *mul = ins->mir();
    JS_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        // Bailout when this condition is met.
        Assembler::Condition c = Assembler::Overflow;
        // Bailout on -0.0: a zero result is -0 when the constant is 0 and
        // lhs < 0, or when the constant is negative and lhs == 0.
        int32_t constant = ToInt32(rhs);
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            masm.ma_cmp(ToRegister(lhs), Imm32(0));
            if (!bailoutIf(bailoutCond, ins->snapshot()))
                return false;
        }
        // TODO: move these to ma_mul.
        switch (constant) {
          case -1:
            // dest = 0 - lhs; SetCond so the overflow check below works
            // (overflows only for lhs == INT32_MIN).
            masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCond);
            break;
          case 0:
            masm.ma_mov(Imm32(0), ToRegister(dest));
            return true; // escape overflow check;
          case 1:
            // nop
            masm.ma_mov(ToRegister(lhs), ToRegister(dest));
            return true; // escape overflow check;
          case 2:
            masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCond);
            // Overflow is handled later.
            break;
          default: {
            bool handled = false;
            if (constant > 0) {
                // Try shift and add sequences for a positive constant.
                if (!mul->canOverflow()) {
                    // If it cannot overflow, we can do lots of optimizations
                    Register src = ToRegister(lhs);
                    uint32_t shift = FloorLog2(constant);
                    uint32_t rest = constant - (1 << shift);
                    // See if the constant has one bit set, meaning it can be encoded as a bitshift
                    if ((1 << shift) == constant) {
                        masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
                        handled = true;
                    } else {
                        // If the constant cannot be encoded as (1<<C1), see if it can be encoded as
                        // (1<<C1) | (1<<C2), which can be computed using an add and a shift
                        uint32_t shift_rest = FloorLog2(rest);
                        if ((1u << shift_rest) == rest) {
                            masm.as_add(ToRegister(dest), src, lsl(src, shift-shift_rest));
                            if (shift_rest != 0)
                                masm.ma_lsl(Imm32(shift_rest), ToRegister(dest), ToRegister(dest));
                            handled = true;
                        }
                    }
                } else if (ToRegister(lhs) != ToRegister(dest)) {
                    // To stay on the safe side, only optimize things that are a
                    // power of 2.

                    uint32_t shift = FloorLog2(constant);
                    if ((1 << shift) == constant) {
                        // dest = lhs * pow(2,shift)
                        masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
                        // At runtime, check (lhs == dest >> shift), if this does not hold,
                        // some bits were lost due to overflow, and the computation should
                        // be resumed as a double.
                        masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
                        c = Assembler::NotEqual;
                        handled = true;
                    }
                }
            }

            // No shift-based encoding applied; fall back to a real multiply,
            // with an explicit check sequence when overflow is possible (the
            // ARM mul instruction does not set the overflow flag).
            if (!handled) {
                if (mul->canOverflow())
                    c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
                else
                    masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
            }
          }
        }
        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;
    } else {
        Assembler::Condition c = Assembler::Overflow;

        //masm.imull(ToOperand(rhs), ToRegister(lhs));
        if (mul->canOverflow())
            c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
        else
            masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));

        // Bailout on overflow
        if (mul->canOverflow() && !bailoutIf(c, ins->snapshot()))
            return false;

        if (mul->canBeNegativeZero()) {
            Label done;
            masm.ma_cmp(ToRegister(dest), Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);

            // Result is -0 if lhs or rhs is negative.
            // cmn (compare-negative) sets N when lhs + rhs < 0, which for a
            // zero product implies one operand was negative.
            masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}
|
535 |
|
// Shared guards for int32 division (hardware and software paths): handle
// INT32_MIN / -1 overflow, division by zero, and negative zero, either by
// producing the truncated result or by bailing out.
bool
CodeGeneratorARM::divICommon(MDiv *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    if (mir->canBeNegativeOverflow()) {
        // Handle INT32_MIN / -1;
        // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
        // The second cmp executes conditionally (Assembler::Equal), so EQ
        // survives only when both comparisons matched.
        masm.ma_cmp(lhs, Imm32(INT32_MIN)); // sets EQ if lhs == INT32_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT32_MIN), sets EQ if rhs == -1
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 = INT32_MIN
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(INT32_MIN), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (mir->canTruncateInfinities()) {
            // Infinity|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    // Handle negative 0. (0 / rhs with rhs < 0 would be -0, which is not an
    // int32; bail out so it can be recomputed as a double.)
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.ma_cmp(lhs, Imm32(0));
        masm.ma_b(&nonzero, Assembler::NotEqual);
        masm.ma_cmp(rhs, Imm32(0));
        JS_ASSERT(mir->fallible());
        if (!bailoutIf(Assembler::LessThan, snapshot))
            return false;
        masm.bind(&nonzero);
    }

    return true;
}
|
590 |
|
// Int32 division using the hardware sdiv instruction. When the remainder
// cannot be truncated, verify it is zero (by re-multiplying) and bail out
// otherwise, since a fractional result must be a double.
bool
CodeGeneratorARM::visitDivI(LDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register temp = ToRegister(ins->getTemp(0));
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    if (mir->canTruncateRemainder()) {
        masm.ma_sdiv(lhs, rhs, output);
    } else {
        // Compute the quotient into a scratch register, then check that
        // quotient * rhs == lhs, i.e. the division was exact.
        masm.ma_sdiv(lhs, rhs, ScratchRegister);
        masm.ma_mul(ScratchRegister, rhs, temp);
        masm.ma_cmp(lhs, temp);
        if (!bailoutIf(Assembler::NotEqual, ins->snapshot()))
            return false;
        masm.ma_mov(ScratchRegister, output);
    }

    masm.bind(&done);

    return true;
}
|
620 |
|
// ARM EABI runtime helpers used when the target lacks hardware integer
// division. Per the EABI, each returns the quotient in r0 and the remainder
// in r1, which this declaration views as a packed int64_t.
extern "C" {
    extern int64_t __aeabi_idivmod(int,int);
    extern int64_t __aeabi_uidivmod(int,int);
}
|
625 |
|
// Int32 division via the __aeabi_idivmod runtime call, for targets without
// hardware sdiv. The quotient lands in r0 (the output) and the remainder in
// r1; a non-zero remainder forces a bailout when it cannot be truncated.
bool
CodeGeneratorARM::visitSoftDivI(LSoftDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    MDiv *mir = ins->mir();

    Label done;
    if (!divICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code must use a patchable absolute address for the callee.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));
    // idivmod returns the quotient in r0, and the remainder in r1.
    if (!mir->canTruncateRemainder()) {
        JS_ASSERT(mir->fallible());
        masm.ma_cmp(r1, Imm32(0));
        if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);

    return true;
}
|
658 |
|
// Int32 division by a known power of two, implemented with shifts. Negative
// numerators are pre-adjusted so that the arithmetic shift rounds toward
// zero (truncation) rather than toward negative infinity.
bool
CodeGeneratorARM::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    Register output = ToRegister(ins->output());
    int32_t shift = ins->shift();

    if (shift != 0) {
        MDiv *mir = ins->mir();
        if (!mir->isTruncated()) {
            // If the remainder is != 0, bailout since this must be a double.
            // Shifting left by (32 - shift) isolates the low `shift` bits;
            // SetCond makes the Z flag reflect whether they were all zero.
            masm.as_mov(ScratchRegister, lsl(lhs, 32 - shift), SetCond);
            if (!bailoutIf(Assembler::NonZero, ins->snapshot()))
                return false;
        }

        if (!mir->canBeNegativeDividend()) {
            // Numerator is unsigned, so needs no adjusting. Do the shift.
            masm.as_mov(output, asr(lhs, shift));
            return true;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (shift > 1) {
            // Replicate the sign bit, then add (2^shift - 1) for negative lhs.
            masm.as_mov(ScratchRegister, asr(lhs, 31));
            masm.as_add(ScratchRegister, lhs, lsr(ScratchRegister, 32 - shift));
        } else
            masm.as_add(ScratchRegister, lhs, lsr(lhs, 32 - shift));

        // Do the shift.
        masm.as_mov(output, asr(ScratchRegister, shift));
    } else {
        // Dividing by 1: just copy the numerator.
        masm.ma_mov(lhs, output);
    }

    return true;
}
|
698 |
|
// Shared guards for int32 modulus: handle X % 0 and 0 % Y (with Y < 0),
// both of which produce values (NaN / -0) that are not representable as
// int32, either by truncating to 0 or by bailing out.
bool
CodeGeneratorARM::modICommon(MMod *mir, Register lhs, Register rhs, Register output,
                             LSnapshot *snapshot, Label &done)
{
    // 0/X (with X < 0) is bad because both of these values *should* be doubles, and
    // the result should be -0.0, which cannot be represented in integers.
    // X/0 is bad because it will give garbage (or abort), when it should give
    // either \infty, -\infty or NAN.

    // Prevent 0 / X (with X < 0) and X / 0
    // testing X / Y. Compare Y with 0.
    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
    // If (Y < 0), then we compare X with 0, and bail if X == 0
    // If (Y == 0), then we simply want to bail. Since this does not set
    // the flags necessary for LT to trigger, we don't test X, and take the
    // bailout because the EQ flag is set.
    // if (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take the bailout.
    if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
        masm.ma_cmp(rhs, Imm32(0));
        // Second compare executes only when the first left LT set (rhs < 0).
        masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
        if (mir->isTruncated()) {
            // NaN|0 == 0 and (0 % -X)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, snapshot))
                return false;
        }
    }

    return true;
}
|
735 |
|
// Int32 modulus using hardware division (smod). A zero result with a
// negative dividend should be -0, which is not an int32, so that case
// bails out when it cannot be truncated.
bool
CodeGeneratorARM::visitModI(LModI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();

    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    masm.ma_mov(lhs, callTemp);

    Label done;
    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.ma_smod(lhs, rhs, output);

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0
            masm.ma_cmp(output, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            // Result is zero: bail out if the saved dividend was negative.
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }

    masm.bind(&done);
    return true;
}
|
772 |
|
// Int32 modulus via the __aeabi_idivmod runtime call, for targets without
// hardware division. The remainder lands in r1 (the output). Handles the
// INT_MIN % -1 overflow case explicitly before the call.
bool
CodeGeneratorARM::visitSoftModI(LSoftModI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();
    Label done;

    // save the lhs in case we end up with a 0 that should be a -0.0 because lhs < 0.
    // callTemp must be a callee-saved register (above r3, below r12) so it
    // survives the ABI call.
    JS_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
    masm.ma_mov(lhs, callTemp);

    // Prevent INT_MIN % -1;
    // The integer division will give INT_MIN, but we want -(double)INT_MIN.
    if (mir->canBeNegativeDividend()) {
        // The second cmp executes conditionally, so EQ survives only when
        // lhs == INT_MIN and rhs == -1.
        masm.ma_cmp(lhs, Imm32(INT_MIN)); // sets EQ if lhs == INT_MIN
        masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal); // if EQ (LHS == INT_MIN), sets EQ if rhs == -1
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    if (!modICommon(mir, lhs, rhs, output, ins->snapshot(), done))
        return false;

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code must use a patchable absolute address for the callee.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_idivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_idivmod));

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            JS_ASSERT(mir->fallible());
            // See if X < 0 (the remainder is in r1 per the EABI).
            masm.ma_cmp(r1, Imm32(0));
            masm.ma_b(&done, Assembler::NotEqual);
            // Remainder is zero: bail out if the saved dividend was negative.
            masm.ma_cmp(callTemp, Imm32(0));
            if (!bailoutIf(Assembler::Signed, ins->snapshot()))
                return false;
        }
    }
    masm.bind(&done);
    return true;
}
|
835 |
|
// Int32 modulus by a power of two: take the absolute value, mask off the
// low bits, and restore the sign. A zero result from a negative input
// should be -0, so that case bails out when it cannot be truncated.
bool
CodeGeneratorARM::visitModPowTwoI(LModPowTwoI *ins)
{
    Register in = ToRegister(ins->getOperand(0));
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label fin;
    // bug 739870, jbramley has a different sequence that may help with speed here
    masm.ma_mov(in, out, SetCond);
    // Input was zero: result is zero, nothing more to do.
    masm.ma_b(&fin, Assembler::Zero);
    // Negate (conditionally, if input was negative) to get |in|.
    masm.ma_rsb(Imm32(0), out, NoSetCond, Assembler::Signed);
    masm.ma_and(Imm32((1<<ins->shift())-1), out);
    // Restore the sign; SetCond so Z reflects a zero result for the -0 check.
    masm.ma_rsb(Imm32(0), out, SetCond, Assembler::Signed);
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0
        }
    }
    masm.bind(&fin);
    return true;
}
|
861 |
|
// Int32 modulus by (2^shift) using the ma_mod_mask helper sequence. As with
// the other mod paths, a zero result from a negative dividend should be -0
// and bails out when it cannot be truncated.
bool
CodeGeneratorARM::visitModMaskI(LModMaskI *ins)
{
    Register src = ToRegister(ins->getOperand(0));
    Register dest = ToRegister(ins->getDef(0));
    Register tmp1 = ToRegister(ins->getTemp(0));
    Register tmp2 = ToRegister(ins->getTemp(1));
    MMod *mir = ins->mir();
    masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            JS_ASSERT(mir->fallible());
            if (!bailoutIf(Assembler::Zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0
        }
    }
    return true;
}
|
// Bitwise NOT of an int32, via mvn.
bool
CodeGeneratorARM::visitBitNotI(LBitNotI *ins)
{
    const LAllocation *input = ins->getOperand(0);
    const LDefinition *dest = ins->getDef(0);
    // Lowering never gives us a constant input here. (Original note,
    // partially garbled: an imm8m-encodable constant could presumably be
    // handled directly for a wider range of numbers — TODO confirm.)
    JS_ASSERT(!input->isConstant());

    masm.ma_mvn(ToRegister(input), ToRegister(dest));
    return true;
}
|
895 |
|
// Bitwise OR / XOR / AND of int32 operands, folding an immediate right-hand
// side when possible.
bool
CodeGeneratorARM::visitBitOpI(LBitOpI *ins)
{
    const LAllocation *lhs = ins->getOperand(0);
    const LAllocation *rhs = ins->getOperand(1);
    const LDefinition *dest = ins->getDef(0);
    // all of these bitops should be either imm32's, or integer registers.
    switch (ins->bitop()) {
      case JSOP_BITOR:
        if (rhs->isConstant())
            masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      case JSOP_BITXOR:
        if (rhs->isConstant())
            masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      case JSOP_BITAND:
        if (rhs->isConstant())
            masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
        else
            masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("unexpected binary opcode");
    }

    return true;
}
|
928 |
|
// Emit an int32 shift (<<, >>, >>>). Unsigned right shift (>>>) may produce
// a value that does not fit in an int32; when the MIR node is fallible we
// bail out in that case.
bool
CodeGeneratorARM::visitShiftI(LShiftI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());

    if (rhs->isConstant()) {
        // Only the low five bits of the shift count are significant.
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            if (shift)
                masm.ma_lsl(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_RSH:
            if (shift)
                masm.ma_asr(Imm32(shift), lhs, dest);
            else
                masm.ma_mov(lhs, dest);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.ma_lsr(Imm32(shift), lhs, dest);
            } else {
                // x >>> 0 can overflow: the unsigned result of a negative
                // input is not representable as an int32.
                masm.ma_mov(lhs, dest);
                if (ins->mir()->toUrsh()->fallible()) {
                    masm.ma_cmp(dest, Imm32(0));
                    if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                        return false;
                }
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range since arm
        // shifts by the lower byte of the register (it will attempt to shift
        // by 250 if you ask it to).
        masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);

        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.ma_lsl(dest, lhs, dest);
            break;
          case JSOP_RSH:
            masm.ma_asr(dest, lhs, dest);
            break;
          case JSOP_URSH:
            masm.ma_lsr(dest, lhs, dest);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow.
                masm.ma_cmp(dest, Imm32(0));
                if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
                    return false;
            }
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected shift op");
        }
    }

    return true;
}
|
996 |
|
997 bool |
|
998 CodeGeneratorARM::visitUrshD(LUrshD *ins) |
|
999 { |
|
1000 Register lhs = ToRegister(ins->lhs()); |
|
1001 Register temp = ToRegister(ins->temp()); |
|
1002 |
|
1003 const LAllocation *rhs = ins->rhs(); |
|
1004 FloatRegister out = ToFloatRegister(ins->output()); |
|
1005 |
|
1006 if (rhs->isConstant()) { |
|
1007 int32_t shift = ToInt32(rhs) & 0x1F; |
|
1008 if (shift) |
|
1009 masm.ma_lsr(Imm32(shift), lhs, temp); |
|
1010 else |
|
1011 masm.ma_mov(lhs, temp); |
|
1012 } else { |
|
1013 masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp); |
|
1014 masm.ma_lsr(temp, lhs, temp); |
|
1015 } |
|
1016 |
|
1017 masm.convertUInt32ToDouble(temp, out); |
|
1018 return true; |
|
1019 } |
|
1020 |
|
// Emit Math.pow(x, 0.5), handling the IEEE corner cases inline.
bool
CodeGeneratorARM::visitPowHalfD(LPowHalfD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());

    Label done;

    // Math.pow(-Infinity, 0.5) == Infinity: when the input compares equal to
    // -Infinity, negate the scratch (-Inf -> +Inf) and skip the sqrt path.
    masm.ma_vimm(NegativeInfinity<double>(), ScratchFloatReg);
    masm.compareDouble(input, ScratchFloatReg);
    masm.ma_vneg(ScratchFloatReg, output, Assembler::Equal);
    masm.ma_b(&done, Assembler::Equal);

    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
    masm.ma_vimm(0.0, ScratchFloatReg);
    masm.ma_vadd(ScratchFloatReg, input, output);
    masm.ma_vsqrt(output, output);

    masm.bind(&done);
    return true;
}
|
1043 |
|
1044 MoveOperand |
|
1045 CodeGeneratorARM::toMoveOperand(const LAllocation *a) const |
|
1046 { |
|
1047 if (a->isGeneralReg()) |
|
1048 return MoveOperand(ToRegister(a)); |
|
1049 if (a->isFloatReg()) |
|
1050 return MoveOperand(ToFloatRegister(a)); |
|
1051 JS_ASSERT((ToStackOffset(a) & 3) == 0); |
|
1052 int32_t offset = ToStackOffset(a); |
|
1053 |
|
1054 // The way the stack slots work, we assume that everything from depth == 0 downwards is writable |
|
1055 // however, since our frame is included in this, ensure that the frame gets skipped |
|
1056 if (gen->compilingAsmJS()) |
|
1057 offset -= AlignmentMidPrologue; |
|
1058 |
|
1059 return MoveOperand(StackPointer, offset); |
|
1060 } |
|
1061 |
|
// Out-of-line path that patches the absolute-address jump table emitted by
// emitTableSwitchDispatch() once all case targets have known offsets.
class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorARM>
{
    MTableSwitch *mir_;
    // One CodeLabel per table slot, in the order the slots were written.
    Vector<CodeLabel, 8, IonAllocPolicy> codeLabels_;

    bool accept(CodeGeneratorARM *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    OutOfLineTableSwitch(TempAllocator &alloc, MTableSwitch *mir)
      : mir_(mir),
        codeLabels_(alloc)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    // Record the CodeLabel for the next jump-table slot.
    bool addCodeLabel(CodeLabel label) {
        return codeLabels_.append(label);
    }
    CodeLabel codeLabel(unsigned i) {
        return codeLabels_[i];
    }
};
|
1088 |
|
// Bind each jump-table CodeLabel to its case block's code offset.
bool
CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
{
    MTableSwitch *mir = ool->mir();

    size_t numCases = mir->numCases();
    for (size_t i = 0; i < numCases; i++) {
        // Table slot i is paired with case (numCases - 1 - i): the dispatch
        // sequence reverses the index with a reverse-subtract (see
        // emitTableSwitchDispatch), so the table is stored back to front.
        LBlock *caseblock = mir->getCase(numCases - 1 - i)->lir();
        Label *caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished.
        CodeLabel cl = ool->codeLabel(i);
        cl.src()->bind(caseoffset);
        if (!masm.addCodeLabel(cl))
            return false;
    }

    return true;
}
|
1110 |
|
// Emit the dispatch sequence for a table switch. The generated code looks
// something like:
//   SUBS   index, input, #base
//   RSBSPL index, index, #max
//   LDRPL  pc, [pc, index lsl 2]
//   B      default
bool
CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                          const Register &base)
{
    // If the range of targets is N through M, we first subtract off the
    // lowest case (N), which both shifts the arguments into the range
    // 0 to (M-N) and sets the minus flag if the argument was out of range on
    // the low end.

    // Then we do a reverse subtract with the size of the jump table, which
    // reverses the order of the range (it is size through 0, rather than 0
    // through size). The main purpose of this is that we set the same flag
    // as the lower bound check for the upper bound check. Lastly, we do this
    // conditionally on the previous check succeeding.

    // Then we conditionally load the pc offset by the (reversed) index (times
    // the address size) into the pc, which branches to the correct case.
    // NOTE: when we go to read the pc, the value that we get back is the pc of
    // the current instruction *PLUS 8*. This means that ldr foo, [pc, +0]
    // reads $pc+8. In other words, there is an empty word after the branch into
    // the switch table before the table actually starts. Since the only other
    // unhandled case is the default case (both out of range high and out of
    // range low), we insert a branch to the default case into the extra slot,
    // which ensures we don't attempt to execute the address table.
    Label *defaultcase = mir->getDefault()->lir()->label();

    int32_t cases = mir->numCases();
    // Subtract the lowest case value so in-range inputs become 0..cases-1.
    masm.ma_sub(index, Imm32(mir->low()), index, SetCond);
    masm.ma_rsb(index, Imm32(cases - 1), index, SetCond, Assembler::NotSigned);
    // No constant-pool entries may be placed inside the load/table sequence.
    AutoForbidPools afp(&masm);
    masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset, Assembler::NotSigned);
    masm.ma_b(defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
    for (int32_t i = 0; i < cases; i++) {
        CodeLabel cl;
        masm.writeCodePointer(cl.dest());
        if (!ool->addCodeLabel(cl))
            return false;
    }
    if (!addOutOfLineCode(ool))
        return false;

    return true;
}
|
1166 |
|
1167 bool |
|
1168 CodeGeneratorARM::visitMathD(LMathD *math) |
|
1169 { |
|
1170 const LAllocation *src1 = math->getOperand(0); |
|
1171 const LAllocation *src2 = math->getOperand(1); |
|
1172 const LDefinition *output = math->getDef(0); |
|
1173 |
|
1174 switch (math->jsop()) { |
|
1175 case JSOP_ADD: |
|
1176 masm.ma_vadd(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1177 break; |
|
1178 case JSOP_SUB: |
|
1179 masm.ma_vsub(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1180 break; |
|
1181 case JSOP_MUL: |
|
1182 masm.ma_vmul(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1183 break; |
|
1184 case JSOP_DIV: |
|
1185 masm.ma_vdiv(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1186 break; |
|
1187 default: |
|
1188 MOZ_ASSUME_UNREACHABLE("unexpected opcode"); |
|
1189 } |
|
1190 return true; |
|
1191 } |
|
1192 |
|
1193 bool |
|
1194 CodeGeneratorARM::visitMathF(LMathF *math) |
|
1195 { |
|
1196 const LAllocation *src1 = math->getOperand(0); |
|
1197 const LAllocation *src2 = math->getOperand(1); |
|
1198 const LDefinition *output = math->getDef(0); |
|
1199 |
|
1200 switch (math->jsop()) { |
|
1201 case JSOP_ADD: |
|
1202 masm.ma_vadd_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1203 break; |
|
1204 case JSOP_SUB: |
|
1205 masm.ma_vsub_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1206 break; |
|
1207 case JSOP_MUL: |
|
1208 masm.ma_vmul_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1209 break; |
|
1210 case JSOP_DIV: |
|
1211 masm.ma_vdiv_f32(ToFloatRegister(src1), ToFloatRegister(src2), ToFloatRegister(output)); |
|
1212 break; |
|
1213 default: |
|
1214 MOZ_ASSUME_UNREACHABLE("unexpected opcode"); |
|
1215 } |
|
1216 return true; |
|
1217 } |
|
1218 |
|
1219 bool |
|
1220 CodeGeneratorARM::visitFloor(LFloor *lir) |
|
1221 { |
|
1222 FloatRegister input = ToFloatRegister(lir->input()); |
|
1223 Register output = ToRegister(lir->output()); |
|
1224 Label bail; |
|
1225 masm.floor(input, output, &bail); |
|
1226 if (!bailoutFrom(&bail, lir->snapshot())) |
|
1227 return false; |
|
1228 return true; |
|
1229 } |
|
1230 |
|
1231 bool |
|
1232 CodeGeneratorARM::visitFloorF(LFloorF *lir) |
|
1233 { |
|
1234 FloatRegister input = ToFloatRegister(lir->input()); |
|
1235 Register output = ToRegister(lir->output()); |
|
1236 Label bail; |
|
1237 masm.floorf(input, output, &bail); |
|
1238 if (!bailoutFrom(&bail, lir->snapshot())) |
|
1239 return false; |
|
1240 return true; |
|
1241 } |
|
1242 |
|
1243 bool |
|
1244 CodeGeneratorARM::visitRound(LRound *lir) |
|
1245 { |
|
1246 FloatRegister input = ToFloatRegister(lir->input()); |
|
1247 Register output = ToRegister(lir->output()); |
|
1248 FloatRegister tmp = ToFloatRegister(lir->temp()); |
|
1249 Label bail; |
|
1250 // Output is either correct, or clamped. All -0 cases have been translated to a clamped |
|
1251 // case.a |
|
1252 masm.round(input, output, &bail, tmp); |
|
1253 if (!bailoutFrom(&bail, lir->snapshot())) |
|
1254 return false; |
|
1255 return true; |
|
1256 } |
|
1257 |
|
1258 bool |
|
1259 CodeGeneratorARM::visitRoundF(LRoundF *lir) |
|
1260 { |
|
1261 FloatRegister input = ToFloatRegister(lir->input()); |
|
1262 Register output = ToRegister(lir->output()); |
|
1263 FloatRegister tmp = ToFloatRegister(lir->temp()); |
|
1264 Label bail; |
|
1265 // Output is either correct, or clamped. All -0 cases have been translated to a clamped |
|
1266 // case.a |
|
1267 masm.roundf(input, output, &bail, tmp); |
|
1268 if (!bailoutFrom(&bail, lir->snapshot())) |
|
1269 return false; |
|
1270 return true; |
|
1271 } |
|
1272 |
|
// Convert a double to an int32, branching to |fail| when the conversion
// saturated. The VFP conversion clamps out-of-range values to the int32
// bounds, so a result of 0x7fffffff or 0x80000000 is treated as failure.
// NOTE(review): this also rejects inputs whose true result is exactly
// INT32_MIN or INT32_MAX -- presumably the caller handles that via bailout.
void
CodeGeneratorARM::emitRoundDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    masm.ma_vcvt_F64_I32(src, ScratchFloatReg);
    masm.ma_vxfer(ScratchFloatReg, dest);
    // The second compare is predicated on NotEqual, so it only runs when the
    // first did not match; Equal afterwards means dest was either sentinel.
    masm.ma_cmp(dest, Imm32(0x7fffffff));
    masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    masm.ma_b(fail, Assembler::Equal);
}
|
1282 |
|
1283 bool |
|
1284 CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32 *ins) |
|
1285 { |
|
1286 return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output())); |
|
1287 } |
|
1288 |
|
1289 bool |
|
1290 CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32 *ins) |
|
1291 { |
|
1292 return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output())); |
|
1293 } |
|
1294 |
|
// Bucketed frame sizes used by FrameSizeClass: a frame is rounded up to the
// smallest class that can contain it (see FromDepth/frameSize below).
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
|
1296 |
|
1297 FrameSizeClass |
|
1298 FrameSizeClass::FromDepth(uint32_t frameDepth) |
|
1299 { |
|
1300 for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) { |
|
1301 if (frameDepth < FrameSizes[i]) |
|
1302 return FrameSizeClass(i); |
|
1303 } |
|
1304 |
|
1305 return FrameSizeClass::None(); |
|
1306 } |
|
1307 |
|
// One past the largest valid size-class id.
FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
}
|
1313 |
|
// The byte size reserved for this size class. Only valid for real classes
// (not None()/ClassLimit()).
uint32_t
FrameSizeClass::frameSize() const
{
    JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    JS_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}
|
1322 |
|
1323 ValueOperand |
|
1324 CodeGeneratorARM::ToValue(LInstruction *ins, size_t pos) |
|
1325 { |
|
1326 Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX)); |
|
1327 Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX)); |
|
1328 return ValueOperand(typeReg, payloadReg); |
|
1329 } |
|
1330 |
|
1331 ValueOperand |
|
1332 CodeGeneratorARM::ToOutValue(LInstruction *ins) |
|
1333 { |
|
1334 Register typeReg = ToRegister(ins->getDef(TYPE_INDEX)); |
|
1335 Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX)); |
|
1336 return ValueOperand(typeReg, payloadReg); |
|
1337 } |
|
1338 |
|
1339 ValueOperand |
|
1340 CodeGeneratorARM::ToTempValue(LInstruction *ins, size_t pos) |
|
1341 { |
|
1342 Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX)); |
|
1343 Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX)); |
|
1344 return ValueOperand(typeReg, payloadReg); |
|
1345 } |
|
1346 |
|
1347 bool |
|
1348 CodeGeneratorARM::visitValue(LValue *value) |
|
1349 { |
|
1350 const ValueOperand out = ToOutValue(value); |
|
1351 |
|
1352 masm.moveValue(value->value(), out); |
|
1353 return true; |
|
1354 } |
|
1355 |
|
// Box a non-floating-point value.
bool
CodeGeneratorARM::visitBox(LBox *box)
{
    const LDefinition *type = box->getDef(TYPE_INDEX);

    JS_ASSERT(!box->getOperand(0)->isConstant());

    // On nunbox32 platforms such as ARM, the input operand and the output
    // payload have the same virtual register, so all that needs to be
    // written is the type tag into the type-register definition.
    masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
    return true;
}
|
1369 |
|
1370 bool |
|
1371 CodeGeneratorARM::visitBoxFloatingPoint(LBoxFloatingPoint *box) |
|
1372 { |
|
1373 const LDefinition *payload = box->getDef(PAYLOAD_INDEX); |
|
1374 const LDefinition *type = box->getDef(TYPE_INDEX); |
|
1375 const LAllocation *in = box->getOperand(0); |
|
1376 |
|
1377 FloatRegister reg = ToFloatRegister(in); |
|
1378 if (box->type() == MIRType_Float32) { |
|
1379 masm.convertFloat32ToDouble(reg, ScratchFloatReg); |
|
1380 reg = ScratchFloatReg; |
|
1381 } |
|
1382 |
|
1383 //masm.as_vxfer(ToRegister(payload), ToRegister(type), |
|
1384 // VFPRegister(ToFloatRegister(in)), Assembler::FloatToCore); |
|
1385 masm.ma_vxfer(VFPRegister(reg), ToRegister(payload), ToRegister(type)); |
|
1386 return true; |
|
1387 } |
|
1388 |
|
// Unbox a Value. Note that for unbox, the type and payload indexes are
// switched on the inputs; the payload register already holds the unboxed
// value, so the only work is the optional type-tag guard.
bool
CodeGeneratorARM::visitUnbox(LUnbox *unbox)
{
    MUnbox *mir = unbox->mir();
    Register type = ToRegister(unbox->type());

    // When fallible, bail out if the tag does not match the expected type.
    if (mir->fallible()) {
        masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
        if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
            return false;
    }
    return true;
}
|
1404 |
|
1405 bool |
|
1406 CodeGeneratorARM::visitDouble(LDouble *ins) |
|
1407 { |
|
1408 |
|
1409 const LDefinition *out = ins->getDef(0); |
|
1410 |
|
1411 masm.ma_vimm(ins->getDouble(), ToFloatRegister(out)); |
|
1412 return true; |
|
1413 } |
|
1414 |
|
1415 bool |
|
1416 CodeGeneratorARM::visitFloat32(LFloat32 *ins) |
|
1417 { |
|
1418 const LDefinition *out = ins->getDef(0); |
|
1419 masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out)); |
|
1420 return true; |
|
1421 } |
|
1422 |
|
// On nunbox32 the tag already lives in its own register, so no extraction
// is needed to test a Value's type.
Register
CodeGeneratorARM::splitTagForTest(const ValueOperand &value)
{
    return value.typeReg();
}
|
1428 |
|
// Branch on the truthiness of a double: falsey values are 0.0, -0.0 and NaN.
bool
CodeGeneratorARM::visitTestDAndBranch(LTestDAndBranch *test)
{
    const LAllocation *opd = test->input();
    // Compare against 0.0 and copy the VFP status flags into the APSR so
    // ordinary conditional branches can test them.
    masm.ma_vcmpz(ToFloatRegister(opd));
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the Z bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if the operand is NaN, which is shown as Overflow.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}
|
1447 |
|
// Branch on the truthiness of a float32: falsey values are 0.0, -0.0 and NaN.
bool
CodeGeneratorARM::visitTestFAndBranch(LTestFAndBranch *test)
{
    const LAllocation *opd = test->input();
    // Compare against 0.0 and copy the VFP status flags into the APSR so
    // ordinary conditional branches can test them.
    masm.ma_vcmpz_f32(ToFloatRegister(opd));
    masm.as_vmrs(pc);

    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    // If the compare set the Z bit, then the result is definitely false.
    jumpToBlock(ifFalse, Assembler::Zero);
    // It is also false if the operand is NaN, which is shown as Overflow.
    jumpToBlock(ifFalse, Assembler::Overflow);
    jumpToBlock(ifTrue);
    return true;
}
|
1466 |
|
1467 bool |
|
1468 CodeGeneratorARM::visitCompareD(LCompareD *comp) |
|
1469 { |
|
1470 FloatRegister lhs = ToFloatRegister(comp->left()); |
|
1471 FloatRegister rhs = ToFloatRegister(comp->right()); |
|
1472 |
|
1473 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop()); |
|
1474 masm.compareDouble(lhs, rhs); |
|
1475 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output())); |
|
1476 return true; |
|
1477 } |
|
1478 |
|
1479 bool |
|
1480 CodeGeneratorARM::visitCompareF(LCompareF *comp) |
|
1481 { |
|
1482 FloatRegister lhs = ToFloatRegister(comp->left()); |
|
1483 FloatRegister rhs = ToFloatRegister(comp->right()); |
|
1484 |
|
1485 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop()); |
|
1486 masm.compareFloat(lhs, rhs); |
|
1487 masm.emitSet(Assembler::ConditionFromDoubleCondition(cond), ToRegister(comp->output())); |
|
1488 return true; |
|
1489 } |
|
1490 |
|
1491 bool |
|
1492 CodeGeneratorARM::visitCompareDAndBranch(LCompareDAndBranch *comp) |
|
1493 { |
|
1494 FloatRegister lhs = ToFloatRegister(comp->left()); |
|
1495 FloatRegister rhs = ToFloatRegister(comp->right()); |
|
1496 |
|
1497 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop()); |
|
1498 masm.compareDouble(lhs, rhs); |
|
1499 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse()); |
|
1500 return true; |
|
1501 } |
|
1502 |
|
1503 bool |
|
1504 CodeGeneratorARM::visitCompareFAndBranch(LCompareFAndBranch *comp) |
|
1505 { |
|
1506 FloatRegister lhs = ToFloatRegister(comp->left()); |
|
1507 FloatRegister rhs = ToFloatRegister(comp->right()); |
|
1508 |
|
1509 Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop()); |
|
1510 masm.compareFloat(lhs, rhs); |
|
1511 emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(), comp->ifFalse()); |
|
1512 return true; |
|
1513 } |
|
1514 |
|
// Strict (in)equality between a Value (lhs) and a boolean (rhs), producing
// a boolean in |output|.
bool
CodeGeneratorARM::visitCompareB(LCompareB *lir)
{
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        // lhs is a boolean: compare the payloads.
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }

    masm.bind(&notBoolean);
    {
        // Different types: strict equality is false, so STRICTNE yields true.
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
    return true;
}
|
1545 |
|
// Strict (in)equality between a Value (lhs) and a boolean (rhs), branching
// directly on the result.
bool
CodeGeneratorARM::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // If lhs is not a boolean the answer is decided by the types alone:
    // STRICTEQ is false, STRICTNE is true.
    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    // Same type: branch on the payload comparison.
    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}
|
1565 |
|
// (In)equality between two Values: equal only when both the type tags and
// the payloads compare equal. Produces a boolean in |output|.
bool
CodeGeneratorARM::visitCompareV(LCompareV *lir)
{
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    Label notEqual, done;
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        // Same type: the payload comparison decides.
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        // Different types: equality is false, inequality is true.
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
    return true;
}
|
1594 |
|
// (In)equality between two Values, branching directly on the result.
bool
CodeGeneratorARM::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    // Mismatched type tags decide the comparison immediately: jump to the
    // block that corresponds to "not equal".
    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    jumpToBlock(notEqual, Assembler::NotEqual);
    // Same type: branch on the payload comparison.
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    emitBranch(cond, lir->ifTrue(), lir->ifFalse());

    return true;
}
|
1615 |
|
1616 bool |
|
1617 CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch *baab) |
|
1618 { |
|
1619 if (baab->right()->isConstant()) |
|
1620 masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right()))); |
|
1621 else |
|
1622 masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right())); |
|
1623 emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse()); |
|
1624 return true; |
|
1625 } |
|
1626 |
|
1627 bool |
|
1628 CodeGeneratorARM::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir) |
|
1629 { |
|
1630 masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); |
|
1631 return true; |
|
1632 } |
|
1633 |
|
1634 bool |
|
1635 CodeGeneratorARM::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir) |
|
1636 { |
|
1637 masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output())); |
|
1638 return true; |
|
1639 } |
|
1640 |
|
// Logical NOT of an int32: output is 1 when the input is 0, else 0.
bool
CodeGeneratorARM::visitNotI(LNotI *ins)
{
    // It is hard to optimize !x, so just do it the basic way for now.
    masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
    masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
    return true;
}
|
1649 |
|
1650 bool |
|
1651 CodeGeneratorARM::visitNotD(LNotD *ins) |
|
1652 { |
|
1653 // Since this operation is not, we want to set a bit if |
|
1654 // the double is falsey, which means 0.0, -0.0 or NaN. |
|
1655 // when comparing with 0, an input of 0 will set the Z bit (30) |
|
1656 // and NaN will set the V bit (28) of the APSR. |
|
1657 FloatRegister opd = ToFloatRegister(ins->input()); |
|
1658 Register dest = ToRegister(ins->output()); |
|
1659 |
|
1660 // Do the compare |
|
1661 masm.ma_vcmpz(opd); |
|
1662 // TODO There are three variations here to compare performance-wise. |
|
1663 bool nocond = true; |
|
1664 if (nocond) { |
|
1665 // Load the value into the dest register |
|
1666 masm.as_vmrs(dest); |
|
1667 masm.ma_lsr(Imm32(28), dest, dest); |
|
1668 masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30 |
|
1669 masm.ma_and(Imm32(1), dest); |
|
1670 } else { |
|
1671 masm.as_vmrs(pc); |
|
1672 masm.ma_mov(Imm32(0), dest); |
|
1673 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal); |
|
1674 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow); |
|
1675 } |
|
1676 return true; |
|
1677 } |
|
1678 |
|
1679 bool |
|
1680 CodeGeneratorARM::visitNotF(LNotF *ins) |
|
1681 { |
|
1682 // Since this operation is not, we want to set a bit if |
|
1683 // the double is falsey, which means 0.0, -0.0 or NaN. |
|
1684 // when comparing with 0, an input of 0 will set the Z bit (30) |
|
1685 // and NaN will set the V bit (28) of the APSR. |
|
1686 FloatRegister opd = ToFloatRegister(ins->input()); |
|
1687 Register dest = ToRegister(ins->output()); |
|
1688 |
|
1689 // Do the compare |
|
1690 masm.ma_vcmpz_f32(opd); |
|
1691 // TODO There are three variations here to compare performance-wise. |
|
1692 bool nocond = true; |
|
1693 if (nocond) { |
|
1694 // Load the value into the dest register |
|
1695 masm.as_vmrs(dest); |
|
1696 masm.ma_lsr(Imm32(28), dest, dest); |
|
1697 masm.ma_alu(dest, lsr(dest, 2), dest, op_orr); // 28 + 2 = 30 |
|
1698 masm.ma_and(Imm32(1), dest); |
|
1699 } else { |
|
1700 masm.as_vmrs(pc); |
|
1701 masm.ma_mov(Imm32(0), dest); |
|
1702 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Equal); |
|
1703 masm.ma_mov(Imm32(1), dest, NoSetCond, Assembler::Overflow); |
|
1704 } |
|
1705 return true; |
|
1706 } |
|
1707 |
|
1708 bool |
|
1709 CodeGeneratorARM::visitLoadSlotV(LLoadSlotV *load) |
|
1710 { |
|
1711 const ValueOperand out = ToOutValue(load); |
|
1712 Register base = ToRegister(load->input()); |
|
1713 int32_t offset = load->mir()->slot() * sizeof(js::Value); |
|
1714 |
|
1715 masm.loadValue(Address(base, offset), out); |
|
1716 return true; |
|
1717 } |
|
1718 |
|
// Load a known-typed value from an object's slots array. Doubles load the
// whole (possibly int32-tagged) Value; other types load just the payload
// word.
bool
CodeGeneratorARM::visitLoadSlotT(LLoadSlotT *load)
{
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);

    if (load->mir()->type() == MIRType_Double)
        masm.loadInt32OrDouble(Operand(base, offset), ToFloatRegister(load->output()));
    else
        masm.ma_ldr(Operand(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
    return true;
}
|
1731 |
|
// Store a known-typed value into an object's slots array, emitting a GC
// pre-barrier when required.
bool
CodeGeneratorARM::visitStoreSlotT(LStoreSlotT *store)
{

    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(js::Value);

    const LAllocation *value = store->value();
    MIRType valueType = store->mir()->value()->type();

    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), store->mir()->slotType());

    // Doubles overwrite the whole Value.
    if (valueType == MIRType_Double) {
        masm.ma_vstr(ToFloatRegister(value), Operand(base, offset));
        return true;
    }

    // Store the type tag if needed (i.e. when the slot's stored type may
    // differ from the value being written).
    if (valueType != store->mir()->slotType())
        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(base, offset));

    // Store the payload.
    if (value->isConstant())
        masm.storePayload(*value->toConstant(), Operand(base, offset));
    else
        masm.storePayload(ToRegister(value), Operand(base, offset));

    return true;
}
|
1762 |
|
// Load a known-typed element from a dense elements vector.
bool
CodeGeneratorARM::visitLoadElementT(LLoadElementT *load)
{
    Register base = ToRegister(load->elements());
    if (load->mir()->type() == MIRType_Double) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            // loadDoubles() means the elements are stored as raw doubles;
            // otherwise the element may be an int32-tagged Value.
            if (load->mir()->loadDoubles())
                masm.loadDouble(source, fpreg);
            else
                masm.loadInt32OrDouble(source, fpreg);
        } else {
            Register index = ToRegister(load->index());
            if (load->mir()->loadDoubles())
                masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg);
            else
                masm.loadInt32OrDouble(base, index, fpreg);
        }
    } else {
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            masm.load32(source, ToRegister(load->output()));
        } else {
            // index << 3 == index * sizeof(Value); this reads the word at
            // offset 0 of the Value, presumably the nunbox32 payload --
            // TODO confirm against NUNBOX32_PAYLOAD_OFFSET.
            masm.ma_ldr(DTRAddr(base, DtrRegImmShift(ToRegister(load->index()), LSL, 3)),
                        ToRegister(load->output()));
        }
    }
    JS_ASSERT(!load->mir()->needsHoleCheck());
    return true;
}
|
1794 |
|
// Store a known-typed value into a dense elements vector at |index|
// (constant or register). Mirrors visitStoreSlotT but for elements.
void
CodeGeneratorARM::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
                                    const Register &elements, const LAllocation *index)
{
    if (index->isConstant()) {
        Address dest = Address(elements, ToInt32(index) * sizeof(Value));
        // Doubles overwrite the whole Value.
        if (valueType == MIRType_Double) {
            masm.ma_vstr(ToFloatRegister(value), Operand(dest));
            return;
        }

        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);

        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), dest);
        else
            masm.storePayload(ToRegister(value), dest);
    } else {
        Register indexReg = ToRegister(index);
        // Doubles overwrite the whole Value.
        if (valueType == MIRType_Double) {
            masm.ma_vstr(ToFloatRegister(value), elements, indexReg);
            return;
        }

        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg);

        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), elements, indexReg);
        else
            masm.storePayload(ToRegister(value), elements, indexReg);
    }
}
|
1833 |
|
// Guard that an object has the expected shape; bail out of Ion code
// (invalidating the assumptions made at compile time) if it does not.
bool
CodeGeneratorARM::visitGuardShape(LGuardShape *guard)
{
    Register obj = ToRegister(guard->input());
    Register tmp = ToRegister(guard->tempInt());

    // Load the object's shape pointer and compare it against the shape the
    // guard was compiled for.
    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfShape())), tmp);
    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));

    return bailoutIf(Assembler::NotEqual, guard->snapshot());
}
|
1845 |
|
// Guard on an object's type object. Depending on bailOnEquality(), the
// guard either requires the type to match (bail on mismatch) or requires
// it NOT to match (bail on equality).
bool
CodeGeneratorARM::visitGuardObjectType(LGuardObjectType *guard)
{
    Register obj = ToRegister(guard->input());
    Register tmp = ToRegister(guard->tempInt());

    masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfType())), tmp);
    masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->typeObject()));

    // The sense of the bailout condition is inverted when the guard is
    // checking that the object does NOT have the given type.
    Assembler::Condition cond =
        guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
    return bailoutIf(cond, guard->snapshot());
}
|
1859 |
|
1860 bool |
|
1861 CodeGeneratorARM::visitGuardClass(LGuardClass *guard) |
|
1862 { |
|
1863 Register obj = ToRegister(guard->input()); |
|
1864 Register tmp = ToRegister(guard->tempInt()); |
|
1865 |
|
1866 masm.loadObjClass(obj, tmp); |
|
1867 masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass())); |
|
1868 if (!bailoutIf(Assembler::NotEqual, guard->snapshot())) |
|
1869 return false; |
|
1870 return true; |
|
1871 } |
|
1872 |
|
// Compute the implicit |this| value for a call to |callee|, producing
// |undefined| when the callee's environment is the current global.
bool
CodeGeneratorARM::visitImplicitThis(LImplicitThis *lir)
{
    Register callee = ToRegister(lir->callee());
    const ValueOperand out = ToOutValue(lir);

    // The implicit |this| is always |undefined| if the function's environment
    // is the current global. The output's type register doubles as a
    // scratch register for the environment load.
    masm.ma_ldr(DTRAddr(callee, DtrOffImm(JSFunction::offsetOfEnvironment())), out.typeReg());
    masm.ma_cmp(out.typeReg(), ImmGCPtr(&gen->info().script()->global()));

    // TODO: OOL stub path. For now, any non-global environment bails out.
    if (!bailoutIf(Assembler::NotEqual, lir->snapshot()))
        return false;

    masm.moveValue(UndefinedValue(), out);
    return true;
}
|
1891 |
|
// Poll the runtime's interrupt flag and call into the VM (out of line)
// when it is set.
bool
CodeGeneratorARM::visitInterruptCheck(LInterruptCheck *lir)
{
    // Out-of-line path performing the actual InterruptCheck VM call.
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;

    // Load the interrupt flag and branch to the OOL path if it is non-zero.
    // NOTE(review): lr is used as a scratch register for the load here —
    // presumably safe at this point; confirm against the frame setup.
    void *interrupt = (void*)GetIonContext()->runtime->addressOfInterrupt();
    masm.load32(AbsoluteAddress(interrupt), lr);
    masm.ma_cmp(lr, Imm32(0));
    masm.ma_b(ool->entry(), Assembler::NonZero);
    masm.bind(ool->rejoin());
    return true;
}
|
1906 |
|
// Emit the epilogue that invalidated Ion code jumps to. It pushes the
// bailout return address and the (patched-in later) IonScript pointer,
// then transfers control to the shared invalidation thunk.
bool
CodeGeneratorARM::generateInvalidateEpilogue()
{
    // Ensure that there is enough space in the buffer for the OsiPoint
    // patching to occur. Otherwise, we could overwrite the invalidation
    // epilogue.
    for (size_t i = 0; i < sizeof(void *); i+= Assembler::nopSize())
        masm.nop();

    masm.bind(&invalidate_);

    // Push the return address of the point that we bailed out at onto the stack
    masm.Push(lr);

    // Push the Ion script onto the stack (when we determine what that pointer is).
    // The placeholder -1 word is patched once the IonScript exists.
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    JitCode *thunk = gen->jitRuntime()->getInvalidationThunk();

    masm.branch(thunk);

    // We should never reach this point in JIT code -- the invalidation thunk should
    // pop the invalidated JS frame and return directly to its caller.
    masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    return true;
}
|
1932 |
|
// ARM-specific hook choosing the scratch register used when adding a stub
// to a dispatch-style IC.
void
DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on ARM.
    addState->dispatchScratch = ScratchRegister;
}
|
1939 |
|
1940 template <class U> |
|
1941 Register |
|
1942 getBase(U *mir) |
|
1943 { |
|
1944 switch (mir->base()) { |
|
1945 case U::Heap: return HeapReg; |
|
1946 case U::Global: return GlobalReg; |
|
1947 } |
|
1948 return InvalidReg; |
|
1949 } |
|
1950 |
|
// Not yet implemented on ARM; reaching this at runtime is a bug.
bool
CodeGeneratorARM::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
|
1956 |
|
// Not yet implemented on ARM; reaching this at runtime is a bug.
bool
CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
|
1962 |
|
// Load a value from the asm.js heap (addressed relative to HeapReg), with
// an optional inline bounds check. When the bounds check fails, the load
// is suppressed and a default value is produced instead — NaN for float
// views, 0 for integer views — using conditionally executed instructions
// rather than branches.
bool
CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    const MAsmJSLoadHeap *mir = ins->mir();
    bool isSigned;
    int size;
    bool isFloat = false;
    // Decode the typed-array view type into access width, signedness, and
    // integer-vs-float register class. Signedness is irrelevant for
    // full-width 32-bit loads, so INT32 and UINT32 share one case.
    switch (mir->viewType()) {
      case ArrayBufferView::TYPE_INT8:    isSigned = true; size = 8; break;
      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size = 8; break;
      case ArrayBufferView::TYPE_INT16:   isSigned = true; size = 16; break;
      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  isSigned = true; size = 32; break;
      case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break;
      case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }

    const LAllocation *ptr = ins->ptr();

    if (ptr->isConstant()) {
        // A constant, non-negative heap offset: the bounds check must have
        // been proven unnecessary at compile time.
        JS_ASSERT(mir->skipBoundsCheck());
        int32_t ptrImm = ptr->toConstant()->toInt32();
        JS_ASSERT(ptrImm >= 0);
        if (isFloat) {
            VFPRegister vd(ToFloatRegister(ins->output()));
            // For 32-bit loads, use the single-precision view of the
            // output register.
            if (size == 32)
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
            else
                masm.ma_vldr(Operand(HeapReg, ptrImm), vd, Assembler::Always);
        } else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    Register ptrReg = ToRegister(ptr);

    if (mir->skipBoundsCheck()) {
        // Dynamic index, but the bounds check was elided: load
        // unconditionally.
        if (isFloat) {
            VFPRegister vd(ToFloatRegister(ins->output()));
            if (size == 32)
                masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always);
            else
                masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Always);
        } else {
            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg,
                                  ToRegister(ins->output()), Offset, Assembler::Always);
        }
        return true;
    }

    // Emit the bounds check; its buffer offset is recorded below via
    // AsmJSHeapAccess (presumably so the comparison can be patched against
    // the actual heap length — see AsmJSHeapAccess). The load executes only
    // on the in-bounds (Below) path; the out-of-bounds (AboveOrEqual) path
    // materializes the default value instead.
    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
    if (isFloat) {
        FloatRegister dst = ToFloatRegister(ins->output());
        VFPRegister vd(dst);
        if (size == 32) {
            // Out-of-bounds float32 load: narrow the canonical NaN in
            // NANReg down to single precision.
            masm.convertDoubleToFloat32(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
        } else {
            masm.ma_vmov(NANReg, dst, Assembler::AboveOrEqual);
            masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
        }
    } else {
        // Out-of-bounds integer load yields 0.
        Register d = ToRegister(ins->output());
        masm.ma_mov(Imm32(0), d, NoSetCond, Assembler::AboveOrEqual);
        masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
    }
    return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
|
2035 |
|
2036 bool |
|
2037 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins) |
|
2038 { |
|
2039 const MAsmJSStoreHeap *mir = ins->mir(); |
|
2040 bool isSigned; |
|
2041 int size; |
|
2042 bool isFloat = false; |
|
2043 switch (mir->viewType()) { |
|
2044 case ArrayBufferView::TYPE_INT8: |
|
2045 case ArrayBufferView::TYPE_UINT8: isSigned = false; size = 8; break; |
|
2046 case ArrayBufferView::TYPE_INT16: |
|
2047 case ArrayBufferView::TYPE_UINT16: isSigned = false; size = 16; break; |
|
2048 case ArrayBufferView::TYPE_INT32: |
|
2049 case ArrayBufferView::TYPE_UINT32: isSigned = true; size = 32; break; |
|
2050 case ArrayBufferView::TYPE_FLOAT64: isFloat = true; size = 64; break; |
|
2051 case ArrayBufferView::TYPE_FLOAT32: isFloat = true; size = 32; break; |
|
2052 default: MOZ_ASSUME_UNREACHABLE("unexpected array type"); |
|
2053 } |
|
2054 const LAllocation *ptr = ins->ptr(); |
|
2055 if (ptr->isConstant()) { |
|
2056 JS_ASSERT(mir->skipBoundsCheck()); |
|
2057 int32_t ptrImm = ptr->toConstant()->toInt32(); |
|
2058 JS_ASSERT(ptrImm >= 0); |
|
2059 if (isFloat) { |
|
2060 VFPRegister vd(ToFloatRegister(ins->value())); |
|
2061 if (size == 32) |
|
2062 masm.ma_vstr(vd.singleOverlay(), Operand(HeapReg, ptrImm), Assembler::Always); |
|
2063 else |
|
2064 masm.ma_vstr(vd, Operand(HeapReg, ptrImm), Assembler::Always); |
|
2065 } else { |
|
2066 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm), |
|
2067 ToRegister(ins->value()), Offset, Assembler::Always); |
|
2068 } |
|
2069 return true; |
|
2070 } |
|
2071 |
|
2072 Register ptrReg = ToRegister(ptr); |
|
2073 |
|
2074 if (mir->skipBoundsCheck()) { |
|
2075 Register ptrReg = ToRegister(ptr); |
|
2076 if (isFloat) { |
|
2077 VFPRegister vd(ToFloatRegister(ins->value())); |
|
2078 if (size == 32) |
|
2079 masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Always); |
|
2080 else |
|
2081 masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Always); |
|
2082 } else { |
|
2083 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, |
|
2084 ToRegister(ins->value()), Offset, Assembler::Always); |
|
2085 } |
|
2086 return true; |
|
2087 } |
|
2088 |
|
2089 BufferOffset bo = masm.ma_BoundsCheck(ptrReg); |
|
2090 if (isFloat) { |
|
2091 VFPRegister vd(ToFloatRegister(ins->value())); |
|
2092 if (size == 32) |
|
2093 masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below); |
|
2094 else |
|
2095 masm.ma_vstr(vd, HeapReg, ptrReg, 0, Assembler::Below); |
|
2096 } else { |
|
2097 masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, |
|
2098 ToRegister(ins->value()), Offset, Assembler::Below); |
|
2099 } |
|
2100 return masm.append(AsmJSHeapAccess(bo.getOffset())); |
|
2101 } |
|
2102 |
|
2103 bool |
|
2104 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins) |
|
2105 { |
|
2106 const MAsmJSPassStackArg *mir = ins->mir(); |
|
2107 Operand dst(StackPointer, mir->spOffset()); |
|
2108 if (ins->arg()->isConstant()) { |
|
2109 //masm.as_bkpt(); |
|
2110 masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst); |
|
2111 } else { |
|
2112 if (ins->arg()->isGeneralReg()) |
|
2113 masm.ma_str(ToRegister(ins->arg()), dst); |
|
2114 else |
|
2115 masm.ma_vstr(ToFloatRegister(ins->arg()), dst); |
|
2116 } |
|
2117 |
|
2118 return true; |
|
2119 } |
|
2120 |
|
// Unsigned 32-bit division using the hardware udiv path (ma_udiv).
bool
CodeGeneratorARM::visitUDiv(LUDiv *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    Label done;
    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Infinity|0 == 0: a truncated division by zero produces 0, so
            // skip the divide and jump straight to the end.
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated division by zero must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_udiv(lhs, rhs, output);

    // When the result is observed as an int32 (not truncated), bail if the
    // unsigned quotient has the sign bit set, i.e. does not fit in a
    // non-negative int32.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
|
2156 |
|
// Unsigned 32-bit modulus using the hardware divide path (ma_umod).
// Mirrors visitUDiv's handling of division by zero and of results that
// must fit in a non-negative int32.
bool
CodeGeneratorARM::visitUMod(LUMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Label done;

    if (ins->mir()->canBeDivideByZero()) {
        masm.ma_cmp(rhs, Imm32(0));
        if (ins->mir()->isTruncated()) {
            // Infinity|0 == 0: a truncated modulus by zero produces 0, so
            // skip the computation and jump straight to the end.
            Label skip;
            masm.ma_b(&skip, Assembler::NotEqual);
            masm.ma_mov(Imm32(0), output);
            masm.ma_b(&done);
            masm.bind(&skip);
        } else {
            // Non-truncated modulus by zero must bail out.
            JS_ASSERT(ins->mir()->fallible());
            if (!bailoutIf(Assembler::Equal, ins->snapshot()))
                return false;
        }
    }

    masm.ma_umod(lhs, rhs, output);

    // When the result is observed as an int32 (not truncated), bail if it
    // has the sign bit set.
    if (!ins->mir()->isTruncated()) {
        masm.ma_cmp(output, Imm32(0));
        if (!bailoutIf(Assembler::LessThan, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
|
2192 |
|
// Unsigned division/modulus for ARM cores without hardware integer divide:
// call out to the AEABI runtime helper __aeabi_uidivmod instead.
bool
CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod *ins)
{
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());

    // The register allocator must have pinned operands and result to the
    // helper's calling convention: arguments in r0/r1, quotient expected in
    // r0, remainder expected in r1.
    JS_ASSERT(lhs == r0);
    JS_ASSERT(rhs == r1);
    JS_ASSERT(ins->mirRaw()->isDiv() || ins->mirRaw()->isMod());
    JS_ASSERT_IF(ins->mirRaw()->isDiv(), output == r0);
    JS_ASSERT_IF(ins->mirRaw()->isMod(), output == r1);

    Label afterDiv;

    // Division by zero: produce 0 (for both div and mod) and skip the
    // runtime call entirely.
    masm.ma_cmp(rhs, Imm32(0));
    Label notzero;
    masm.ma_b(&notzero, Assembler::NonZero);
    masm.ma_mov(Imm32(0), output);
    masm.ma_b(&afterDiv);
    masm.bind(&notzero);

    masm.setupAlignedABICall(2);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    // asm.js code cannot embed a raw function pointer; it links the helper
    // through an AsmJSImm relocation instead.
    if (gen->compilingAsmJS())
        masm.callWithABI(AsmJSImm_aeabi_uidivmod);
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, __aeabi_uidivmod));

    masm.bind(&afterDiv);
    return true;
}
|
2226 |
|
2227 bool |
|
2228 CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress *ins) |
|
2229 { |
|
2230 const MEffectiveAddress *mir = ins->mir(); |
|
2231 Register base = ToRegister(ins->base()); |
|
2232 Register index = ToRegister(ins->index()); |
|
2233 Register output = ToRegister(ins->output()); |
|
2234 masm.as_add(output, base, lsl(index, mir->scale())); |
|
2235 masm.ma_add(Imm32(mir->displacement()), output); |
|
2236 return true; |
|
2237 } |
|
2238 |
|
// Load an asm.js global variable, addressed relative to GlobalReg.
bool
CodeGeneratorARM::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
    const MAsmJSLoadGlobalVar *mir = ins->mir();
    unsigned addr = mir->globalDataOffset();
    if (mir->type() == MIRType_Int32) {
        masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
    } else if (mir->type() == MIRType_Float32) {
        // Load through the single-precision view of the output register.
        VFPRegister vd(ToFloatRegister(ins->output()));
        masm.ma_vldr(Operand(GlobalReg, addr), vd.singleOverlay());
    } else {
        // Remaining case: a double-precision load.
        masm.ma_vldr(Operand(GlobalReg, addr), ToFloatRegister(ins->output()));
    }
    return true;
}
|
2254 |
|
2255 bool |
|
2256 CodeGeneratorARM::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins) |
|
2257 { |
|
2258 const MAsmJSStoreGlobalVar *mir = ins->mir(); |
|
2259 |
|
2260 MIRType type = mir->value()->type(); |
|
2261 JS_ASSERT(IsNumberType(type)); |
|
2262 unsigned addr = mir->globalDataOffset(); |
|
2263 if (mir->value()->type() == MIRType_Int32) { |
|
2264 masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value())); |
|
2265 } else if (mir->value()->type() == MIRType_Float32) { |
|
2266 VFPRegister vd(ToFloatRegister(ins->value())); |
|
2267 masm.ma_vstr(vd.singleOverlay(), Operand(GlobalReg, addr)); |
|
2268 } else { |
|
2269 masm.ma_vstr(ToFloatRegister(ins->value()), Operand(GlobalReg, addr)); |
|
2270 } |
|
2271 return true; |
|
2272 } |
|
2273 |
|
// Load an entry from an asm.js function-pointer table:
// out = *(GlobalReg + globalDataOffset + index * 4).
bool
CodeGeneratorARM::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
    const MAsmJSLoadFuncPtr *mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register tmp = ToRegister(ins->temp());
    Register out = ToRegister(ins->output());
    unsigned addr = mir->globalDataOffset();
    // Compute the table-relative offset (base offset + index scaled by
    // pointer size) in tmp, then load through GlobalReg.
    masm.ma_mov(Imm32(addr), tmp);
    masm.as_add(tmp, tmp, lsl(index, 2));
    masm.ma_ldr(DTRAddr(GlobalReg, DtrRegImmShift(tmp, LSL, 0)), out);

    return true;
}
|
2289 |
|
// Load an FFI exit's function pointer from the asm.js global data area.
bool
CodeGeneratorARM::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
{
    const MAsmJSLoadFFIFunc *mir = ins->mir();

    masm.ma_ldr(Operand(GlobalReg, mir->globalDataOffset()), ToRegister(ins->output()));

    return true;
}
|
2299 |
|
2300 bool |
|
2301 CodeGeneratorARM::visitNegI(LNegI *ins) |
|
2302 { |
|
2303 Register input = ToRegister(ins->input()); |
|
2304 masm.ma_neg(input, ToRegister(ins->output())); |
|
2305 return true; |
|
2306 } |
|
2307 |
|
2308 bool |
|
2309 CodeGeneratorARM::visitNegD(LNegD *ins) |
|
2310 { |
|
2311 FloatRegister input = ToFloatRegister(ins->input()); |
|
2312 masm.ma_vneg(input, ToFloatRegister(ins->output())); |
|
2313 return true; |
|
2314 } |
|
2315 |
|
2316 bool |
|
2317 CodeGeneratorARM::visitNegF(LNegF *ins) |
|
2318 { |
|
2319 FloatRegister input = ToFloatRegister(ins->input()); |
|
2320 masm.ma_vneg_f32(input, ToFloatRegister(ins->output())); |
|
2321 return true; |
|
2322 } |
|
2323 |
|
// Not yet implemented on ARM; reaching this at runtime is a bug.
bool
CodeGeneratorARM::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}
|
2329 |
|
// Not yet implemented on ARM; reaching this at runtime is a bug.
JitCode *
JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}