js/src/jit/x86/CodeGenerator-x86.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/CodeGenerator-x86.h"

#include "mozilla/DebugOnly.h"

#include "jsnum.h"

#include "jit/IonCaches.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "vm/Shape.h"

#include "jsscriptinlines.h"

#include "jit/ExecutionMode-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using JS::GenericNaN;

CodeGeneratorX86::CodeGeneratorX86(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorX86Shared(gen, graph, masm)
{
}

static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
    for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
        if (frameDepth < FrameSizes[i])
            return FrameSizeClass(i);
    }

    return FrameSizeClass::None();
}

FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
}

uint32_t
FrameSizeClass::frameSize() const
{
    JS_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    JS_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}
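
// Illustrative sketch (not part of the original changeset): the bucket search
// FromDepth() performs, replayed in standalone C++ with a few concrete depths.
// The helper names below are made up for illustration only.
#include <cassert>
#include <cstddef>
#include <cstdint>

static int ExampleClassForDepth(uint32_t depth)
{
    static const uint32_t sizes[] = { 128, 256, 512, 1024 };
    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        if (depth < sizes[i])                  // strictly less, as in FromDepth()
            return int(i);
    }
    return -1;                                 // corresponds to FrameSizeClass::None()
}

static void ExampleClassForDepthUsage()
{
    assert(ExampleClassForDepth(100) == 0);    // fits the 128-byte class
    assert(ExampleClassForDepth(128) == 1);    // 128 is not < 128, so the 256-byte class
    assert(ExampleClassForDepth(600) == 3);    // 1024-byte class
    assert(ExampleClassForDepth(4096) == -1);  // deeper than any size class
}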

ValueOperand
CodeGeneratorX86::ToValue(LInstruction *ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorX86::ToOutValue(LInstruction *ins)
{
    Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}

ValueOperand
CodeGeneratorX86::ToTempValue(LInstruction *ins, size_t pos)
{
    Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
    Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
    return ValueOperand(typeReg, payloadReg);
}
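
// Illustrative sketch (not part of the original changeset): the nunbox32
// layout the three helpers above assume. On x86 a JS::Value occupies 64 bits
// split across two 32-bit registers: one holds the type tag, the other the
// payload. The struct below only illustrates that split; the real tag values
// and field offsets live in the Value/NUNBOX32 definitions, not here.
#include <cstdint>

struct ExampleNunbox32
{
    uint32_t payload;   // low word: int32, boolean, or pointer bits
    uint32_t tag;       // high word: which kind of payload this is
};

static uint64_t ExampleBoxWords(uint32_t tag, uint32_t payload)
{
    // The 64-bit pattern as it would sit in memory on little-endian x86:
    // payload in the low word, tag in the high word.
    return (uint64_t(tag) << 32) | payload;
}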

bool
CodeGeneratorX86::visitValue(LValue *value)
{
    const ValueOperand out = ToOutValue(value);
    masm.moveValue(value->value(), out);
    return true;
}

bool
CodeGeneratorX86::visitBox(LBox *box)
{
    const LDefinition *type = box->getDef(TYPE_INDEX);

    DebugOnly<const LAllocation *> a = box->getOperand(0);
    JS_ASSERT(!a->isConstant());

    // On x86, the input operand and the output payload have the same
    // virtual register. All that needs to be written is the type tag for
    // the type definition.
    masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
    return true;
}

bool
CodeGeneratorX86::visitBoxFloatingPoint(LBoxFloatingPoint *box)
{
    const LAllocation *in = box->getOperand(0);
    const ValueOperand out = ToOutValue(box);

    FloatRegister reg = ToFloatRegister(in);
    if (box->type() == MIRType_Float32) {
        masm.convertFloat32ToDouble(reg, ScratchFloatReg);
        reg = ScratchFloatReg;
    }
    masm.boxDouble(reg, out);
    return true;
}

bool
CodeGeneratorX86::visitUnbox(LUnbox *unbox)
{
    // Note that for unbox, the type and payload indexes are switched on the
    // inputs.
    MUnbox *mir = unbox->mir();

    if (mir->fallible()) {
        masm.cmpl(ToOperand(unbox->type()), Imm32(MIRTypeToTag(mir->type())));
        if (!bailoutIf(Assembler::NotEqual, unbox->snapshot()))
            return false;
    }
    return true;
}
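
// Illustrative sketch (not part of the original changeset): what the fallible
// path of visitUnbox() checks, expressed over the example layout above. Only
// the type word is compared; the payload register already holds the unboxed
// bits, so a mismatching tag simply triggers a bailout.
#include <cstdint>

static bool ExampleFallibleUnbox(uint32_t valueTag, uint32_t valuePayload,
                                 uint32_t expectedTag, uint32_t *out)
{
    if (valueTag != expectedTag)    // cmpl type, tag ; bailoutIf(NotEqual)
        return false;               // bailout: resume in the interpreter
    *out = valuePayload;            // the payload register doubles as the output
    return true;
}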

bool
CodeGeneratorX86::visitLoadSlotV(LLoadSlotV *load)
{
    const ValueOperand out = ToOutValue(load);
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);

    masm.loadValue(Address(base, offset), out);
    return true;
}

bool
CodeGeneratorX86::visitLoadSlotT(LLoadSlotT *load)
{
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);

    if (load->mir()->type() == MIRType_Double)
        masm.loadInt32OrDouble(Operand(base, offset), ToFloatRegister(load->output()));
    else
        masm.load32(Address(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
    return true;
}

bool
CodeGeneratorX86::visitStoreSlotT(LStoreSlotT *store)
{
    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(js::Value);

    const LAllocation *value = store->value();
    MIRType valueType = store->mir()->value()->type();

    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), store->mir()->slotType());

    if (valueType == MIRType_Double) {
        masm.storeDouble(ToFloatRegister(value), Operand(base, offset));
        return true;
    }

    // Store the type tag if needed.
    if (valueType != store->mir()->slotType())
        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(base, offset));

    // Store the payload.
    if (value->isConstant())
        masm.storePayload(*value->toConstant(), Operand(base, offset));
    else
        masm.storePayload(ToRegister(value), Operand(base, offset));

    return true;
}

bool
CodeGeneratorX86::visitLoadElementT(LLoadElementT *load)
{
    Operand source = createArrayElementOperand(ToRegister(load->elements()), load->index());

    if (load->mir()->needsHoleCheck()) {
        Assembler::Condition cond = masm.testMagic(Assembler::Equal, source);
        if (!bailoutIf(cond, load->snapshot()))
            return false;
    }

    if (load->mir()->type() == MIRType_Double) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (load->mir()->loadDoubles()) {
            if (source.kind() == Operand::MEM_REG_DISP)
                masm.loadDouble(source.toAddress(), fpreg);
            else
                masm.loadDouble(source.toBaseIndex(), fpreg);
        } else {
            masm.loadInt32OrDouble(source, fpreg);
        }
    } else {
        masm.movl(masm.ToPayload(source), ToRegister(load->output()));
    }

    return true;
}

void
CodeGeneratorX86::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
                                    const Register &elements, const LAllocation *index)
{
    Operand dest = createArrayElementOperand(elements, index);

    if (valueType == MIRType_Double) {
        masm.storeDouble(ToFloatRegister(value), dest);
        return;
    }

    // Store the type tag if needed.
    if (valueType != elementType)
        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);

    // Store the payload.
    if (value->isConstant())
        masm.storePayload(*value->toConstant(), dest);
    else
        masm.storePayload(ToRegister(value), dest);
}
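
// Illustrative sketch (not part of the original changeset): the tag elision
// that visitStoreSlotT() and storeElementTyped() rely on. In the real code the
// decision is made at compile time from the MIR types; it is shown here as a
// runtime branch over made-up words purely for clarity.
#include <cstdint>

static void ExampleStoreTypedWord(uint32_t *tagWord, uint32_t *payloadWord,
                                  uint32_t knownSlotTag, uint32_t valueTag,
                                  uint32_t payload)
{
    if (valueTag != knownSlotTag)   // "Store the type tag if needed."
        *tagWord = valueTag;
    *payloadWord = payload;         // the payload word is always rewritten
}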

bool
CodeGeneratorX86::visitImplicitThis(LImplicitThis *lir)
{
    Register callee = ToRegister(lir->callee());
    const ValueOperand out = ToOutValue(lir);

    // The implicit |this| is always |undefined| if the function's environment
    // is the current global.
    GlobalObject *global = &gen->info().script()->global();
    masm.cmpPtr(Operand(callee, JSFunction::offsetOfEnvironment()), ImmGCPtr(global));

    // TODO: OOL stub path.
    if (!bailoutIf(Assembler::NotEqual, lir->snapshot()))
        return false;

    masm.moveValue(UndefinedValue(), out);
    return true;
}

bool
CodeGeneratorX86::visitInterruptCheck(LInterruptCheck *lir)
{
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;

    masm.cmpl(Operand(AbsoluteAddress(GetIonContext()->runtime->addressOfInterrupt())), Imm32(0));
    masm.j(Assembler::NonZero, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}
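
// Illustrative sketch (not part of the original changeset): the shape of the
// poll visitInterruptCheck() emits. A single absolute-addressed word is
// compared against zero on the fast path; only when the runtime has set it
// does control leave JIT code for the out-of-line VM call. The atomic flag
// and helper below are assumptions for illustration, not the real runtime API.
#include <atomic>
#include <cstdint>

static void ExampleInterruptPoll(const std::atomic<uint32_t> &interruptFlag,
                                 void (*slowPath)())
{
    if (interruptFlag.load(std::memory_order_relaxed) != 0)  // cmpl ..., $0 ; jnz ool
        slowPath();                                          // oolCallVM(InterruptCheckInfo, ...)
}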

bool
CodeGeneratorX86::visitCompareB(LCompareB *lir)
{
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        if (rhs->isConstant())
            masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
        else
            masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
        masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
        masm.jump(&done);
    }
    masm.bind(&notBoolean);
    {
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }

    masm.bind(&done);
    return true;
}

bool
CodeGeneratorX86::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    Assembler::Condition cond = masm.testBoolean(Assembler::NotEqual, lhs);
    jumpToBlock((mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(), cond);

    if (rhs->isConstant())
        masm.cmp32(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()));
    else
        masm.cmp32(lhs.payloadReg(), ToRegister(rhs));
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}

bool
CodeGeneratorX86::visitCompareV(LCompareV *lir)
{
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(IsEqualityOp(mir->jsop()));

    Label notEqual, done;
    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    masm.j(Assembler::NotEqual, &notEqual);
    {
        masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
        masm.emitSet(cond, output);
        masm.jump(&done);
    }
    masm.bind(&notEqual);
    {
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }

    masm.bind(&done);
    return true;
}
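
// Illustrative sketch (not part of the original changeset): the two-word
// comparison visitCompareV() emits, in plain C++. If the type words differ the
// result is decided immediately; only when they match are the payload words
// compared. This is only valid for the value kinds the MIR allows to reach
// LCompareV, which is why the real code asserts IsEqualityOp().
#include <cstdint>

static bool ExampleStrictEqualWords(uint32_t lhsTag, uint32_t lhsPayload,
                                    uint32_t rhsTag, uint32_t rhsPayload)
{
    if (lhsTag != rhsTag)
        return false;                   // the notEqual path: a constant answer
    return lhsPayload == rhsPayload;    // payload compare + emitSet
}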

bool
CodeGeneratorX86::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();

    masm.cmp32(lhs.typeReg(), rhs.typeReg());
    jumpToBlock(notEqual, Assembler::NotEqual);
    masm.cmp32(lhs.payloadReg(), rhs.payloadReg());
    emitBranch(cond, lir->ifTrue(), lir->ifFalse());

    return true;
}

bool
CodeGeneratorX86::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir)
{
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());

    if (input != temp)
        masm.mov(input, temp);

    // Beware: convertUInt32ToDouble clobbers input.
    masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
    return true;
}
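
// Illustrative sketch (not part of the original changeset): the numeric fixup
// a uint32 -> double conversion needs on x86, where cvtsi2sd only understands
// signed 32-bit inputs. One common approach (assumed here; the MacroAssembler
// may implement convertUInt32ToDouble differently) is to convert as signed and
// then add 2^32 when the sign bit was set.
#include <cstdint>

static double ExampleUint32ToDouble(uint32_t u)
{
    double d = double(int32_t(u));   // what cvtsi2sd computes
    if (int32_t(u) < 0)
        d += 4294967296.0;           // undo the signed reinterpretation
    return d;
}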

bool
CodeGeneratorX86::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
{
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());
    FloatRegister output = ToFloatRegister(lir->output());

    if (input != temp)
        masm.mov(input, temp);

    // Beware: convertUInt32ToFloat32 clobbers input.
    masm.convertUInt32ToFloat32(temp, output);
    return true;
}

// Load a NaN or zero into a register for an out of bounds AsmJS or static
// typed array load.
class jit::OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86>
{
    AnyRegister dest_;
    bool isFloat32Load_;
  public:
    OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, bool isFloat32Load)
      : dest_(dest), isFloat32Load_(isFloat32Load)
    {}

    const AnyRegister &dest() const { return dest_; }
    bool isFloat32Load() const { return isFloat32Load_; }
    bool accept(CodeGeneratorX86 *codegen) { return codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this); }
};

template<typename T>
void
CodeGeneratorX86::loadViewTypeElement(ArrayBufferView::ViewType vt, const T &srcAddr,
                                      const LDefinition *out)
{
    switch (vt) {
      case ArrayBufferView::TYPE_INT8:    masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
      case ArrayBufferView::TYPE_UINT8_CLAMPED:
      case ArrayBufferView::TYPE_UINT8:   masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
      case ArrayBufferView::TYPE_INT16:   masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
      case ArrayBufferView::TYPE_UINT16:  masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  masm.movlWithPatch(srcAddr, ToRegister(out)); break;
      case ArrayBufferView::TYPE_FLOAT32: masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
      case ArrayBufferView::TYPE_FLOAT64: masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }
}

template<typename T>
bool
CodeGeneratorX86::loadAndNoteViewTypeElement(ArrayBufferView::ViewType vt, const T &srcAddr,
                                             const LDefinition *out)
{
    uint32_t before = masm.size();
    loadViewTypeElement(vt, srcAddr, out);
    uint32_t after = masm.size();
    return masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out)));
}

bool
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    const MLoadTypedArrayElementStatic *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();
    JS_ASSERT_IF(vt == ArrayBufferView::TYPE_FLOAT32, mir->type() == MIRType_Float32);

    Register ptr = ToRegister(ins->ptr());
    const LDefinition *out = ins->output();

    OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
    bool isFloat32Load = (vt == ArrayBufferView::TYPE_FLOAT32);
    if (!mir->fallible()) {
        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
        if (!addOutOfLineCode(ool))
            return false;
    }

    masm.cmpl(ptr, Imm32(mir->length()));
    if (ool)
        masm.j(Assembler::AboveOrEqual, ool->entry());
    else if (!bailoutIf(Assembler::AboveOrEqual, ins->snapshot()))
        return false;

    Address srcAddr(ptr, (int32_t) mir->base());
    loadViewTypeElement(vt, srcAddr, out);
    if (vt == ArrayBufferView::TYPE_FLOAT64)
        masm.canonicalizeDouble(ToFloatRegister(out));
    if (vt == ArrayBufferView::TYPE_FLOAT32)
        masm.canonicalizeFloat(ToFloatRegister(out));
    if (ool)
        masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    const MAsmJSLoadHeap *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    const LDefinition *out = ins->output();

    if (ptr->isConstant()) {
        // The constant displacement still needs to be added to the as-yet-unknown
        // base address of the heap. For now, embed the displacement as an
        // immediate in the instruction. This displacement will be fixed up when
        // the base address is known during dynamic linking (AsmJSModule::initHeap).
        PatchedAbsoluteAddress srcAddr((void *) ptr->toConstant()->toInt32());
        return loadAndNoteViewTypeElement(vt, srcAddr, out);
    }

    Register ptrReg = ToRegister(ptr);
    Address srcAddr(ptrReg, 0);

    if (mir->skipBoundsCheck())
        return loadAndNoteViewTypeElement(vt, srcAddr, out);

    bool isFloat32Load = vt == ArrayBufferView::TYPE_FLOAT32;
    OutOfLineLoadTypedArrayOutOfBounds *ool =
        new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
    if (!addOutOfLineCode(ool))
        return false;

    CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
    masm.j(Assembler::AboveOrEqual, ool->entry());

    uint32_t before = masm.size();
    loadViewTypeElement(vt, srcAddr, out);
    uint32_t after = masm.size();
    masm.bind(ool->rejoin());
    return masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), cmp.offset()));
}
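
// Illustrative sketch (not part of the original changeset): the shape of the
// bounds-checked asm.js heap load emitted above, with the patched length and
// base written as ordinary variables. In the real code the length immediate
// (cmplWithPatch) and the load's address are patched at dynamic-link time, and
// the out-of-line stub supplies 0 (or NaN for float views) instead of faulting.
// The explicit "whole access in range" test below is an assumption of this
// sketch; the emitted code performs a single comparison against the patched
// length.
#include <cstddef>
#include <cstdint>
#include <cstring>

static int32_t ExampleAsmJSLoadInt32(const uint8_t *heapBase, size_t heapLength,
                                     uint32_t ptr)
{
    if (ptr >= heapLength || heapLength - ptr < sizeof(int32_t))
        return 0;                                       // OOL stub: integer views observe 0
    int32_t value;
    std::memcpy(&value, heapBase + ptr, sizeof(value)); // movlWithPatch(srcAddr, out)
    return value;
}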

bool
CodeGeneratorX86::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool)
{
    if (ool->dest().isFloat()) {
        if (ool->isFloat32Load())
            masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
        else
            masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
    } else {
        Register destReg = ool->dest().gpr();
        masm.mov(ImmWord(0), destReg);
    }
    masm.jmp(ool->rejoin());
    return true;
}

template<typename T>
void
CodeGeneratorX86::storeViewTypeElement(ArrayBufferView::ViewType vt, const LAllocation *value,
                                       const T &dstAddr)
{
    switch (vt) {
      case ArrayBufferView::TYPE_INT8:
      case ArrayBufferView::TYPE_UINT8_CLAMPED:
      case ArrayBufferView::TYPE_UINT8:   masm.movbWithPatch(ToRegister(value), dstAddr); break;
      case ArrayBufferView::TYPE_INT16:
      case ArrayBufferView::TYPE_UINT16:  masm.movwWithPatch(ToRegister(value), dstAddr); break;
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  masm.movlWithPatch(ToRegister(value), dstAddr); break;
      case ArrayBufferView::TYPE_FLOAT32: masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
      case ArrayBufferView::TYPE_FLOAT64: masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }
}

template<typename T>
bool
CodeGeneratorX86::storeAndNoteViewTypeElement(ArrayBufferView::ViewType vt, const LAllocation *value,
                                              const T &dstAddr)
{
    uint32_t before = masm.size();
    storeViewTypeElement(vt, value, dstAddr);
    uint32_t after = masm.size();
    return masm.append(AsmJSHeapAccess(before, after));
}

bool
CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    MStoreTypedArrayElementStatic *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();

    Register ptr = ToRegister(ins->ptr());
    const LAllocation *value = ins->value();

    masm.cmpl(ptr, Imm32(mir->length()));
    Label rejoin;
    masm.j(Assembler::AboveOrEqual, &rejoin);

    Address dstAddr(ptr, (int32_t) mir->base());
    storeViewTypeElement(vt, value, dstAddr);
    masm.bind(&rejoin);
    return true;
}

bool
CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
    MAsmJSStoreHeap *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();
    const LAllocation *value = ins->value();
    const LAllocation *ptr = ins->ptr();

    if (ptr->isConstant()) {
        // The constant displacement still needs to be added to the as-yet-unknown
        // base address of the heap. For now, embed the displacement as an
        // immediate in the instruction. This displacement will be fixed up when
        // the base address is known during dynamic linking (AsmJSModule::initHeap).
        PatchedAbsoluteAddress dstAddr((void *) ptr->toConstant()->toInt32());
        return storeAndNoteViewTypeElement(vt, value, dstAddr);
    }

    Register ptrReg = ToRegister(ptr);
    Address dstAddr(ptrReg, 0);

    if (mir->skipBoundsCheck())
        return storeAndNoteViewTypeElement(vt, value, dstAddr);

    CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
    Label rejoin;
    masm.j(Assembler::AboveOrEqual, &rejoin);

    uint32_t before = masm.size();
    storeViewTypeElement(vt, value, dstAddr);
    uint32_t after = masm.size();
    masm.bind(&rejoin);
    return masm.append(AsmJSHeapAccess(before, after, cmp.offset()));
}

bool
CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
    MAsmJSLoadGlobalVar *mir = ins->mir();
    MIRType type = mir->type();
    JS_ASSERT(IsNumberType(type));

    CodeOffsetLabel label;
    if (type == MIRType_Int32)
        label = masm.movlWithPatch(PatchedAbsoluteAddress(), ToRegister(ins->output()));
    else if (type == MIRType_Float32)
        label = masm.movssWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));
    else
        label = masm.movsdWithPatch(PatchedAbsoluteAddress(), ToFloatRegister(ins->output()));

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX86::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
{
    MAsmJSStoreGlobalVar *mir = ins->mir();

    MIRType type = mir->value()->type();
    JS_ASSERT(IsNumberType(type));

    CodeOffsetLabel label;
    if (type == MIRType_Int32)
        label = masm.movlWithPatch(ToRegister(ins->value()), PatchedAbsoluteAddress());
    else if (type == MIRType_Float32)
        label = masm.movssWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());
    else
        label = masm.movsdWithPatch(ToFloatRegister(ins->value()), PatchedAbsoluteAddress());

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX86::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
    MAsmJSLoadFuncPtr *mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register out = ToRegister(ins->output());
    CodeOffsetLabel label = masm.movlWithPatch(PatchedAbsoluteAddress(), index, TimesFour, out);

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX86::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
{
    MAsmJSLoadFFIFunc *mir = ins->mir();

    Register out = ToRegister(ins->output());
    CodeOffsetLabel label = masm.movlWithPatch(PatchedAbsoluteAddress(), out);

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

void
CodeGeneratorX86::postAsmJSCall(LAsmJSCall *lir)
{
    MAsmJSCall *mir = lir->mir();
    if (!IsFloatingPointType(mir->type()) || mir->callee().which() != MAsmJSCall::Callee::Builtin)
        return;

    if (mir->type() == MIRType_Float32) {
        masm.reserveStack(sizeof(float));
        Operand op(esp, 0);
        masm.fstp32(op);
        masm.loadFloat32(op, ReturnFloatReg);
        masm.freeStack(sizeof(float));
    } else {
        masm.reserveStack(sizeof(double));
        Operand op(esp, 0);
        masm.fstp(op);
        masm.loadDouble(op, ReturnFloatReg);
        masm.freeStack(sizeof(double));
    }
}

void
DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // On x86, where there is no general purpose scratch register available,
    // child cache classes must manually specify a dispatch scratch register.
    MOZ_ASSUME_UNREACHABLE("x86 needs manual assignment of dispatchScratch");
}

void
GetPropertyParIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // There is no dedicated scratch register on x86. Use the instruction's
    // temp if it allocated one; otherwise the temp is a BogusTemp and we fall
    // back to the output's scratch register.
    JS_ASSERT(ins->isGetPropertyCacheV() || ins->isGetPropertyCacheT());
    if (ins->isGetPropertyCacheV() || ins->toGetPropertyCacheT()->temp()->isBogusTemp())
        addState->dispatchScratch = output_.scratchReg().gpr();
    else
        addState->dispatchScratch = ToRegister(ins->toGetPropertyCacheT()->temp());
}

void
GetElementParIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // There is no dedicated scratch register on x86. Use the instruction's
    // temp if it allocated one; otherwise the temp is a BogusTemp and we fall
    // back to the output's scratch register.
    JS_ASSERT(ins->isGetElementCacheV() || ins->isGetElementCacheT());
    if (ins->isGetElementCacheV() || ins->toGetElementCacheT()->temp()->isBogusTemp())
        addState->dispatchScratch = output_.scratchReg().gpr();
    else
        addState->dispatchScratch = ToRegister(ins->toGetElementCacheT()->temp());
}

void
SetPropertyParIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // We don't have an output register to reuse, so we always need a temp.
    JS_ASSERT(ins->isSetPropertyCacheV() || ins->isSetPropertyCacheT());
    if (ins->isSetPropertyCacheV())
        addState->dispatchScratch = ToRegister(ins->toSetPropertyCacheV()->tempForDispatchCache());
    else
        addState->dispatchScratch = ToRegister(ins->toSetPropertyCacheT()->tempForDispatchCache());
}

void
SetElementParIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // We don't have an output register to reuse, but luckily SetElementCache
    // already needs a temp.
    JS_ASSERT(ins->isSetElementCacheV() || ins->isSetElementCacheT());
    if (ins->isSetElementCacheV())
        addState->dispatchScratch = ToRegister(ins->toSetElementCacheV()->temp());
    else
        addState->dispatchScratch = ToRegister(ins->toSetElementCacheT()->temp());
}

namespace js {
namespace jit {

class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86>
{
    LTruncateDToInt32 *ins_;

  public:
    OutOfLineTruncate(LTruncateDToInt32 *ins)
      : ins_(ins)
    { }

    bool accept(CodeGeneratorX86 *codegen) {
        return codegen->visitOutOfLineTruncate(this);
    }
    LTruncateDToInt32 *ins() const {
        return ins_;
    }
};

class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86>
{
    LTruncateFToInt32 *ins_;

  public:
    OutOfLineTruncateFloat32(LTruncateFToInt32 *ins)
      : ins_(ins)
    { }

    bool accept(CodeGeneratorX86 *codegen) {
        return codegen->visitOutOfLineTruncateFloat32(this);
    }
    LTruncateFToInt32 *ins() const {
        return ins_;
    }
};

} // namespace jit
} // namespace js

bool
CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    OutOfLineTruncate *ool = new(alloc()) OutOfLineTruncate(ins);
    if (!addOutOfLineCode(ool))
        return false;

    masm.branchTruncateDouble(input, output, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32 *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    OutOfLineTruncateFloat32 *ool = new(alloc()) OutOfLineTruncateFloat32(ins);
    if (!addOutOfLineCode(ool))
        return false;

    masm.branchTruncateFloat32(input, output, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate *ool)
{
    LTruncateDToInt32 *ins = ool->ins();
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    Label fail;

    if (Assembler::HasSSE3()) {
        // Push double.
        masm.subl(Imm32(sizeof(double)), esp);
        masm.storeDouble(input, Operand(esp, 0));

        static const uint32_t EXPONENT_MASK = 0x7ff00000;
        static const uint32_t EXPONENT_SHIFT = FloatingPoint<double>::ExponentShift - 32;
        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<double>::ExponentBias + 63)
                                                 << EXPONENT_SHIFT;

        // Check exponent to avoid fp exceptions.
        Label failPopDouble;
        masm.load32(Address(esp, 4), output);
        masm.and32(Imm32(EXPONENT_MASK), output);
        masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopDouble);

        // Load double, perform 64-bit truncation.
        masm.fld(Operand(esp, 0));
        masm.fisttp(Operand(esp, 0));

        // Load low word, pop double and jump back.
        masm.load32(Address(esp, 0), output);
        masm.addl(Imm32(sizeof(double)), esp);
        masm.jump(ool->rejoin());

        masm.bind(&failPopDouble);
        masm.addl(Imm32(sizeof(double)), esp);
        masm.jump(&fail);
    } else {
        FloatRegister temp = ToFloatRegister(ins->tempFloat());

        // Try to convert doubles representing integers within 2^32 of a signed
        // integer, by adding/subtracting 2^32 and then trying to convert to int32.
        // This has to be an exact conversion, as otherwise the truncation works
        // incorrectly on the modified value.
        masm.xorpd(ScratchFloatReg, ScratchFloatReg);
        masm.ucomisd(input, ScratchFloatReg);
        masm.j(Assembler::Parity, &fail);

        {
            Label positive;
            masm.j(Assembler::Above, &positive);

            masm.loadConstantDouble(4294967296.0, temp);
            Label skip;
            masm.jmp(&skip);

            masm.bind(&positive);
            masm.loadConstantDouble(-4294967296.0, temp);
            masm.bind(&skip);
        }

        masm.addsd(input, temp);
        masm.cvttsd2si(temp, output);
        masm.cvtsi2sd(output, ScratchFloatReg);

        masm.ucomisd(temp, ScratchFloatReg);
        masm.j(Assembler::Parity, &fail);
        masm.j(Assembler::Equal, ool->rejoin());
    }

    masm.bind(&fail);
    {
        saveVolatile(output);

        masm.setupUnalignedABICall(1, output);
        masm.passABIArg(input, MoveOp::DOUBLE);
        if (gen->compilingAsmJS())
            masm.callWithABI(AsmJSImm_ToInt32);
        else
            masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));
        masm.storeCallResult(output);

        restoreVolatile(output);
    }

    masm.jump(ool->rejoin());
    return true;
}
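
// Illustrative sketch (not part of the original changeset): the two fast paths
// above, replayed in standalone C++. First, the exponent screen used before
// fisttp: (ExponentBias + 63) << EXPONENT_SHIFT is (1023 + 63) << 20, which is
// 0x43E00000, so any double whose magnitude is at least 2^63 -- including
// infinities and NaNs, whose exponent field is all ones -- is sent to the slow
// path before the x87 conversion can raise an exception.
#include <cmath>
#include <cstdint>
#include <cstring>

static bool ExampleExponentTooBigForFisttp(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    uint32_t hi = uint32_t(bits >> 32);        // the word loaded from (esp + 4)
    return (hi & 0x7ff00000) >= 0x43E00000;    // EXPONENT_MASK / TOO_BIG_EXPONENT
}

// Second, the non-SSE3 bias trick: values the inline cvttsd2si could not
// handle are shifted by 2^32 before the hardware truncation, and the result is
// accepted only if converting it back reproduces the biased value exactly.
// Out-of-range casts are undefined behaviour in C++, so this sketch adds an
// explicit range check where the emitted code instead relies on the exactness
// check catching cvttsd2si's out-of-range result.
static bool ExampleBiasedTruncate(double input, int32_t *out)
{
    if (std::isnan(input))                     // ucomisd + j(Parity, &fail)
        return false;
    double temp = input + (input > 0 ? -4294967296.0 : 4294967296.0);
    if (!(temp >= -2147483648.0 && temp < 2147483648.0))
        return false;                          // slow path (js::ToInt32 call)
    int32_t result = int32_t(temp);            // cvttsd2si
    if (double(result) != temp)                // cvtsi2sd + ucomisd exactness check
        return false;                          // inexact: slow path
    *out = result;
    return true;
}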

bool
CodeGeneratorX86::visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32 *ool)
{
    LTruncateFToInt32 *ins = ool->ins();
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    Label fail;

    if (Assembler::HasSSE3()) {
        // Push the float32, but reserve 64 bits of stack so that the 64-bit
        // value popped by fisttp fits.
        masm.subl(Imm32(sizeof(uint64_t)), esp);
        masm.storeFloat32(input, Operand(esp, 0));

        static const uint32_t EXPONENT_MASK = FloatingPoint<float>::ExponentBits;
        static const uint32_t EXPONENT_SHIFT = FloatingPoint<float>::ExponentShift;
        // Integers are still 64 bits long, so we can still test for an exponent > 63.
        static const uint32_t TOO_BIG_EXPONENT = (FloatingPoint<float>::ExponentBias + 63)
                                                 << EXPONENT_SHIFT;

        // Check exponent to avoid fp exceptions.
        Label failPopFloat;
        masm.movl(Operand(esp, 0), output);
        masm.and32(Imm32(EXPONENT_MASK), output);
        masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(TOO_BIG_EXPONENT), &failPopFloat);

        // Load float, perform 32-bit truncation.
        masm.fld32(Operand(esp, 0));
        masm.fisttp(Operand(esp, 0));

        // Load low word, pop the 64 bits and jump back.
        masm.movl(Operand(esp, 0), output);
        masm.addl(Imm32(sizeof(uint64_t)), esp);
        masm.jump(ool->rejoin());

        masm.bind(&failPopFloat);
        masm.addl(Imm32(sizeof(uint64_t)), esp);
        masm.jump(&fail);
    } else {
        FloatRegister temp = ToFloatRegister(ins->tempFloat());

        // Try to convert float32 values representing integers within 2^32 of a
        // signed integer, by adding/subtracting 2^32 and then trying to convert
        // to int32. This has to be an exact conversion, as otherwise the
        // truncation works incorrectly on the modified value.
        masm.xorps(ScratchFloatReg, ScratchFloatReg);
        masm.ucomiss(input, ScratchFloatReg);
        masm.j(Assembler::Parity, &fail);

        {
            Label positive;
            masm.j(Assembler::Above, &positive);

            masm.loadConstantFloat32(4294967296.f, temp);
            Label skip;
            masm.jmp(&skip);

            masm.bind(&positive);
            masm.loadConstantFloat32(-4294967296.f, temp);
            masm.bind(&skip);
        }

        masm.addss(input, temp);
        masm.cvttss2si(temp, output);
        masm.cvtsi2ss(output, ScratchFloatReg);

        masm.ucomiss(temp, ScratchFloatReg);
        masm.j(Assembler::Parity, &fail);
        masm.j(Assembler::Equal, ool->rejoin());
    }

    masm.bind(&fail);
    {
        saveVolatile(output);

        masm.push(input);
        masm.setupUnalignedABICall(1, output);
        masm.cvtss2sd(input, input);
        masm.passABIArg(input, MoveOp::DOUBLE);

        if (gen->compilingAsmJS())
            masm.callWithABI(AsmJSImm_ToInt32);
        else
            masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, js::ToInt32));

        masm.storeCallResult(output);
        masm.pop(input);

        restoreVolatile(output);
    }

    masm.jump(ool->rejoin());
    return true;
}
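
// Illustrative sketch (not part of the original changeset): the float32
// analogue of the exponent screen above, with the single-precision constants
// written out. The mask is 0x7f800000 and (ExponentBias + 63) << ExponentShift
// is (127 + 63) << 23 == 0x5F000000, so magnitudes of 2^63 and up (plus
// infinities and NaNs) take the slow path.
#include <cstdint>
#include <cstring>

static bool ExampleFloatExponentTooBigForFisttp(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));        // the word loaded from (esp + 0)
    return (bits & 0x7f800000) >= 0x5F000000;    // EXPONENT_MASK / TOO_BIG_EXPONENT
}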
