/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x64/CodeGenerator-x64.h"

#include "jit/IonCaches.h"
#include "jit/MIR.h"

#include "jsscriptinlines.h"

#include "jit/shared/CodeGenerator-shared-inl.h"

using namespace js;
using namespace js::jit;

CodeGeneratorX64::CodeGeneratorX64(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorX86Shared(gen, graph, masm)
{
}

ValueOperand
CodeGeneratorX64::ToValue(LInstruction *ins, size_t pos)
{
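    // On x64 a boxed Value fits in a single 64-bit register, so operands,
    // defs, and temps can all be wrapped in a ValueOperand directly.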
    return ValueOperand(ToRegister(ins->getOperand(pos)));
}

ValueOperand
CodeGeneratorX64::ToOutValue(LInstruction *ins)
{
    return ValueOperand(ToRegister(ins->getDef(0)));
}

ValueOperand
CodeGeneratorX64::ToTempValue(LInstruction *ins, size_t pos)
{
    return ValueOperand(ToRegister(ins->getTemp(pos)));
}

FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
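    // x64 does not use frame size classes (see frameSize() below), so every
    // frame depth maps to the "none" class.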
    return FrameSizeClass::None();
}

FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(0);
}

uint32_t
FrameSizeClass::frameSize() const
{
    MOZ_ASSUME_UNREACHABLE("x64 does not use frame size classes");
}

bool
CodeGeneratorX64::visitValue(LValue *value)
{
    LDefinition *reg = value->getDef(0);
    masm.moveValue(value->value(), ToRegister(reg));
    return true;
}

bool
CodeGeneratorX64::visitBox(LBox *box)
{
    const LAllocation *in = box->getOperand(0);
    const LDefinition *result = box->getDef(0);

    if (IsFloatingPointType(box->type())) {
        FloatRegister reg = ToFloatRegister(in);
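        // Boxed Values only hold doubles, so widen a Float32 payload to
        // double before boxing it.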
        if (box->type() == MIRType_Float32) {
            masm.convertFloat32ToDouble(reg, ScratchFloatReg);
            reg = ScratchFloatReg;
        }
        masm.movq(reg, ToRegister(result));
    } else {
        masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
    }
    return true;
}

bool
CodeGeneratorX64::visitUnbox(LUnbox *unbox)
{
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    const LDefinition *result = unbox->output();
    MUnbox *mir = unbox->mir();

    if (mir->fallible()) {
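        // A fallible unbox checks the value's type tag first and bails out
        // via the snapshot if it does not match the expected type.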
        Assembler::Condition cond;
        switch (mir->type()) {
          case MIRType_Int32:
            cond = masm.testInt32(Assembler::NotEqual, value);
            break;
          case MIRType_Boolean:
            cond = masm.testBoolean(Assembler::NotEqual, value);
            break;
          case MIRType_Object:
            cond = masm.testObject(Assembler::NotEqual, value);
            break;
          case MIRType_String:
            cond = masm.testString(Assembler::NotEqual, value);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Given MIRType cannot be unboxed.");
        }
        if (!bailoutIf(cond, unbox->snapshot()))
            return false;
    }

    switch (mir->type()) {
      case MIRType_Int32:
        masm.unboxInt32(value, ToRegister(result));
        break;
      case MIRType_Boolean:
        masm.unboxBoolean(value, ToRegister(result));
        break;
      case MIRType_Object:
        masm.unboxObject(value, ToRegister(result));
        break;
      case MIRType_String:
        masm.unboxString(value, ToRegister(result));
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Given MIRType cannot be unboxed.");
    }

    return true;
}

bool
CodeGeneratorX64::visitLoadSlotV(LLoadSlotV *load)
{
    ValueOperand dest = ToOutValue(load);
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);

    masm.loadValue(Address(base, offset), dest);
    return true;
}

void
CodeGeneratorX64::loadUnboxedValue(Operand source, MIRType type, const LDefinition *dest)
{
    switch (type) {
      case MIRType_Double:
        masm.loadInt32OrDouble(source, ToFloatRegister(dest));
        break;

      case MIRType_Object:
      case MIRType_String:
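        // Object and string payloads are extracted the same way, so
        // unboxObject works for both here.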
        masm.unboxObject(source, ToRegister(dest));
        break;

      case MIRType_Int32:
      case MIRType_Boolean:
        masm.movl(source, ToRegister(dest));
        break;

      default:
        MOZ_ASSUME_UNREACHABLE("unexpected type");
    }
}

bool
CodeGeneratorX64::visitLoadSlotT(LLoadSlotT *load)
{
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);

    loadUnboxedValue(Operand(base, offset), load->mir()->type(), load->output());

    return true;
}

void
CodeGeneratorX64::storeUnboxedValue(const LAllocation *value, MIRType valueType,
                                    Operand dest, MIRType slotType)
{
    if (valueType == MIRType_Double) {
        masm.storeDouble(ToFloatRegister(value), dest);
        return;
    }

    // For known integers and booleans, we can just store the unboxed value if
    // the slot has the same type.
    if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
        if (value->isConstant()) {
            Value val = *value->toConstant();
            if (valueType == MIRType_Int32)
                masm.movl(Imm32(val.toInt32()), dest);
            else
                masm.movl(Imm32(val.toBoolean() ? 1 : 0), dest);
        } else {
            masm.movl(ToRegister(value), dest);
        }
        return;
    }

    if (value->isConstant()) {
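        // x64 has no 64-bit immediate store, so materialize the boxed
        // constant in ScratchReg and store the full word from there.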
        masm.moveValue(*value->toConstant(), ScratchReg);
        masm.movq(ScratchReg, dest);
    } else {
        masm.storeValue(ValueTypeFromMIRType(valueType), ToRegister(value), dest);
    }
}

bool
CodeGeneratorX64::visitStoreSlotT(LStoreSlotT *store)
{
    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(js::Value);

    const LAllocation *value = store->value();
    MIRType valueType = store->mir()->value()->type();
    MIRType slotType = store->mir()->slotType();

    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), slotType);

    storeUnboxedValue(value, valueType, Operand(base, offset), slotType);
    return true;
}

bool
CodeGeneratorX64::visitLoadElementT(LLoadElementT *load)
{
    Operand source = createArrayElementOperand(ToRegister(load->elements()), load->index());

    if (load->mir()->loadDoubles()) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (source.kind() == Operand::MEM_REG_DISP)
            masm.loadDouble(source.toAddress(), fpreg);
        else
            masm.loadDouble(source.toBaseIndex(), fpreg);
    } else {
        loadUnboxedValue(source, load->mir()->type(), load->output());
    }

    JS_ASSERT(!load->mir()->needsHoleCheck());
    return true;
}

void
CodeGeneratorX64::storeElementTyped(const LAllocation *value, MIRType valueType, MIRType elementType,
                                    const Register &elements, const LAllocation *index)
{
    Operand dest = createArrayElementOperand(elements, index);
    storeUnboxedValue(value, valueType, dest, elementType);
}

bool
CodeGeneratorX64::visitImplicitThis(LImplicitThis *lir)
{
    Register callee = ToRegister(lir->callee());

    // The implicit |this| is always |undefined| if the function's environment
    // is the current global.
    GlobalObject *global = &gen->info().script()->global();
    masm.cmpPtr(Operand(callee, JSFunction::offsetOfEnvironment()), ImmGCPtr(global));

    // TODO: OOL stub path.
    if (!bailoutIf(Assembler::NotEqual, lir->snapshot()))
        return false;

    masm.moveValue(UndefinedValue(), ToOutValue(lir));
    return true;
}

bool
CodeGeneratorX64::visitInterruptCheck(LInterruptCheck *lir)
{
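    // Branch to an out-of-line VM call when the runtime's interrupt flag is
    // set; otherwise fall through.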
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;

    masm.branch32(Assembler::NotEqual,
                  AbsoluteAddress(GetIonContext()->runtime->addressOfInterrupt()), Imm32(0),
                  ool->entry());
    masm.bind(ool->rejoin());
    return true;
}

bool
CodeGeneratorX64::visitCompareB(LCompareB *lir)
{
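    // Strict equality of a Value against a known boolean: box the boolean and
    // compare the full 64-bit boxed words.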
    MCompare *mir = lir->mir();

    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // Load the boxed boolean into ScratchReg.
    if (rhs->isConstant())
        masm.moveValue(*rhs->toConstant(), ScratchReg);
    else
        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchReg);

    // Perform the comparison.
    masm.cmpq(lhs.valueReg(), ScratchReg);
    masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
    return true;
}

bool
CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();

    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();

    JS_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);

    // Load the boxed boolean into ScratchReg.
    if (rhs->isConstant())
        masm.moveValue(*rhs->toConstant(), ScratchReg);
    else
        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchReg);

    // Perform the comparison.
    masm.cmpq(lhs.valueReg(), ScratchReg);
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}

bool
CodeGeneratorX64::visitCompareV(LCompareV *lir)
{
    MCompare *mir = lir->mir();
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());

    JS_ASSERT(IsEqualityOp(mir->jsop()));

    masm.cmpq(lhs.valueReg(), rhs.valueReg());
    masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
    return true;
}

bool
CodeGeneratorX64::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    MCompare *mir = lir->cmpMir();

    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);

    JS_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
              mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);

    masm.cmpq(lhs.valueReg(), rhs.valueReg());
    emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
    return true;
}

bool
CodeGeneratorX64::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir)
{
    masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
    return true;
}

bool
CodeGeneratorX64::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
{
    masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
    return true;
}

bool
CodeGeneratorX64::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}

bool
CodeGeneratorX64::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    MOZ_ASSUME_UNREACHABLE("NYI");
}

bool
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    MAsmJSLoadHeap *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();

    // No need to note the access if it will never fault.
    bool skipNote = mir->skipBoundsCheck();
    Operand srcAddr(HeapReg);

    if (ptr->isConstant()) {
        int32_t ptrImm = ptr->toConstant()->toInt32();
        // Note only a positive index is accepted here because a negative offset would
        // not wrap back into the protected area reserved for the heap.
        JS_ASSERT(ptrImm >= 0);
        srcAddr = Operand(HeapReg, ptrImm);
    } else {
        srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
    }

    uint32_t before = masm.size();
    switch (vt) {
      case ArrayBufferView::TYPE_INT8:    masm.movsbl(srcAddr, ToRegister(ins->output())); break;
      case ArrayBufferView::TYPE_UINT8:   masm.movzbl(srcAddr, ToRegister(ins->output())); break;
      case ArrayBufferView::TYPE_INT16:   masm.movswl(srcAddr, ToRegister(ins->output())); break;
      case ArrayBufferView::TYPE_UINT16:  masm.movzwl(srcAddr, ToRegister(ins->output())); break;
      case ArrayBufferView::TYPE_INT32:
      case ArrayBufferView::TYPE_UINT32:  masm.movl(srcAddr, ToRegister(ins->output())); break;
      case ArrayBufferView::TYPE_FLOAT32: masm.loadFloat32(srcAddr, ToFloatRegister(ins->output())); break;
      case ArrayBufferView::TYPE_FLOAT64: masm.loadDouble(srcAddr, ToFloatRegister(ins->output())); break;
      default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
    }
    uint32_t after = masm.size();
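    // Unless the bounds check proved this access cannot fault, record its
    // code range so an out-of-bounds fault on the protected heap region can
    // be attributed to this load and handled.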
    return skipNote || masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(ins->output())));
}

bool
CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
    MAsmJSStoreHeap *mir = ins->mir();
    ArrayBufferView::ViewType vt = mir->viewType();
    const LAllocation *ptr = ins->ptr();
    // No need to note the access if it will never fault.
    bool skipNote = mir->skipBoundsCheck();
    Operand dstAddr(HeapReg);

    if (ptr->isConstant()) {
        int32_t ptrImm = ptr->toConstant()->toInt32();
        // Note only a positive index is accepted here because a negative offset would
        // not wrap back into the protected area reserved for the heap.
        JS_ASSERT(ptrImm >= 0);
        dstAddr = Operand(HeapReg, ptrImm);
    } else {
        dstAddr = Operand(HeapReg, ToRegister(ins->ptr()), TimesOne);
    }

    uint32_t before = masm.size();
    if (ins->value()->isConstant()) {
        switch (vt) {
          case ArrayBufferView::TYPE_INT8:
          case ArrayBufferView::TYPE_UINT8:   masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
          case ArrayBufferView::TYPE_INT16:
          case ArrayBufferView::TYPE_UINT16:  masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
          case ArrayBufferView::TYPE_INT32:
          case ArrayBufferView::TYPE_UINT32:  masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
          default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
        }
    } else {
        switch (vt) {
          case ArrayBufferView::TYPE_INT8:
          case ArrayBufferView::TYPE_UINT8:   masm.movb(ToRegister(ins->value()), dstAddr); break;
          case ArrayBufferView::TYPE_INT16:
          case ArrayBufferView::TYPE_UINT16:  masm.movw(ToRegister(ins->value()), dstAddr); break;
          case ArrayBufferView::TYPE_INT32:
          case ArrayBufferView::TYPE_UINT32:  masm.movl(ToRegister(ins->value()), dstAddr); break;
          case ArrayBufferView::TYPE_FLOAT32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
          case ArrayBufferView::TYPE_FLOAT64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
          default: MOZ_ASSUME_UNREACHABLE("unexpected array type");
        }
    }
    uint32_t after = masm.size();
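    // As above, note the store's code range for the fault handler unless the
    // bounds check proved it cannot fault.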
    return skipNote || masm.append(AsmJSHeapAccess(before, after));
}

bool
CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
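    // asm.js globals live in the module's global data area and are accessed
    // RIP-relative; the appended AsmJSGlobalAccess records the patch site so
    // the final displacement can be filled in when the module is linked.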
    MAsmJSLoadGlobalVar *mir = ins->mir();

    CodeOffsetLabel label;
    if (mir->type() == MIRType_Int32)
        label = masm.loadRipRelativeInt32(ToRegister(ins->output()));
    else
        label = masm.loadRipRelativeDouble(ToFloatRegister(ins->output()));

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX64::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
{
    MAsmJSStoreGlobalVar *mir = ins->mir();

    MIRType type = mir->value()->type();
    JS_ASSERT(IsNumberType(type));

    CodeOffsetLabel label;
    if (type == MIRType_Int32)
        label = masm.storeRipRelativeInt32(ToRegister(ins->value()));
    else
        label = masm.storeRipRelativeDouble(ToFloatRegister(ins->value()));

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX64::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
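    // Load an entry from a function-pointer table kept in the global data
    // area: take the table's address RIP-relative, then index into it.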
    MAsmJSLoadFuncPtr *mir = ins->mir();

    Register index = ToRegister(ins->index());
    Register tmp = ToRegister(ins->temp());
    Register out = ToRegister(ins->output());

    CodeOffsetLabel label = masm.leaRipRelative(tmp);
    masm.loadPtr(Operand(tmp, index, TimesEight, 0), out);

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

bool
CodeGeneratorX64::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
{
    MAsmJSLoadFFIFunc *mir = ins->mir();

    CodeOffsetLabel label = masm.loadRipRelativeInt64(ToRegister(ins->output()));

    return masm.append(AsmJSGlobalAccess(label.offset(), mir->globalDataOffset()));
}

void
DispatchIonCache::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on x64.
    addState->dispatchScratch = ScratchReg;
}

bool
CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    // On x64, branchTruncateDouble uses cvttsd2sq. Unlike the x86
    // implementation, this should handle most doubles and we can just
    // call a stub if it fails.
    return emitTruncateDouble(input, output);
}

bool
CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32 *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    Register output = ToRegister(ins->output());

    // On x64, branchTruncateFloat32 uses cvttss2sq. Unlike the x86
    // implementation, this should handle most floats and we can just
    // call a stub if it fails.
    return emitTruncateFloat32(input, output);
}