|
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
|
2 * vim: set ts=8 sts=4 et sw=4 tw=99: |
|
3 * This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #ifndef jit_IonMacroAssembler_h |
|
8 #define jit_IonMacroAssembler_h |
|
9 |
|
10 #ifdef JS_ION |
|
11 |
|
12 #include "jscompartment.h" |
|
13 |
|
14 #if defined(JS_CODEGEN_X86) |
|
15 # include "jit/x86/MacroAssembler-x86.h" |
|
16 #elif defined(JS_CODEGEN_X64) |
|
17 # include "jit/x64/MacroAssembler-x64.h" |
|
18 #elif defined(JS_CODEGEN_ARM) |
|
19 # include "jit/arm/MacroAssembler-arm.h" |
|
20 #elif defined(JS_CODEGEN_MIPS) |
|
21 # include "jit/mips/MacroAssembler-mips.h" |
|
22 #else |
|
23 # error "Unknown architecture!" |
|
24 #endif |
|
25 #include "jit/IonInstrumentation.h" |
|
26 #include "jit/JitCompartment.h" |
|
27 #include "jit/VMFunctions.h" |
|
28 #include "vm/ProxyObject.h" |
|
29 #include "vm/Shape.h" |
|
30 |
|
31 namespace js { |
|
32 namespace jit { |
|
33 |
|
34 // The public entrypoint for emitting assembly. Note that a MacroAssembler can |
|
35 // use cx->lifoAlloc, so take care not to interleave masm use with other |
|
36 // lifoAlloc use if one will be destroyed before the other. |
|
37 class MacroAssembler : public MacroAssemblerSpecific |
|
38 { |
|
    // Identity helper returning |this|; by its name, intended for passing the
    // assembler to helpers while the object is still under construction.
    MacroAssembler *thisFromCtor() {
        return this;
    }
|
42 |
|
43 public: |
|
    // GC rooter for a MacroAssembler: registers with the AutoGCRooter chain
    // under the IONMASM tag so the GC can find the assembler (tracing itself
    // is handled by AutoGCRooter dispatch elsewhere). Holds a non-owning back
    // pointer to the assembler.
    class AutoRooter : public AutoGCRooter
    {
        MacroAssembler *masm_;   // non-owning

      public:
        AutoRooter(JSContext *cx, MacroAssembler *masm)
          : AutoGCRooter(cx, IONMASM),
            masm_(masm)
        { }

        // The assembler this rooter was constructed for.
        MacroAssembler *masm() const {
            return masm_;
        }
    };
|
58 |
|
59 /* |
|
60 * Base class for creating a branch. |
|
61 */ |
|
    class Branch
    {
        bool init_;        // true iff the three-argument constructor ran
        Condition cond_;   // condition the branch tests
        Label *jump_;      // target label; may be re-pointed via relink()
        Register reg_;     // register the condition is applied to

      public:
        Branch()
          : init_(false),
            cond_(Equal),
            jump_(nullptr),
            reg_(Register::FromCode(0)) // Quell compiler warnings.
        { }

        Branch(Condition cond, Register reg, Label *jump)
          : init_(true),
            cond_(cond),
            jump_(jump),
            reg_(reg)
        { }

        bool isInitialized() const {
            return init_;
        }

        Condition cond() const {
            return cond_;
        }

        Label *jump() const {
            return jump_;
        }

        Register reg() const {
            return reg_;
        }

        // Flip the sense of the branch (delegates to InvertCondition).
        void invertCondition() {
            cond_ = InvertCondition(cond_);
        }

        // Re-point the branch at a different target label.
        void relink(Label *jump) {
            jump_ = jump;
        }

        // Emit the branch into |masm|; implemented by concrete subclasses.
        virtual void emit(MacroAssembler &masm) = 0;
    };
|
110 |
|
111 /* |
|
112 * Creates a branch based on a specific types::Type. |
|
113 * Note: emits number test (int/double) for types::Type::DoubleType() |
|
114 */ |
|
    class BranchType : public Branch
    {
        types::Type type_;   // the types::Type to test against

      public:
        BranchType()
          : Branch(),
            type_(types::Type::UnknownType())
        { }

        BranchType(Condition cond, Register reg, types::Type type, Label *jump)
          : Branch(cond, reg, jump),
            type_(type)
        { }

        // Emit the type test: map types::Type to a MIRType first. A mapped
        // MIRType_Double is widened to a number (int-or-double) test, per the
        // class comment above; all other types get an exact MIRType test.
        void emit(MacroAssembler &masm) {
            JS_ASSERT(isInitialized());
            MIRType mirType = MIRType_None;

            if (type_.isPrimitive()) {
                if (type_.isMagicArguments())
                    mirType = MIRType_MagicOptimizedArguments;
                else
                    mirType = MIRTypeFromValueType(type_.primitive());
            } else if (type_.isAnyObject()) {
                mirType = MIRType_Object;
            } else {
                MOZ_ASSUME_UNREACHABLE("Unknown conversion to mirtype");
            }

            if (mirType == MIRType_Double)
                masm.branchTestNumber(cond(), reg(), jump());
            else
                masm.branchTestMIRType(cond(), reg(), mirType, jump());
        }

    };
|
152 |
|
153 /* |
|
154 * Creates a branch based on a GCPtr. |
|
155 */ |
|
    class BranchGCPtr : public Branch
    {
        ImmGCPtr ptr_;   // immediate GC pointer to compare against

      public:
        BranchGCPtr()
          : Branch(),
            ptr_(ImmGCPtr(nullptr))
        { }

        BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label *jump)
          : Branch(cond, reg, jump),
            ptr_(ptr)
        { }

        // Emit a pointer comparison of reg() against the GC pointer.
        void emit(MacroAssembler &masm) {
            JS_ASSERT(isInitialized());
            masm.branchPtr(cond(), reg(), ptr_, jump());
        }
    };
|
176 |
|
    mozilla::Maybe<AutoRooter> autoRooter_;       // constructed by constructRoot() when a JSContext is available
    mozilla::Maybe<IonContext> ionContext_;       // owned IonContext, used by the no-active-context constructor
    mozilla::Maybe<AutoIonContextAlloc> alloc_;   // temp allocator, when the context did not supply one
    bool enoughMemory_;                           // cleared by propagateOOM(false); read by oom()
    bool embedsNurseryPointers_;                  // read by embedsNurseryPointers(); presumably set by the
                                                  // nursery-pointer helpers (moveNurseryPtr et al.) — confirm in .cpp

    // SPS instrumentation, only used for Ion caches.
    mozilla::Maybe<IonInstrumentation> spsInstrumentation_;
    jsbytecode *spsPc_;   // pc slot handed to IonInstrumentation; only meaningful
                          // once spsInstrumentation_ has been constructed

  private:
    // This field is used to manage profiling instrumentation output. If
    // provided and enabled, then instrumentation will be emitted around call
    // sites. The IonInstrumentation instance is hosted inside of
    // CodeGeneratorShared and is the manager of when instrumentation is
    // actually emitted or not. If nullptr, then no instrumentation is emitted.
    IonInstrumentation *sps_;

    // Labels for handling exceptions and failures.
    NonAssertingLabel sequentialFailureLabel_;
    NonAssertingLabel parallelFailureLabel_;
|
198 |
|
199 public: |
|
200 // If instrumentation should be emitted, then the sps parameter should be |
|
201 // provided, but otherwise it can be safely omitted to prevent all |
|
202 // instrumentation from being emitted. |
|
    MacroAssembler()
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
        // Requires an active IonContext (GetIonContext()).
        IonContext *icx = GetIonContext();
        JSContext *cx = icx->cx;
        if (cx)
            constructRoot(cx);

        // If the context carries no temp allocator, construct one. The
        // unconditional *icx->temp below relies on AutoIonContextAlloc
        // installing it — TODO confirm against AutoIonContextAlloc.
        if (!icx->temp) {
            JS_ASSERT(cx);
            alloc_.construct(cx);
        }

        moveResolver_.setAllocator(*icx->temp);
        // NOTE(review): spsPc_ is left uninitialized here; it appears to be
        // read only after spsInstrumentation_ is constructed — confirm.
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = icx->getNextAssemblerId();
#endif
    }
|
224 |
|
225 // This constructor should only be used when there is no IonContext active |
|
226 // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp). |
|
    MacroAssembler(JSContext *cx, IonScript *ion = nullptr,
                   JSScript *script = nullptr, jsbytecode *pc = nullptr)
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
        constructRoot(cx);
        // Push our own IonContext (with no TempAllocator yet), then construct
        // the allocator that fills in its |temp|.
        ionContext_.construct(cx, (js::jit::TempAllocator *)nullptr);
        alloc_.construct(cx);
        moveResolver_.setAllocator(*ionContext_.ref().temp);
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = GetIonContext()->getNextAssemblerId();
#endif
        // When assembling against an existing IonScript (IC stubs), adopt its
        // frame size and, if profiling is on, wire up SPS instrumentation.
        if (ion) {
            setFramePushed(ion->frameSize());
            if (pc && cx->runtime()->spsProfiler.enabled()) {
                // We have to update the SPS pc when this IC stub calls into
                // the VM.
                spsPc_ = pc;
                spsInstrumentation_.construct(&cx->runtime()->spsProfiler, &spsPc_);
                sps_ = spsInstrumentation_.addr();
                sps_->setPushed(script);
            }
        }
    }
|
253 |
|
    // asm.js compilation handles its own IonContext-pushing
    struct AsmJSToken {};
    // Tag-dispatch constructor for asm.js: skips rooting, IonContext and
    // allocator setup entirely.
    MacroAssembler(AsmJSToken)
      : enoughMemory_(true),
        embedsNurseryPointers_(false),
        sps_(nullptr)
    {
#ifdef JS_CODEGEN_ARM
        initWithAllocator();
        m_buffer.id = 0;
#endif
    }
|
266 |
|
    // Install (or clear, with nullptr) the SPS instrumentation manager used
    // around call sites.
    void setInstrumentation(IonInstrumentation *sps) {
        sps_ = sps;
    }

    // Reset per-codegen state so the assembler can be reused by a new code
    // generator: zero the frame depth and re-point the move resolver's
    // allocator.
    void resetForNewCodeGenerator(TempAllocator &alloc) {
        setFramePushed(0);
        moveResolver_.clearTempObjectPool();
        moveResolver_.setAllocator(alloc);
    }

    // Construct the GC rooter for this assembler (see AutoRooter).
    void constructRoot(JSContext *cx) {
        autoRooter_.construct(cx, this);
    }

    MoveResolver &moveResolver() {
        return moveResolver_;
    }

    // Number of bytes of code emitted so far.
    size_t instructionsSize() const {
        return size();
    }
|
288 |
|
289 void propagateOOM(bool success) { |
|
290 enoughMemory_ &= success; |
|
291 } |
|
    // True if any fallible operation failed (propagateOOM) or the platform
    // assembler itself reports OOM.
    bool oom() const {
        return !enoughMemory_ || MacroAssemblerSpecific::oom();
    }

    // Whether the emitted code embeds pointers into the GC nursery.
    bool embedsNurseryPointers() const {
        return embedsNurseryPointers_;
    }
|
299 |
|
    // Emits a test of a value against all types in a TypeSet. A scratch
    // register is required. Defined out of line.
    template <typename Source, typename TypeSet>
    void guardTypeSet(const Source &address, const TypeSet *types, Register scratch, Label *miss);
    template <typename TypeSet>
    void guardObjectType(Register obj, const TypeSet *types, Register scratch, Label *miss);
    template <typename Source>
    void guardType(const Source &address, types::Type type, Register scratch, Label *miss);

    // Load the object's shape pointer into |dest|.
    void loadObjShape(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
    }
    // Load the object's base shape: the shape first, then its BaseShape.
    void loadBaseShape(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);

        loadPtr(Address(dest, Shape::offsetOfBase()), dest);
    }
    // Load the object's class pointer via its TypeObject.
    void loadObjClass(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfType()), dest);
        loadPtr(Address(dest, types::TypeObject::offsetOfClasp()), dest);
    }
    // Branch on a comparison of the object's class against |clasp|.
    // Clobbers |scratch| (holds the TypeObject).
    void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class *clasp,
                            Label *label) {
        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
        branchPtr(cond, Address(scratch, types::TypeObject::offsetOfClasp()), ImmPtr(clasp), label);
    }
    // Branch on a comparison of the object's shape against an immediate shape.
    void branchTestObjShape(Condition cond, Register obj, const Shape *shape, Label *label) {
        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
    }
    // Branch on a comparison of the object's shape against a shape register.
    void branchTestObjShape(Condition cond, Register obj, Register shape, Label *label) {
        branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
    }
    // Branch on the proxy's handler family pointer. Clobbers |scratch|.
    void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
                                      const void *handlerp, Label *label) {
        Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
        loadPrivate(handlerAddr, scratch);
        Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
        branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
    }
|
339 |
|
    // Branch on the MIRType of the boxed |val|. All three Magic types share a
    // single magic test. Note Double is tested strictly as double here, in
    // contrast to BranchType::emit which widens DoubleType to a number test.
    template <typename Value>
    void branchTestMIRType(Condition cond, const Value &val, MIRType type, Label *label) {
        switch (type) {
          case MIRType_Null:      return branchTestNull(cond, val, label);
          case MIRType_Undefined: return branchTestUndefined(cond, val, label);
          case MIRType_Boolean:   return branchTestBoolean(cond, val, label);
          case MIRType_Int32:     return branchTestInt32(cond, val, label);
          case MIRType_String:    return branchTestString(cond, val, label);
          case MIRType_Object:    return branchTestObject(cond, val, label);
          case MIRType_Double:    return branchTestDouble(cond, val, label);
          case MIRType_MagicOptimizedArguments: // Fall through.
          case MIRType_MagicIsConstructing:
          case MIRType_MagicHole: return branchTestMagic(cond, val, label);
          default:
            MOZ_ASSUME_UNREACHABLE("Bad MIRType");
        }
    }
|
357 |
|
    // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
    void branchIfFalseBool(Register reg, Label *label) {
        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
        branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
    }

    // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
    void branchIfTrueBool(Register reg, Label *label) {
        // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
        branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
    }
|
369 |
|
    // Load the object's private pointer; the slot offset depends on the
    // object's fixed-slot count.
    void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
        loadPtr(Address(obj, JSObject::getPrivateDataOffset(nfixed)), dest);
    }

    // Load the object's prototype via its TypeObject.
    void loadObjProto(Register obj, Register dest) {
        loadPtr(Address(obj, JSObject::offsetOfType()), dest);
        loadPtr(Address(dest, types::TypeObject::offsetOfProto()), dest);
    }

    // Load the string's length by shifting its combined length-and-flags word.
    void loadStringLength(Register str, Register dest) {
        loadPtr(Address(str, JSString::offsetOfLengthAndFlags()), dest);
        rshiftPtr(Imm32(JSString::LENGTH_SHIFT), dest);
    }

    // Load the parallel worker's slice bounds.
    void loadSliceBounds(Register worker, Register dest) {
        loadPtr(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()), dest);
    }

    // Load the runtime's current JSContext pointer from its fixed address.
    void loadJSContext(Register dest) {
        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfJSContext()), dest);
    }
    // Load the runtime's current JIT activation pointer.
    void loadJitActivation(Register dest) {
        loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfActivation()), dest);
    }
|
394 |
|
395 template<typename T> |
|
396 void loadTypedOrValue(const T &src, TypedOrValueRegister dest) { |
|
397 if (dest.hasValue()) |
|
398 loadValue(src, dest.valueReg()); |
|
399 else |
|
400 loadUnboxedValue(src, dest.type(), dest.typedReg()); |
|
401 } |
|
402 |
|
    // Load an element into |dest|, optionally branching to |hole| when the
    // element is the magic hole value. For a typed destination the hole test
    // must run on |src| *before* the unboxing load clobbers anything.
    template<typename T>
    void loadElementTypedOrValue(const T &src, TypedOrValueRegister dest, bool holeCheck,
                                 Label *hole) {
        if (dest.hasValue()) {
            loadValue(src, dest.valueReg());
            if (holeCheck)
                branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
        } else {
            if (holeCheck)
                branchTestMagic(Assembler::Equal, src, hole);
            loadUnboxedValue(src, dest.type(), dest.typedReg());
        }
    }
|
416 |
|
    // Store |src| to |dest|. Boxed sources are stored directly; Float32 is
    // first widened to double through ScratchFloatReg; other typed sources
    // are stored as a tagged Value built from the GPR payload.
    template <typename T>
    void storeTypedOrValue(TypedOrValueRegister src, const T &dest) {
        if (src.hasValue()) {
            storeValue(src.valueReg(), dest);
        } else if (IsFloatingPointType(src.type())) {
            FloatRegister reg = src.typedReg().fpu();
            if (src.type() == MIRType_Float32) {
                convertFloat32ToDouble(reg, ScratchFloatReg);
                reg = ScratchFloatReg;
            }
            storeDouble(reg, dest);
        } else {
            storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
        }
    }
|
432 |
|
433 template <typename T> |
|
434 void storeConstantOrRegister(ConstantOrRegister src, const T &dest) { |
|
435 if (src.constant()) |
|
436 storeValue(src.value(), dest); |
|
437 else |
|
438 storeTypedOrValue(src.reg(), dest); |
|
439 } |
|
440 |
|
    // Move the integer/pointer call return into |reg|, skipping the move when
    // |reg| already is the return register.
    void storeCallResult(Register reg) {
        if (reg != ReturnReg)
            mov(ReturnReg, reg);
    }

    // Move the floating-point call return into |reg| if needed.
    void storeCallFloatResult(const FloatRegister &reg) {
        if (reg != ReturnFloatReg)
            moveDouble(ReturnFloatReg, reg);
    }

    // Unbox the boxed Value call return into a typed register; the source
    // registers depend on the value-boxing scheme.
    void storeCallResultValue(AnyRegister dest) {
#if defined(JS_NUNBOX32)
        unboxValue(ValueOperand(JSReturnReg_Type, JSReturnReg_Data), dest);
#elif defined(JS_PUNBOX64)
        unboxValue(ValueOperand(JSReturnReg), dest);
#else
#error "Bad architecture"
#endif
    }
|
460 |
|
    // Move the boxed call return into |dest|, handling all overlap cases
    // between the return registers and the destination pair.
    void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
        // reshuffle the return registers used for a call result to store into
        // dest, using ReturnReg as a scratch register if necessary. This must
        // only be called after returning from a call, at a point when the
        // return register is not live. XXX would be better to allow wrappers
        // to store the return value to different places.
        if (dest.typeReg() == JSReturnReg_Data) {
            if (dest.payloadReg() == JSReturnReg_Type) {
                // swap the two registers.
                mov(JSReturnReg_Type, ReturnReg);
                mov(JSReturnReg_Data, JSReturnReg_Type);
                mov(ReturnReg, JSReturnReg_Data);
            } else {
                // Move data first: its destination (the type reg) is about to
                // be overwritten otherwise.
                mov(JSReturnReg_Data, dest.payloadReg());
                mov(JSReturnReg_Type, dest.typeReg());
            }
        } else {
            mov(JSReturnReg_Type, dest.typeReg());
            mov(JSReturnReg_Data, dest.payloadReg());
        }
#elif defined(JS_PUNBOX64)
        if (dest.valueReg() != JSReturnReg)
            movq(JSReturnReg, dest.valueReg());
#else
#error "Bad architecture"
#endif
    }
|
489 |
|
490 void storeCallResultValue(TypedOrValueRegister dest) { |
|
491 if (dest.hasValue()) |
|
492 storeCallResultValue(dest.valueReg()); |
|
493 else |
|
494 storeCallResultValue(dest.typedReg()); |
|
495 } |
|
496 |
|
    // NOTE(review): string extraction forwards to extractObject — presumably
    // valid because both payloads extract identically; confirm against the
    // platform-specific extractObject.
    template <typename T>
    Register extractString(const T &source, Register scratch) {
        return extractObject(source, scratch);
    }

    // Save/restore register sets around calls. The RegisterSet overloads are
    // defined out of line; the GeneralRegisterSet forms wrap them with an
    // empty float-register set.
    void PushRegsInMask(RegisterSet set);
    void PushRegsInMask(GeneralRegisterSet set) {
        PushRegsInMask(RegisterSet(set, FloatRegisterSet()));
    }
    void PopRegsInMask(RegisterSet set) {
        PopRegsInMaskIgnore(set, RegisterSet());
    }
    void PopRegsInMask(GeneralRegisterSet set) {
        PopRegsInMask(RegisterSet(set, FloatRegisterSet()));
    }
    void PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore);
|
513 |
|
    void branchIfFunctionHasNoScript(Register fun, Label *label) {
        // 16-bit loads are slow and unaligned 32-bit loads may be too so
        // perform an aligned 32-bit load and adjust the bitmask accordingly.
        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
        Address address(fun, JSFunction::offsetOfNargs());
        // Flags sit 2 bytes after nargs, so on little-endian the INTERPRETED
        // bit lives in the upper half of the 32-bit word.
        uint32_t bit = JSFunction::INTERPRETED << 16;
        branchTest32(Assembler::Zero, address, Imm32(bit), label);
    }
    void branchIfInterpreted(Register fun, Label *label) {
        // Same aligned 32-bit-load trick as branchIfFunctionHasNoScript,
        // with the branch sense inverted.
        JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
        JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
        JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
        Address address(fun, JSFunction::offsetOfNargs());
        uint32_t bit = JSFunction::INTERPRETED << 16;
        branchTest32(Assembler::NonZero, address, Imm32(bit), label);
    }
|
534 |
|
    // Branch if |fun| is not an interpreted constructor. Defined out of line.
    void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label);

    using MacroAssemblerSpecific::Push;
    using MacroAssemblerSpecific::Pop;
|
539 |
|
    // Push |id|, keeping any embedded GC thing visible to the GC.
    // Clobbers |scratchReg| in the object case.
    void Push(jsid id, Register scratchReg) {
        if (JSID_IS_GCTHING(id)) {
            // If we're pushing a gcthing, then we can't just push the tagged jsid
            // value since the GC won't have any idea that the push instruction
            // carries a reference to a gcthing. Need to unpack the pointer,
            // push it using ImmGCPtr, and then rematerialize the id at runtime.

            // double-checking this here to ensure we don't lose sync
            // with implementation of JSID_IS_GCTHING.
            if (JSID_IS_OBJECT(id)) {
                JSObject *obj = JSID_TO_OBJECT(id);
                movePtr(ImmGCPtr(obj), scratchReg);
                JS_ASSERT(((size_t)obj & JSID_TYPE_MASK) == 0);
                orPtr(Imm32(JSID_TYPE_OBJECT), scratchReg);
                Push(scratchReg);
            } else {
                // String ids carry a zero type tag, so the bare pointer is
                // already the tagged jsid value.
                JSString *str = JSID_TO_STRING(id);
                JS_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
                JS_ASSERT(JSID_TYPE_STRING == 0x0);
                Push(ImmGCPtr(str));
            }
        } else {
            Push(ImmWord(JSID_BITS(id)));
        }
    }
|
565 |
|
    // Push |v| as a Value. Float32 payloads are widened to double through
    // ScratchFloatReg first (mirroring storeTypedOrValue); other typed GPR
    // payloads are pushed with their value tag.
    void Push(TypedOrValueRegister v) {
        if (v.hasValue()) {
            Push(v.valueReg());
        } else if (IsFloatingPointType(v.type())) {
            FloatRegister reg = v.typedReg().fpu();
            if (v.type() == MIRType_Float32) {
                convertFloat32ToDouble(reg, ScratchFloatReg);
                reg = ScratchFloatReg;
            }
            Push(reg);
        } else {
            Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
        }
    }
|
580 |
|
581 void Push(ConstantOrRegister v) { |
|
582 if (v.constant()) |
|
583 Push(v.value()); |
|
584 else |
|
585 Push(v.reg()); |
|
586 } |
|
587 |
|
    // The Push overloads below wrap pushValue and keep framePushed_ in sync
    // so frame-relative addressing stays correct.
    void Push(const ValueOperand &val) {
        pushValue(val);
        framePushed_ += sizeof(Value);
    }

    void Push(const Value &val) {
        pushValue(val);
        framePushed_ += sizeof(Value);
    }

    void Push(JSValueType type, Register reg) {
        pushValue(type, reg);
        framePushed_ += sizeof(Value);
    }

    void PushValue(const Address &addr) {
        // The address must not be stack-relative: the push itself moves the
        // stack pointer.
        JS_ASSERT(addr.base != StackPointer);
        pushValue(addr);
        framePushed_ += sizeof(Value);
    }

    // Push/pop an empty rooted slot for VM calls. Defined out of line.
    void PushEmptyRooted(VMFunction::RootType rootType);
    void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand &valueReg);
|
611 |
|
612 void adjustStack(int amount) { |
|
613 if (amount > 0) |
|
614 freeStack(amount); |
|
615 else if (amount < 0) |
|
616 reserveStack(-amount); |
|
617 } |
|
618 |
|
    // Add |diff| to a register-or-constant index.
    void bumpKey(Int32Key *key, int diff) {
        if (key->isRegister())
            add32(Imm32(diff), key->reg());
        else
            key->bumpConstant(diff);
    }

    // Store the 32-bit index to |dest|.
    void storeKey(const Int32Key &key, const Address &dest) {
        if (key.isRegister())
            store32(key.reg(), dest);
        else
            store32(Imm32(key.constant()), dest);
    }

    // Compare |length| against the index and branch on |cond|.
    template<typename T>
    void branchKey(Condition cond, const T &length, const Int32Key &key, Label *label) {
        if (key.isRegister())
            branch32(cond, length, key.reg(), label);
        else
            branch32(cond, length, Imm32(key.constant()), label);
    }
|
640 |
|
    // Branch on the compile zone's needs-incremental-barrier flag.
    // Clobbers |scratch| with the flag's address.
    void branchTestNeedsBarrier(Condition cond, Register scratch, Label *label) {
        JS_ASSERT(cond == Zero || cond == NonZero);
        CompileZone *zone = GetIonContext()->compartment->zone();
        movePtr(ImmPtr(zone->addressOfNeedsBarrier()), scratch);
        Address needsBarrierAddr(scratch, 0);
        branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
    }
|
648 |
|
    // Emit a call to the GC pre-write barrier stub for the cell stored at
    // |address|. Value-typed slots skip the barrier for non-GC-thing values.
    // Saves/restores PreBarrierReg around the call. Note only MIRType_Shape
    // selects the shape stub; String and Object both use the value stub.
    template <typename T>
    void callPreBarrier(const T &address, MIRType type) {
        JS_ASSERT(type == MIRType_Value ||
                  type == MIRType_String ||
                  type == MIRType_Object ||
                  type == MIRType_Shape);
        Label done;

        if (type == MIRType_Value)
            branchTestGCThing(Assembler::NotEqual, address, &done);

        Push(PreBarrierReg);
        computeEffectiveAddress(address, PreBarrierReg);

        const JitRuntime *rt = GetIonContext()->runtime->jitRuntime();
        JitCode *preBarrier = (type == MIRType_Shape)
                              ? rt->shapePreBarrier()
                              : rt->valuePreBarrier();

        call(preBarrier);
        Pop(PreBarrierReg);

        bind(&done);
    }
|
673 |
|
    // Like callPreBarrier, but guarded by a patchable (toggled) jump so the
    // entire barrier sequence can be switched on or off after codegen.
    template <typename T>
    void patchableCallPreBarrier(const T &address, MIRType type) {
        JS_ASSERT(type == MIRType_Value ||
                  type == MIRType_String ||
                  type == MIRType_Object ||
                  type == MIRType_Shape);

        Label done;

        // All barriers are off by default.
        // They are enabled if necessary at the end of CodeGenerator::generate().
        CodeOffsetLabel nopJump = toggledJump(&done);
        writePrebarrierOffset(nopJump);

        callPreBarrier(address, type);
        jump(&done);

        // NOTE(review): the align(8) presumably keeps the toggled-jump patch
        // site at a stable boundary — confirm against toggledJump's patching
        // requirements.
        align(8);
        bind(&done);
    }
|
694 |
|
    // Nursery-aware pointer compare/move. Defined out of line; presumably
    // these set embedsNurseryPointers_ when a nursery pointer is embedded —
    // confirm in the .cpp.
    void branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
                          Label *label);
    void moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg);
|
698 |
|
699 void canonicalizeDouble(FloatRegister reg) { |
|
700 Label notNaN; |
|
701 branchDouble(DoubleOrdered, reg, reg, ¬NaN); |
|
702 loadConstantDouble(JS::GenericNaN(), reg); |
|
703 bind(¬NaN); |
|
704 } |
|
705 |
|
706 void canonicalizeFloat(FloatRegister reg) { |
|
707 Label notNaN; |
|
708 branchFloat(DoubleOrdered, reg, reg, ¬NaN); |
|
709 loadConstantFloat32(float(JS::GenericNaN()), reg); |
|
710 bind(¬NaN); |
|
711 } |
|
712 |
|
    // Load a typed-array element from |src| into |dest|; branches to |fail|
    // on failure. Defined out of line.
    template<typename T>
    void loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail);

    template<typename T>
    void loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest, bool allowDouble,
                            Register temp, Label *fail);
|
719 |
|
    // Store |value| into an integer typed-array element, choosing the store
    // width (8/16/32 bits) that matches |arrayType|.
    template<typename S, typename T>
    void storeToTypedIntArray(int arrayType, const S &value, const T &dest) {
        switch (arrayType) {
          case ScalarTypeDescr::TYPE_INT8:
          case ScalarTypeDescr::TYPE_UINT8:
          case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
            store8(value, dest);
            break;
          case ScalarTypeDescr::TYPE_INT16:
          case ScalarTypeDescr::TYPE_UINT16:
            store16(value, dest);
            break;
          case ScalarTypeDescr::TYPE_INT32:
          case ScalarTypeDescr::TYPE_UINT32:
            store32(value, dest);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
        }
    }

    // Floating-point counterparts; defined out of line.
    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const BaseIndex &dest);
    void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const Address &dest);
|
743 |
|
    // String payload extraction forwards to extractObject (see the template
    // overload above for the same pattern).
    Register extractString(const Address &address, Register scratch) {
        return extractObject(address, scratch);
    }
    Register extractString(const ValueOperand &value, Register scratch) {
        return extractObject(value, scratch);
    }

    using MacroAssemblerSpecific::extractTag;
    // Extract the tag of |reg|: from the boxed value when present, otherwise
    // materialize the tag implied by the static MIRType into |scratch|.
    Register extractTag(const TypedOrValueRegister &reg, Register scratch) {
        if (reg.hasValue())
            return extractTag(reg.valueReg(), scratch);
        mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
        return scratch;
    }

    using MacroAssemblerSpecific::extractObject;
    // Extract the object payload; a typed register must already be an object,
    // in which case its GPR is returned directly (no code emitted).
    Register extractObject(const TypedOrValueRegister &reg, Register scratch) {
        if (reg.hasValue())
            return extractObject(reg.valueReg(), scratch);
        JS_ASSERT(reg.type() == MIRType_Object);
        return reg.typedReg().gpr();
    }
|
766 |
|
    // Inline version of js_TypedArray_uint8_clamp_double.
    // This function clobbers the input register.
    void clampDoubleToUint8(FloatRegister input, Register output);

    using MacroAssemblerSpecific::ensureDouble;

    // Ensure |source| ends up in |dest| as a double: unbox doubles, convert
    // int32s, and branch to |failure| for any other value type.
    template <typename S>
    void ensureDouble(const S &source, FloatRegister dest, Label *failure) {
        Label isDouble, done;
        branchTestDouble(Assembler::Equal, source, &isDouble);
        branchTestInt32(Assembler::NotEqual, source, failure);

        convertInt32ToDouble(source, dest);
        jump(&done);

        bind(&isDouble);
        unboxDouble(source, dest);

        bind(&done);
    }
|
787 |
|
    // Emit type case branch on tag matching if the type tag in the definition
    // might actually be that type.
    void branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag, Label *label);

    // Inline allocation. All defined out of line; each branches to |fail| when
    // inline allocation is not possible.
    void newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
                    gc::InitialHeap initialHeap = gc::DefaultHeap);
    void newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
                    gc::InitialHeap initialHeap);
    void newGCString(Register result, Register temp, Label *fail);
    void newGCFatInlineString(Register result, Register temp, Label *fail);

    // Parallel-execution variants of the allocation helpers above.
    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       gc::AllocKind allocKind, Label *fail);
    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                       JSObject *templateObject, Label *fail);
    void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        Label *fail);
    void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                 Label *fail);

    // Slot-initialization helpers for freshly allocated objects.
    void copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
                               uint32_t start, uint32_t end);
    void fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
                                uint32_t start, uint32_t end);
    void initGCSlots(Register obj, Register temp, JSObject *templateObj);
    void initGCThing(Register obj, Register temp, JSObject *templateObj);

    // Compares two strings for equality based on the JSOP.
    // This checks for identical pointers, atoms and length and fails for everything else.
    void compareStrings(JSOp op, Register left, Register right, Register result,
                        Register temp, Label *fail);

    // Checks the flags that signal that parallel code may need to interrupt or
    // abort. Branches to fail in that case.
    void checkInterruptFlagPar(Register tempReg, Label *fail);
|
824 |
|
    // If the JitCode that created this assembler needs to transition into the VM,
    // we want to store the JitCode on the stack in order to mark it during a GC.
    // This is a reference to a patch location where the JitCode* will be written.
  private:
    CodeOffsetLabel exitCodePatch_;

  public:
    void enterExitFrame(const VMFunction *f = nullptr) {
        linkExitFrame();
        // Push the ioncode. (Bailout or VM wrapper)
        // A -1 placeholder is pushed here; link() patches in the real
        // JitCode* once the code object exists.
        exitCodePatch_ = PushWithPatch(ImmWord(-1));
        // Push VMFunction pointer, to mark arguments.
        Push(ImmPtr(f));
    }
    // Exit frame without a patchable slot: pushes |codeVal| (possibly null)
    // and a null VMFunction pointer.
    void enterFakeExitFrame(JitCode *codeVal = nullptr) {
        linkExitFrame();
        Push(ImmPtr(codeVal));
        Push(ImmPtr(nullptr));
    }

    void loadThreadPool(Register pool) {
        // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
        // JSRuntime, so we can hardcode the ThreadPool address here.
        movePtr(ImmPtr(GetIonContext()->runtime->addressOfThreadPool()), pool);
    }
|
850 |
|
    // Out-of-line helpers for loading the active context and entering exit
    // frames in sequential or parallel execution mode.
    void loadForkJoinContext(Register cx, Register scratch);
    void loadContext(Register cxReg, Register scratch, ExecutionMode executionMode);

    void enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
                                              Register scratch);

    void enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
                                      ExecutionMode executionMode);

    void enterFakeParallelExitFrame(Register cx, Register scratch,
                                    JitCode *codeVal = nullptr);

    void enterFakeExitFrame(Register cxReg, Register scratch,
                            ExecutionMode executionMode,
                            JitCode *codeVal = nullptr);

    // Pop the exit frame footer pushed by the enter*ExitFrame helpers.
    void leaveExitFrame() {
        freeStack(IonExitFooterFrame::Size());
    }

    // True once enterExitFrame() recorded a patch location; an offset of 0
    // means the patchable push never happened.
    bool hasEnteredExitFrame() const {
        return exitCodePatch_.offset() != 0;
    }
|
874 |
|
    // Finalize against the freshly allocated JitCode: if an exit frame was
    // entered, patch the -1 placeholder pushed by enterExitFrame() with the
    // real code pointer so the GC can mark it.
    void link(JitCode *code) {
        JS_ASSERT(!oom());
        // If this code can transition to C++ code and witness a GC, then we need to store
        // the JitCode onto the stack in order to GC it correctly. exitCodePatch should
        // be unset if the code never needed to push its JitCode*.
        if (hasEnteredExitFrame()) {
            exitCodePatch_.fixup(this);
            patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
                                    ImmPtr(code),
                                    ImmPtr((void*)-1));
        }

    }
|
888 |
|
    // Generates code used to complete a bailout.
    void generateBailoutTail(Register scratch, Register bailoutInfo);

    // These functions exist as small wrappers around sites where execution can
    // leave the currently running stream of instructions. They exist so that
    // instrumentation may be put in place around them if necessary and the
    // instrumentation is enabled. For the functions that return a uint32_t,
    // they are returning the offset of the assembler just after the call has
    // been made so that a safepoint can be made at that location.

    // Raw ABI call with no SPS (profiler) enter/leave instrumentation.
    template <typename T>
    void callWithABINoProfiling(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        MacroAssemblerSpecific::callWithABI(fun, result);
    }

    // ABI call bracketed by SPS instrumentation (see leaveSPSFrame and
    // reenterSPSFrame below).
    template <typename T>
    void callWithABI(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
        leaveSPSFrame();
        callWithABINoProfiling(fun, result);
        reenterSPSFrame();
    }
|
910 |
|
911 // see above comment for what is returned |
|
912 uint32_t callIon(Register callee) { |
|
913 leaveSPSFrame(); |
|
914 MacroAssemblerSpecific::callIon(callee); |
|
915 uint32_t ret = currentOffset(); |
|
916 reenterSPSFrame(); |
|
917 return ret; |
|
918 } |
|
919 |
|
920 // see above comment for what is returned |
|
921 uint32_t callWithExitFrame(JitCode *target) { |
|
922 leaveSPSFrame(); |
|
923 MacroAssemblerSpecific::callWithExitFrame(target); |
|
924 uint32_t ret = currentOffset(); |
|
925 reenterSPSFrame(); |
|
926 return ret; |
|
927 } |
|
928 |
|
929 // see above comment for what is returned |
|
930 uint32_t callWithExitFrame(JitCode *target, Register dynStack) { |
|
931 leaveSPSFrame(); |
|
932 MacroAssemblerSpecific::callWithExitFrame(target, dynStack); |
|
933 uint32_t ret = currentOffset(); |
|
934 reenterSPSFrame(); |
|
935 return ret; |
|
936 } |
|
937 |
|
938 void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch, |
|
939 Label *slowCheck, Label *checked) |
|
940 { |
|
941 // The branches to out-of-line code here implement a conservative version |
|
942 // of the JSObject::isWrapper test performed in EmulatesUndefined. If none |
|
943 // of the branches are taken, we can check class flags directly. |
|
944 loadObjClass(objReg, scratch); |
|
945 Address flags(scratch, Class::offsetOfFlags()); |
|
946 |
|
947 branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_IS_PROXY), slowCheck); |
|
948 |
|
949 Condition cond = truthy ? Assembler::Zero : Assembler::NonZero; |
|
950 branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked); |
|
951 } |
|
952 |
|
  private:
    // These two functions are helpers used around call sites throughout the
    // assembler. They are called from the above call wrappers to emit the
    // necessary instrumentation.

    // Emit SPS "leave" instrumentation; no-op when the profiler is absent or
    // disabled.
    void leaveSPSFrame() {
        if (!sps_ || !sps_->enabled())
            return;
        // No registers are guaranteed to be available, so push/pop a register
        // so we can use one
        push(CallTempReg0);
        sps_->leave(*this, CallTempReg0);
        pop(CallTempReg0);
    }
|
966 |
|
    // Emit SPS "reenter" instrumentation after a call; no-op when the profiler
    // is absent or disabled.
    void reenterSPSFrame() {
        if (!sps_ || !sps_->enabled())
            return;
        // Attempt to use a now-free register within a given set, but if the
        // architecture being built doesn't have an available register, resort
        // to push/pop
        GeneralRegisterSet regs(Registers::TempMask & ~Registers::JSCallMask &
                                ~Registers::CallMask);
        if (regs.empty()) {
            push(CallTempReg0);
            sps_->reenter(*this, CallTempReg0);
            pop(CallTempReg0);
        } else {
            sps_->reenter(*this, regs.getAny());
        }
    }
|
983 |
|
    // Compute into |temp| the address of the SPS profile entry at index
    // (current stack size + offset), or jump to |full| if that index reaches
    // the profiler's capacity. Bakes the profiler's size/stack addresses and
    // max size into the code as immediates, so the emitted jitcode must not
    // outlive changes to the profiler settings (see the "safe" variant below).
    void spsProfileEntryAddress(SPSProfiler *p, int offset, Register temp,
                                Label *full)
    {
        movePtr(ImmPtr(p->sizePointer()), temp);
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);
        branch32(Assembler::GreaterThanOrEqual, temp, Imm32(p->maxSize()), full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        addPtr(ImmPtr(p->stack()), temp);
    }
|
998 |
|
    // The safe version of the above method refrains from assuming that the fields
    // of the SPSProfiler class are going to stay the same across different runs of
    // the jitcode. Ion can use the more efficient unsafe version because ion jitcode
    // will not survive changes to the profiler settings. Baseline jitcode, however,
    // can span these changes, so any hardcoded field values will be incorrect afterwards.
    // All the sps-related methods used by baseline call |spsProfileEntryAddressSafe|.
    void spsProfileEntryAddressSafe(SPSProfiler *p, int offset, Register temp,
                                    Label *full)
    {
        // Load size pointer
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);

        // Load size
        load32(Address(temp, 0), temp);
        if (offset != 0)
            add32(Imm32(offset), temp);

        // Test against max size. (maxSize <= idx, i.e. idx >= maxSize,
        // jumps to |full|.)
        branch32(Assembler::LessThanOrEqual, AbsoluteAddress(p->addressOfMaxSize()), temp, full);

        // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
        JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
        lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
        // Spill the scaled index, load the (indirect) stack base into |temp|,
        // add the spilled index back in, then discard the spill slot.
        push(temp);
        loadPtr(AbsoluteAddress(p->addressOfStack()), temp);
        addPtr(Address(StackPointer, 0), temp);
        addPtr(Imm32(sizeof(size_t)), StackPointer);
    }
|
1027 |
|
  public:
    // These functions are needed by the IonInstrumentation interface defined in
    // vm/SPSProfiler.h. They will modify the pseudostack provided to SPS to
    // perform the actual instrumentation.

    // Store an immediate pc index into the top profile entry (index size-1).
    // Uses the unsafe (hardcoded-address) entry lookup, i.e. the Ion flavor.
    void spsUpdatePCIdx(SPSProfiler *p, int32_t idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, -1, temp, &stackFull);
        store32(Imm32(idx), Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }

    // Register-valued variant; uses the safe entry lookup (Baseline flavor,
    // see the comment on spsProfileEntryAddressSafe).
    void spsUpdatePCIdx(SPSProfiler *p, Register idx, Register temp) {
        Label stackFull;
        spsProfileEntryAddressSafe(p, -1, temp, &stackFull);
        store32(idx, Address(temp, ProfileEntry::offsetOfPCIdx()));
        bind(&stackFull);
    }
|
1046 |
|
    // spsPushFrame variant for Ion-optimized scripts. The string, script and
    // profiler addresses are baked in as immediates (fast, but the jitcode
    // must be discarded when profiler settings change — see the comment on
    // spsProfileEntryAddressSafe).
    void spsPushFrame(SPSProfiler *p, const char *str, JSScript *s, Register temp) {
        Label stackFull;
        spsProfileEntryAddress(p, 0, temp, &stackFull);

        storePtr(ImmPtr(str), Address(temp, ProfileEntry::offsetOfString()));
        storePtr(ImmGCPtr(s), Address(temp, ProfileEntry::offsetOfScript()));
        storePtr(ImmPtr((void*) ProfileEntry::SCRIPT_OPT_STACKPOINTER),
                 Address(temp, ProfileEntry::offsetOfStackAddress()));
        store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(1), Address(temp, 0));
    }
|
1063 |
|
    // spsPushFrame variant for Baseline-optimized scripts. Uses only the
    // "safe" indirect profiler accesses so the emitted code survives profiler
    // setting changes; the string and script are loaded from memory rather
    // than baked in as immediates.
    void spsPushFrame(SPSProfiler *p, const Address &str, const Address &script,
                      Register temp, Register temp2)
    {
        Label stackFull;
        spsProfileEntryAddressSafe(p, 0, temp, &stackFull);

        loadPtr(str, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfString()));

        loadPtr(script, temp2);
        storePtr(temp2, Address(temp, ProfileEntry::offsetOfScript()));

        storePtr(ImmPtr(nullptr), Address(temp, ProfileEntry::offsetOfStackAddress()));

        // Store 0 for PCIdx because that's what interpreter does.
        // (See probes::EnterScript, which calls spsProfiler.enter, which pushes an entry
        // with 0 pcIdx).
        store32(Imm32(0), Address(temp, ProfileEntry::offsetOfPCIdx()));

        /* Always increment the stack size, whether or not we actually pushed. */
        bind(&stackFull);
        movePtr(ImmPtr(p->addressOfSizePointer()), temp);
        loadPtr(Address(temp, 0), temp);
        add32(Imm32(1), Address(temp, 0));
    }
|
1090 |
|
    // Decrement the SPS pseudostack size (pop one frame). Hardcodes
    // |p->sizePointer()|, so only suitable for Ion jitcode (cf. the safe
    // variant below).
    void spsPopFrame(SPSProfiler *p, Register temp) {
        movePtr(ImmPtr(p->sizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }

    // spsPopFrameSafe does not assume |profiler->sizePointer()| will stay constant.
    void spsPopFrameSafe(SPSProfiler *p, Register temp) {
        loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
        add32(Imm32(-1), Address(temp, 0));
    }
|
1101 |
|
    // Label string used when marking jit entry on the SPS pseudostack;
    // defined out of line, along with the mark/unmark emitters.
    static const char enterJitLabel[];
    void spsMarkJit(SPSProfiler *p, Register framePtr, Register temp);
    void spsUnmarkJit(SPSProfiler *p, Register temp);

    // Load a script's Baseline-or-Ion code entry for |mode| into |dest|,
    // jumping to |failure| when unavailable; defined out of line.
    void loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode, Label *failure);
    void loadBaselineOrIonNoArgCheck(Register callee, Register dest, ExecutionMode mode, Label *failure);

    // Compute the BaselineFrame pointer from |framePtr| into |dest|; defined
    // out of line.
    void loadBaselineFramePtr(Register framePtr, Register dest);

    // Compute the BaselineFrame pointer and push it on the stack.
    void pushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        push(scratch);
    }
|
1115 |
|
  private:
    // Emit the shared failure path for the given execution mode; defined out
    // of line.
    void handleFailure(ExecutionMode executionMode);

  public:
    // Label jumped to on exception.
    Label *exceptionLabel() {
        // Exceptions are currently handled the same way as sequential failures.
        return &sequentialFailureLabel_;
    }
|
1124 |
|
1125 Label *failureLabel(ExecutionMode executionMode) { |
|
1126 switch (executionMode) { |
|
1127 case SequentialExecution: return &sequentialFailureLabel_; |
|
1128 case ParallelExecution: return ¶llelFailureLabel_; |
|
1129 default: MOZ_ASSUME_UNREACHABLE("Unexpected execution mode"); |
|
1130 } |
|
1131 } |
|
1132 |
|
    // Finalize the assembler; defined out of line.
    void finish();

    // Debugging aids (defined out of line): report |output| if the marked
    // point is ever reached, or print at runtime (optionally with a register
    // value).
    void assumeUnreachable(const char *output);
    void printf(const char *output);
    void printf(const char *output, Register value);

#ifdef JS_TRACE_LOGGING
    // Emit trace-logger start/stop events; only built with JS_TRACE_LOGGING.
    void tracelogStart(Register logger, uint32_t textId);
    void tracelogStart(Register logger, Register textId);
    void tracelogStop(Register logger, uint32_t textId);
    void tracelogStop(Register logger, Register textId);
    void tracelogStop(Register logger);
#endif
|
1146 |
|
    // Dispatch on a floating-point MIRType: expands to a call to
    // method##Double(arg1d, arg2) for MIRType_Double, otherwise
    // method##Float32(arg1f, arg2). |type| must be a floating-point MIRType.
#define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2)    \
    JS_ASSERT(IsFloatingPointType(type));                               \
    if (type == MIRType_Double)                                         \
        method##Double(arg1d, arg2);                                    \
    else                                                                \
        method##Float32(arg1f, arg2);                                   \

    // Load the constant |d| (double) or |f| (float32) into |dest| per |destType|.
    void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
    }
    // Convert the boolean in |value| to a double or float32 in |dest|.
    void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
    }
    // Convert the int32 in |value| to a double or float32 in |dest|.
    void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
    }
    // Convert the int32 in |src| to a double or float32 in |dest|.
    void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
    }

#undef DISPATCH_FLOATING_POINT_OP
|
1168 |
|
    // Convert a boxed Value (or constant / typed-or-value) to double or
    // float32 according to |outputType| (MIRType_Double / MIRType_Float32),
    // jumping to |fail| for inputs that cannot be converted. The JSContext
    // overloads are compile-time variants for known values; their bool result
    // reports failure. All defined out of line.
    void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label *fail,
                                     MIRType outputType);
    bool convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
                                     Label *fail, MIRType outputType);
    bool convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
                                                  FloatRegister output, Label *fail,
                                                  MIRType outputType);
    void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                            Label *fail, MIRType outputType);
|
    // In-place int32-Value-to-double conversion at |address|; |done| is the
    // join point (defined out of line).
    void convertInt32ValueToDouble(const Address &address, Register scratch, Label *done);

    // Double-typed convenience wrappers around the generic *ToFloatingPoint
    // helpers above.
    void convertValueToDouble(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Double);
    }
    bool convertValueToDouble(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
    }
    bool convertConstantOrRegisterToDouble(JSContext *cx, ConstantOrRegister src,
                                           FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
    }
    void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
    }

    // Float32-typed convenience wrappers.
    void convertValueToFloat(ValueOperand value, FloatRegister output, Label *fail) {
        convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
    }
    bool convertValueToFloat(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
        return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
    }
    bool convertConstantOrRegisterToFloat(JSContext *cx, ConstantOrRegister src,
                                          FloatRegister output, Label *fail)
    {
        return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
    }
    void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label *fail) {
        convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
    }
|
1209 |
|
    // Behavior selector for the value-to-int helpers below (convertValueToInt
    // and the convert/truncate/clamp wrapper families).
    enum IntConversionBehavior {
        IntConversion_Normal,
        IntConversion_NegativeZeroCheck, // like Normal, but selected when a -0 check is requested
        IntConversion_Truncate,          // used by the truncate* wrappers
        IntConversion_ClampToUint8,      // used by the clamp* wrappers
    };

    // Which input types the conversion accepts before jumping to the fail
    // label.
    enum IntConversionInputKind {
        IntConversion_NumbersOnly,
        IntConversion_NumbersOrBoolsOnly,
        IntConversion_Any
    };
|
1222 |
|
    //
    // Functions for converting values to int.
    //
    // Convert the double in |src| to an int in |output| per |behavior|;
    // defined out of line.
    void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
                            Label *truncateFail, Label *fail, IntConversionBehavior behavior);

    // Strings may be handled by providing labels to jump to when the behavior
    // is truncation or clamping. The subroutine, usually an OOL call, is
    // passed the unboxed string in |stringReg| and should convert it to a
    // double store into |temp|.
    void convertValueToInt(ValueOperand value, MDefinition *input,
                           Label *handleStringEntry, Label *handleStringRejoin,
                           Label *truncateDoubleSlow,
                           Register stringReg, FloatRegister temp, Register output,
                           Label *fail, IntConversionBehavior behavior,
                           IntConversionInputKind conversion = IntConversion_Any);
    // Convenience overload: no string handling and no MIR input hint.
    void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label *fail,
                           IntConversionBehavior behavior)
    {
        convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
                          fail, behavior);
    }
    // Compile-time variants for known values; bool result reports failure.
    // Defined out of line.
    bool convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
                           IntConversionBehavior behavior);
    bool convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label *fail, IntConversionBehavior behavior);
    void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label *fail, IntConversionBehavior behavior);
|
1251 |
|
1252 // |
|
1253 // Convenience functions for converting values to int32. |
|
1254 // |
|
1255 void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail, |
|
1256 bool negativeZeroCheck) |
|
1257 { |
|
1258 convertValueToInt(value, temp, output, fail, negativeZeroCheck |
|
1259 ? IntConversion_NegativeZeroCheck |
|
1260 : IntConversion_Normal); |
|
1261 } |
|
1262 void convertValueToInt32(ValueOperand value, MDefinition *input, |
|
1263 FloatRegister temp, Register output, Label *fail, |
|
1264 bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any) |
|
1265 { |
|
1266 convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail, |
|
1267 negativeZeroCheck |
|
1268 ? IntConversion_NegativeZeroCheck |
|
1269 : IntConversion_Normal, |
|
1270 conversion); |
|
1271 } |
|
1272 bool convertValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail, |
|
1273 bool negativeZeroCheck) |
|
1274 { |
|
1275 return convertValueToInt(cx, v, output, fail, negativeZeroCheck |
|
1276 ? IntConversion_NegativeZeroCheck |
|
1277 : IntConversion_Normal); |
|
1278 } |
|
1279 bool convertConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp, |
|
1280 Register output, Label *fail, bool negativeZeroCheck) |
|
1281 { |
|
1282 return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck |
|
1283 ? IntConversion_NegativeZeroCheck |
|
1284 : IntConversion_Normal); |
|
1285 } |
|
1286 void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output, |
|
1287 Label *fail, bool negativeZeroCheck) |
|
1288 { |
|
1289 convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck |
|
1290 ? IntConversion_NegativeZeroCheck |
|
1291 : IntConversion_Normal); |
|
1292 } |
|
1293 |
|
    //
    // Convenience functions for truncating values to int32.
    //
    void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
    }
    // Variant with out-of-line string handling and a slow-truncation path
    // (see the comment above convertValueToInt).
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              Label *handleStringEntry, Label *handleStringRejoin,
                              Label *truncateDoubleSlow,
                              Register stringReg, FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
                          stringReg, temp, output, fail, IntConversion_Truncate);
    }
    void truncateValueToInt32(ValueOperand value, MDefinition *input,
                              FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_Truncate);
    }
    // Compile-time variant for a known value; bool result reports failure.
    bool truncateValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
    }
    bool truncateConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                           Register output, Label *fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
    }
    void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
                                     Label *fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
    }
|
1327 |
|
    // Convenience functions for clamping values to uint8.
    void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
        convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
    }
    // Variant with out-of-line string handling (see the comment above
    // convertValueToInt); no slow-truncation label is needed for clamping.
    void clampValueToUint8(ValueOperand value, MDefinition *input,
                           Label *handleStringEntry, Label *handleStringRejoin,
                           Register stringReg, FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
                          stringReg, temp, output, fail, IntConversion_ClampToUint8);
    }
    void clampValueToUint8(ValueOperand value, MDefinition *input,
                           FloatRegister temp, Register output, Label *fail)
    {
        convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
                          IntConversion_ClampToUint8);
    }
    // Compile-time variant for a known value; bool result reports failure.
    bool clampValueToUint8(JSContext *cx, const Value &v, Register output, Label *fail) {
        return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
    }
    bool clampConstantOrRegisterToUint8(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
                                        Register output, Label *fail)
    {
        return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
                                              IntConversion_ClampToUint8);
    }
    void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label *fail)
    {
        convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
    }
|
1359 |
|
  public:
    // Token returned by icSaveLive() and consumed by icRestoreLive(); in
    // debug builds it records the frame depth at save time so the restore can
    // assert stack balance.
    class AfterICSaveLive {
        friend class MacroAssembler;
        AfterICSaveLive(uint32_t initialStack)
#ifdef JS_DEBUG
          : initialStack(initialStack)
#endif
        {}

#ifdef JS_DEBUG
      public:
        // Frame depth (framePushed) captured when the live set was saved.
        uint32_t initialStack;
#endif
    };
|
1374 |
|
    // Save all live registers for an IC and record the resulting frame depth.
    AfterICSaveLive icSaveLive(RegisterSet &liveRegs) {
        PushRegsInMask(liveRegs);
        return AfterICSaveLive(framePushed());
    }

    // Build a fake exit frame for an IC's out-of-line path. |aic| is unused
    // here; it keeps the call site paired with icSaveLive/icRestoreLive.
    bool icBuildOOLFakeExitFrame(void *fakeReturnAddr, AfterICSaveLive &aic) {
        return buildOOLFakeExitFrame(fakeReturnAddr);
    }

    // Restore the registers saved by icSaveLive(), asserting (debug-only)
    // that the stack is balanced.
    void icRestoreLive(RegisterSet &liveRegs, AfterICSaveLive &aic) {
        JS_ASSERT(framePushed() == aic.initialStack);
        PopRegsInMask(liveRegs);
    }
|
1388 }; |
|
1389 |
|
1390 static inline Assembler::DoubleCondition |
|
1391 JSOpToDoubleCondition(JSOp op) |
|
1392 { |
|
1393 switch (op) { |
|
1394 case JSOP_EQ: |
|
1395 case JSOP_STRICTEQ: |
|
1396 return Assembler::DoubleEqual; |
|
1397 case JSOP_NE: |
|
1398 case JSOP_STRICTNE: |
|
1399 return Assembler::DoubleNotEqualOrUnordered; |
|
1400 case JSOP_LT: |
|
1401 return Assembler::DoubleLessThan; |
|
1402 case JSOP_LE: |
|
1403 return Assembler::DoubleLessThanOrEqual; |
|
1404 case JSOP_GT: |
|
1405 return Assembler::DoubleGreaterThan; |
|
1406 case JSOP_GE: |
|
1407 return Assembler::DoubleGreaterThanOrEqual; |
|
1408 default: |
|
1409 MOZ_ASSUME_UNREACHABLE("Unexpected comparison operation"); |
|
1410 } |
|
1411 } |
|
1412 |
|
1413 // Note: the op may have been inverted during lowering (to put constants in a |
|
1414 // position where they can be immediates), so it is important to use the |
|
1415 // lir->jsop() instead of the mir->jsop() when it is present. |
|
1416 static inline Assembler::Condition |
|
1417 JSOpToCondition(JSOp op, bool isSigned) |
|
1418 { |
|
1419 if (isSigned) { |
|
1420 switch (op) { |
|
1421 case JSOP_EQ: |
|
1422 case JSOP_STRICTEQ: |
|
1423 return Assembler::Equal; |
|
1424 case JSOP_NE: |
|
1425 case JSOP_STRICTNE: |
|
1426 return Assembler::NotEqual; |
|
1427 case JSOP_LT: |
|
1428 return Assembler::LessThan; |
|
1429 case JSOP_LE: |
|
1430 return Assembler::LessThanOrEqual; |
|
1431 case JSOP_GT: |
|
1432 return Assembler::GreaterThan; |
|
1433 case JSOP_GE: |
|
1434 return Assembler::GreaterThanOrEqual; |
|
1435 default: |
|
1436 MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation"); |
|
1437 } |
|
1438 } else { |
|
1439 switch (op) { |
|
1440 case JSOP_EQ: |
|
1441 case JSOP_STRICTEQ: |
|
1442 return Assembler::Equal; |
|
1443 case JSOP_NE: |
|
1444 case JSOP_STRICTNE: |
|
1445 return Assembler::NotEqual; |
|
1446 case JSOP_LT: |
|
1447 return Assembler::Below; |
|
1448 case JSOP_LE: |
|
1449 return Assembler::BelowOrEqual; |
|
1450 case JSOP_GT: |
|
1451 return Assembler::Above; |
|
1452 case JSOP_GE: |
|
1453 return Assembler::AboveOrEqual; |
|
1454 default: |
|
1455 MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation"); |
|
1456 } |
|
1457 } |
|
1458 } |
|
1459 |
|
1460 } // namespace jit |
|
1461 } // namespace js |
|
1462 |
|
1463 #endif // JS_ION |
|
1464 |
|
1465 #endif /* jit_IonMacroAssembler_h */ |