/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86/MacroAssembler-x86.h"

#include "mozilla/Casting.h"

#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/IonFrames.h"
#include "jit/MoveEmitter.h"

#include "jsscriptinlines.h"

using namespace js;
using namespace js::jit;

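// Returns the constant-pool entry for |d|, allocating it on first use.
// doubleMap_ maps each distinct double to its index in doubles_, so repeated
// requests for the same value share a single pool slot.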
MacroAssemblerX86::Double *
MacroAssemblerX86::getDouble(double d)
{
    if (!doubleMap_.initialized()) {
        enoughMemory_ &= doubleMap_.init();
        if (!enoughMemory_)
            return nullptr;
    }
    size_t doubleIndex;
    DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
    if (p) {
        doubleIndex = p->value();
    } else {
        doubleIndex = doubles_.length();
        enoughMemory_ &= doubles_.append(Double(d));
        enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
        if (!enoughMemory_)
            return nullptr;
    }
    Double &dbl = doubles_[doubleIndex];
    JS_ASSERT(!dbl.uses.bound());
    return &dbl;
}

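// Loads the double |d| into |dest| from the constant pool. The instruction's
// address operand temporarily encodes the offset of the entry's previous use
// (uses.prev()), threading all uses into a chain that finish() resolves to
// the constant's final address.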
void
MacroAssemblerX86::loadConstantDouble(double d, const FloatRegister &dest)
{
    if (maybeInlineDouble(d, dest))
        return;
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    masm.movsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}

void
MacroAssemblerX86::addConstantDouble(double d, const FloatRegister &dest)
{
    Double *dbl = getDouble(d);
    if (!dbl)
        return;
    masm.addsd_mr(reinterpret_cast<const void *>(dbl->uses.prev()), dest.code());
    dbl->uses.setPrev(masm.size());
}

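// Float analogue of getDouble(): deduplicates float constants via floatMap_.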
MacroAssemblerX86::Float *
MacroAssemblerX86::getFloat(float f)
{
    if (!floatMap_.initialized()) {
        enoughMemory_ &= floatMap_.init();
        if (!enoughMemory_)
            return nullptr;
    }
    size_t floatIndex;
    FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
    if (p) {
        floatIndex = p->value();
    } else {
        floatIndex = floats_.length();
        enoughMemory_ &= floats_.append(Float(f));
        enoughMemory_ &= floatMap_.add(p, f, floatIndex);
        if (!enoughMemory_)
            return nullptr;
    }
    Float &flt = floats_[floatIndex];
    JS_ASSERT(!flt.uses.bound());
    return &flt;
}

void
MacroAssemblerX86::loadConstantFloat32(float f, const FloatRegister &dest)
{
    if (maybeInlineFloat(f, dest))
        return;
    Float *flt = getFloat(f);
    if (!flt)
        return;
    masm.movss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}

void
MacroAssemblerX86::addConstantFloat32(float f, const FloatRegister &dest)
{
    Float *flt = getFloat(f);
    if (!flt)
        return;
    masm.addss_mr(reinterpret_cast<const void *>(flt->uses.prev()), dest.code());
    flt->uses.setPrev(masm.size());
}

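// Emits the accumulated double and float constant pools at the tail of the
// code, aligned to their natural size, and registers a CodeLabel per entry
// so each use chain is patched to point at the emitted constant.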
void
MacroAssemblerX86::finish()
{
    if (!doubles_.empty())
        masm.align(sizeof(double));
    for (size_t i = 0; i < doubles_.length(); i++) {
        CodeLabel cl(doubles_[i].uses);
        writeDoubleConstant(doubles_[i].value, cl.src());
        enoughMemory_ &= addCodeLabel(cl);
        if (!enoughMemory_)
            return;
    }

    if (!floats_.empty())
        masm.align(sizeof(float));
    for (size_t i = 0; i < floats_.length(); i++) {
        CodeLabel cl(floats_[i].uses);
        writeFloatConstant(floats_[i].value, cl.src());
        enoughMemory_ &= addCodeLabel(cl);
        if (!enoughMemory_)
            return;
    }
}

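// Begins an ABI call sequence: records the expected argument count and
// resets the counters used to lay out the outgoing stack arguments.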
void
MacroAssemblerX86::setupABICall(uint32_t args)
{
    JS_ASSERT(!inCall_);
    inCall_ = true;

    args_ = args;
    passedArgs_ = 0;
    stackForCall_ = 0;
}

void
MacroAssemblerX86::setupAlignedABICall(uint32_t args)
{
    setupABICall(args);
    dynamicAlignment_ = false;
}

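// Variant for call sites where the stack alignment is unknown: saves the old
// esp in |scratch|, rounds esp down to StackAlignment, then pushes the saved
// value so callWithABIPost() can restore it.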
void
MacroAssemblerX86::setupUnalignedABICall(uint32_t args, const Register &scratch)
{
    setupABICall(args);
    dynamicAlignment_ = true;

    movl(esp, scratch);
    andl(Imm32(~(StackAlignment - 1)), esp);
    push(scratch);
}

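// On x86 every ABI argument is passed on the stack. Each call queues a move
// from |from| into the next outgoing stack slot; the queued moves are
// resolved and emitted together in callWithABIPre().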
void
MacroAssemblerX86::passABIArg(const MoveOperand &from, MoveOp::Type type)
{
    ++passedArgs_;
    MoveOperand to = MoveOperand(StackPointer, stackForCall_);
    switch (type) {
      case MoveOp::FLOAT32: stackForCall_ += sizeof(float);    break;
      case MoveOp::DOUBLE:  stackForCall_ += sizeof(double);   break;
      case MoveOp::INT32:   stackForCall_ += sizeof(int32_t);  break;
      case MoveOp::GENERAL: stackForCall_ += sizeof(intptr_t); break;
      default: MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
    }
    enoughMemory_ &= moveResolver_.addMove(from, to, type);
}

void
MacroAssemblerX86::passABIArg(const Register &reg)
{
    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
}

void
MacroAssemblerX86::passABIArg(const FloatRegister &reg, MoveOp::Type type)
{
    passABIArg(MoveOperand(reg), type);
}

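// Completes the outgoing frame before the call: pads the argument area so
// the call is made with an aligned stack (accounting for the saved esp when
// dynamically aligned, or framePushed_ otherwise), reserves the space, then
// emits the queued argument moves.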
void
MacroAssemblerX86::callWithABIPre(uint32_t *stackAdjust)
{
    JS_ASSERT(inCall_);
    JS_ASSERT(args_ == passedArgs_);

    if (dynamicAlignment_) {
        *stackAdjust = stackForCall_
                     + ComputeByteAlignment(stackForCall_ + sizeof(intptr_t),
                                            StackAlignment);
    } else {
        *stackAdjust = stackForCall_
                     + ComputeByteAlignment(stackForCall_ + framePushed_,
                                            StackAlignment);
    }

    reserveStack(*stackAdjust);

    // Position all arguments.
    {
        enoughMemory_ &= moveResolver_.resolve();
        if (!enoughMemory_)
            return;

        MoveEmitter emitter(*this);
        emitter.emit(moveResolver_);
        emitter.finish();
    }

#ifdef DEBUG
    {
        // Check call alignment.
        Label good;
        testl(esp, Imm32(StackAlignment - 1));
        j(Equal, &good);
        breakpoint();
        bind(&good);
    }
#endif
}

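// Tears down the frame after the call. The x86 ABI returns floating-point
// results in x87 st(0), so they are spilled to the stack with fstp and
// reloaded into the SSE return register.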
void
MacroAssemblerX86::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
{
    freeStack(stackAdjust);
    if (result == MoveOp::DOUBLE) {
        reserveStack(sizeof(double));
        fstp(Operand(esp, 0));
        loadDouble(Operand(esp, 0), ReturnFloatReg);
        freeStack(sizeof(double));
    } else if (result == MoveOp::FLOAT32) {
        reserveStack(sizeof(float));
        fstp32(Operand(esp, 0));
        loadFloat32(Operand(esp, 0), ReturnFloatReg);
        freeStack(sizeof(float));
    }
    if (dynamicAlignment_)
        pop(esp);

    JS_ASSERT(inCall_);
    inCall_ = false;
}

void
MacroAssemblerX86::callWithABI(void *fun, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(ImmPtr(fun));
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerX86::callWithABI(AsmJSImmPtr fun, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(fun);
    callWithABIPost(stackAdjust, result);
}

void
MacroAssemblerX86::callWithABI(const Address &fun, MoveOp::Type result)
{
    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);
    call(Operand(fun));
    callWithABIPost(stackAdjust, result);
}

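// Called when JIT code throws: reserves a ResumeFromException on the stack,
// hands its address to the C++ |handler|, then jumps to the shared exception
// tail, which acts on the handler's decision.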
void
MacroAssemblerX86::handleFailureWithHandler(void *handler)
{
    // Reserve space for exception information.
    subl(Imm32(sizeof(ResumeFromException)), esp);
    movl(esp, eax);

    // Ask for an exception handler.
    setupUnalignedABICall(1, ecx);
    passABIArg(eax);
    callWithABI(handler);

    JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
    jmp(excTail);
}

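// Dispatches on the ResumeFromException kind filled in by the handler:
// return an error from the entry frame, resume at a catch or finally block,
// perform a debug-mode forced return, or bail out to Baseline.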
void
MacroAssemblerX86::handleFailureWithHandlerTail()
{
    Label entryFrame;
    Label catch_;
    Label finally;
    Label return_;
    Label bailout;

    loadPtr(Address(esp, offsetof(ResumeFromException, kind)), eax);
    branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
    branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
    branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
    branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
    branch32(Assembler::Equal, eax, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);

    breakpoint(); // Invalid kind.

    // No exception handler. Load the error value, load the new stack pointer
    // and return from the entry frame.
    bind(&entryFrame);
    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
    ret();

    // If we found a catch handler, this must be a baseline frame. Restore state
    // and jump to the catch block.
    bind(&catch_);
    loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
    loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
    loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
    jmp(Operand(eax));

    // If we found a finally block, this must be a baseline frame. Push
    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
    // exception.
    bind(&finally);
    ValueOperand exception = ValueOperand(ecx, edx);
    loadValue(Address(esp, offsetof(ResumeFromException, exception)), exception);

    loadPtr(Address(esp, offsetof(ResumeFromException, target)), eax);
    loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
    loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);

    pushValue(BooleanValue(true));
    pushValue(exception);
    jmp(Operand(eax));

    // Only used in debug mode. Return BaselineFrame->returnValue() to the caller.
    bind(&return_);
    loadPtr(Address(esp, offsetof(ResumeFromException, framePointer)), ebp);
    loadPtr(Address(esp, offsetof(ResumeFromException, stackPointer)), esp);
    loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand);
    movl(ebp, esp);
    pop(ebp);
    ret();

    // If we are bailing out to baseline to handle an exception, jump to
    // the bailout tail stub.
    bind(&bailout);
    loadPtr(Address(esp, offsetof(ResumeFromException, bailoutInfo)), ecx);
    movl(Imm32(BAILOUT_RETURN_OK), eax);
    jmp(Operand(esp, offsetof(ResumeFromException, target)));
}

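// Compares a boxed Value against the constant |v|. A Value occupies two
// words on x86, so equality requires both the payload and the type tag to
// match, while inequality may branch as soon as either word differs.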
void
MacroAssemblerX86::branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label)
{
    jsval_layout jv = JSVAL_TO_IMPL(v);
    if (v.isMarkable())
        cmpl(value.payloadReg(), ImmGCPtr(reinterpret_cast<gc::Cell *>(v.toGCThing())));
    else
        cmpl(value.payloadReg(), Imm32(jv.s.payload.i32));

    if (cond == Equal) {
        Label done;
        j(NotEqual, &done);
        {
            cmpl(value.typeReg(), Imm32(jv.s.tag));
            j(Equal, label);
        }
        bind(&done);
    } else {
        JS_ASSERT(cond == NotEqual);
        j(NotEqual, label);

        cmpl(value.typeReg(), Imm32(jv.s.tag));
        j(NotEqual, label);
    }
}

#ifdef JSGC_GENERATIONAL

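// Branches to |label| if |ptr| points into the nursery. Adding the negated
// nursery start to |ptr| reduces the range test to a single unsigned
// comparison against the nursery size.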
void
MacroAssemblerX86::branchPtrInNurseryRange(Register ptr, Register temp, Label *label)
{
    JS_ASSERT(ptr != temp);
    JS_ASSERT(temp != InvalidReg);  // A temp register is required for x86.

    const Nursery &nursery = GetIonContext()->runtime->gcNursery();
    movePtr(ImmWord(-ptrdiff_t(nursery.start())), temp);
    addPtr(ptr, temp);
    branchPtr(Assembler::Below, temp, Imm32(Nursery::NurserySize), label);
}

void
MacroAssemblerX86::branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label)
{
    Label done;

    branchTestObject(Assembler::NotEqual, value, &done);
    branchPtrInNurseryRange(value.payloadReg(), temp, label);

    bind(&done);
}

#endif