|
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
|
2 * vim: set ts=8 sts=4 et sw=4 tw=99: |
|
3 * This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #include "jit/AsmJS.h" |
|
8 |
|
9 #include "mozilla/Move.h" |
|
10 |
|
11 #ifdef MOZ_VTUNE |
|
12 # include "vtune/VTuneWrapper.h" |
|
13 #endif |
|
14 |
|
15 #include "jsmath.h" |
|
16 #include "jsprf.h" |
|
17 #include "jsworkers.h" |
|
18 #include "prmjtime.h" |
|
19 |
|
20 #include "assembler/assembler/MacroAssembler.h" |
|
21 #include "frontend/Parser.h" |
|
22 #include "jit/AsmJSLink.h" |
|
23 #include "jit/AsmJSModule.h" |
|
24 #include "jit/AsmJSSignalHandlers.h" |
|
25 #include "jit/CodeGenerator.h" |
|
26 #include "jit/CompileWrappers.h" |
|
27 #include "jit/MIR.h" |
|
28 #include "jit/MIRGraph.h" |
|
29 #ifdef JS_ION_PERF |
|
30 # include "jit/PerfSpewer.h" |
|
31 #endif |
|
32 #include "vm/Interpreter.h" |
|
33 |
|
34 #include "jsinferinlines.h" |
|
35 #include "jsobjinlines.h" |
|
36 |
|
37 #include "frontend/ParseNode-inl.h" |
|
38 #include "frontend/Parser-inl.h" |
|
39 |
|
40 using namespace js; |
|
41 using namespace js::frontend; |
|
42 using namespace js::jit; |
|
43 |
|
44 using mozilla::AddToHash; |
|
45 using mozilla::ArrayLength; |
|
46 using mozilla::CountLeadingZeroes32; |
|
47 using mozilla::DebugOnly; |
|
48 using mozilla::HashGeneric; |
|
49 using mozilla::IsNaN; |
|
50 using mozilla::IsNegativeZero; |
|
51 using mozilla::Maybe; |
|
52 using mozilla::Move; |
|
53 using mozilla::PositiveInfinity; |
|
54 using JS::GenericNaN; |
|
55 |
|
56 static const size_t LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12; |
|
57 |
|
58 /*****************************************************************************/ |
|
59 // ParseNode utilities |
|
60 |
|
61 static inline ParseNode * |
|
62 NextNode(ParseNode *pn) |
|
63 { |
|
64 return pn->pn_next; |
|
65 } |
|
66 |
|
67 static inline ParseNode * |
|
68 UnaryKid(ParseNode *pn) |
|
69 { |
|
70 JS_ASSERT(pn->isArity(PN_UNARY)); |
|
71 return pn->pn_kid; |
|
72 } |
|
73 |
|
74 static inline ParseNode * |
|
75 ReturnExpr(ParseNode *pn) |
|
76 { |
|
77 JS_ASSERT(pn->isKind(PNK_RETURN)); |
|
78 return UnaryKid(pn); |
|
79 } |
|
80 |
|
81 static inline ParseNode * |
|
82 BinaryRight(ParseNode *pn) |
|
83 { |
|
84 JS_ASSERT(pn->isArity(PN_BINARY)); |
|
85 return pn->pn_right; |
|
86 } |
|
87 |
|
88 static inline ParseNode * |
|
89 BinaryLeft(ParseNode *pn) |
|
90 { |
|
91 JS_ASSERT(pn->isArity(PN_BINARY)); |
|
92 return pn->pn_left; |
|
93 } |
|
94 |
|
95 static inline ParseNode * |
|
96 TernaryKid1(ParseNode *pn) |
|
97 { |
|
98 JS_ASSERT(pn->isArity(PN_TERNARY)); |
|
99 return pn->pn_kid1; |
|
100 } |
|
101 |
|
102 static inline ParseNode * |
|
103 TernaryKid2(ParseNode *pn) |
|
104 { |
|
105 JS_ASSERT(pn->isArity(PN_TERNARY)); |
|
106 return pn->pn_kid2; |
|
107 } |
|
108 |
|
109 static inline ParseNode * |
|
110 TernaryKid3(ParseNode *pn) |
|
111 { |
|
112 JS_ASSERT(pn->isArity(PN_TERNARY)); |
|
113 return pn->pn_kid3; |
|
114 } |
|
115 |
|
116 static inline ParseNode * |
|
117 ListHead(ParseNode *pn) |
|
118 { |
|
119 JS_ASSERT(pn->isArity(PN_LIST)); |
|
120 return pn->pn_head; |
|
121 } |
|
122 |
|
123 static inline unsigned |
|
124 ListLength(ParseNode *pn) |
|
125 { |
|
126 JS_ASSERT(pn->isArity(PN_LIST)); |
|
127 return pn->pn_count; |
|
128 } |
|
129 |
|
130 static inline ParseNode * |
|
131 CallCallee(ParseNode *pn) |
|
132 { |
|
133 JS_ASSERT(pn->isKind(PNK_CALL)); |
|
134 return ListHead(pn); |
|
135 } |
|
136 |
|
137 static inline unsigned |
|
138 CallArgListLength(ParseNode *pn) |
|
139 { |
|
140 JS_ASSERT(pn->isKind(PNK_CALL)); |
|
141 JS_ASSERT(ListLength(pn) >= 1); |
|
142 return ListLength(pn) - 1; |
|
143 } |
|
144 |
|
145 static inline ParseNode * |
|
146 CallArgList(ParseNode *pn) |
|
147 { |
|
148 JS_ASSERT(pn->isKind(PNK_CALL)); |
|
149 return NextNode(ListHead(pn)); |
|
150 } |
|
151 |
|
152 static inline ParseNode * |
|
153 VarListHead(ParseNode *pn) |
|
154 { |
|
155 JS_ASSERT(pn->isKind(PNK_VAR) || pn->isKind(PNK_CONST)); |
|
156 return ListHead(pn); |
|
157 } |
|
158 |
|
159 static inline ParseNode * |
|
160 CaseExpr(ParseNode *pn) |
|
161 { |
|
162 JS_ASSERT(pn->isKind(PNK_CASE) || pn->isKind(PNK_DEFAULT)); |
|
163 return BinaryLeft(pn); |
|
164 } |
|
165 |
|
166 static inline ParseNode * |
|
167 CaseBody(ParseNode *pn) |
|
168 { |
|
169 JS_ASSERT(pn->isKind(PNK_CASE) || pn->isKind(PNK_DEFAULT)); |
|
170 return BinaryRight(pn); |
|
171 } |
|
172 |
|
173 static inline bool |
|
174 IsExpressionStatement(ParseNode *pn) |
|
175 { |
|
176 return pn->isKind(PNK_SEMI); |
|
177 } |
|
178 |
|
179 static inline ParseNode * |
|
180 ExpressionStatementExpr(ParseNode *pn) |
|
181 { |
|
182 JS_ASSERT(pn->isKind(PNK_SEMI)); |
|
183 return UnaryKid(pn); |
|
184 } |
|
185 |
|
186 static inline PropertyName * |
|
187 LoopControlMaybeLabel(ParseNode *pn) |
|
188 { |
|
189 JS_ASSERT(pn->isKind(PNK_BREAK) || pn->isKind(PNK_CONTINUE)); |
|
190 JS_ASSERT(pn->isArity(PN_NULLARY)); |
|
191 return pn->as<LoopControlStatement>().label(); |
|
192 } |
|
193 |
|
194 static inline PropertyName * |
|
195 LabeledStatementLabel(ParseNode *pn) |
|
196 { |
|
197 return pn->as<LabeledStatement>().label(); |
|
198 } |
|
199 |
|
200 static inline ParseNode * |
|
201 LabeledStatementStatement(ParseNode *pn) |
|
202 { |
|
203 return pn->as<LabeledStatement>().statement(); |
|
204 } |
|
205 |
|
206 static double |
|
207 NumberNodeValue(ParseNode *pn) |
|
208 { |
|
209 JS_ASSERT(pn->isKind(PNK_NUMBER)); |
|
210 return pn->pn_dval; |
|
211 } |
|
212 |
|
213 static bool |
|
214 NumberNodeHasFrac(ParseNode *pn) |
|
215 { |
|
216 JS_ASSERT(pn->isKind(PNK_NUMBER)); |
|
217 return pn->pn_u.number.decimalPoint == HasDecimal; |
|
218 } |
|
219 |
|
220 static ParseNode * |
|
221 DotBase(ParseNode *pn) |
|
222 { |
|
223 JS_ASSERT(pn->isKind(PNK_DOT)); |
|
224 JS_ASSERT(pn->isArity(PN_NAME)); |
|
225 return pn->expr(); |
|
226 } |
|
227 |
|
228 static PropertyName * |
|
229 DotMember(ParseNode *pn) |
|
230 { |
|
231 JS_ASSERT(pn->isKind(PNK_DOT)); |
|
232 JS_ASSERT(pn->isArity(PN_NAME)); |
|
233 return pn->pn_atom->asPropertyName(); |
|
234 } |
|
235 |
|
236 static ParseNode * |
|
237 ElemBase(ParseNode *pn) |
|
238 { |
|
239 JS_ASSERT(pn->isKind(PNK_ELEM)); |
|
240 return BinaryLeft(pn); |
|
241 } |
|
242 |
|
243 static ParseNode * |
|
244 ElemIndex(ParseNode *pn) |
|
245 { |
|
246 JS_ASSERT(pn->isKind(PNK_ELEM)); |
|
247 return BinaryRight(pn); |
|
248 } |
|
249 |
|
250 static inline JSFunction * |
|
251 FunctionObject(ParseNode *fn) |
|
252 { |
|
253 JS_ASSERT(fn->isKind(PNK_FUNCTION)); |
|
254 JS_ASSERT(fn->isArity(PN_CODE)); |
|
255 return fn->pn_funbox->function(); |
|
256 } |
|
257 |
|
258 static inline PropertyName * |
|
259 FunctionName(ParseNode *fn) |
|
260 { |
|
261 if (JSAtom *atom = FunctionObject(fn)->atom()) |
|
262 return atom->asPropertyName(); |
|
263 return nullptr; |
|
264 } |
|
265 |
|
266 static inline ParseNode * |
|
267 FunctionStatementList(ParseNode *fn) |
|
268 { |
|
269 JS_ASSERT(fn->pn_body->isKind(PNK_ARGSBODY)); |
|
270 ParseNode *last = fn->pn_body->last(); |
|
271 JS_ASSERT(last->isKind(PNK_STATEMENTLIST)); |
|
272 return last; |
|
273 } |
|
274 |
|
275 static inline bool |
|
276 IsNormalObjectField(ExclusiveContext *cx, ParseNode *pn) |
|
277 { |
|
278 JS_ASSERT(pn->isKind(PNK_COLON)); |
|
279 return pn->getOp() == JSOP_INITPROP && |
|
280 BinaryLeft(pn)->isKind(PNK_NAME) && |
|
281 BinaryLeft(pn)->name() != cx->names().proto; |
|
282 } |
|
283 |
|
284 static inline PropertyName * |
|
285 ObjectNormalFieldName(ExclusiveContext *cx, ParseNode *pn) |
|
286 { |
|
287 JS_ASSERT(IsNormalObjectField(cx, pn)); |
|
288 return BinaryLeft(pn)->name(); |
|
289 } |
|
290 |
|
291 static inline ParseNode * |
|
292 ObjectFieldInitializer(ParseNode *pn) |
|
293 { |
|
294 JS_ASSERT(pn->isKind(PNK_COLON)); |
|
295 return BinaryRight(pn); |
|
296 } |
|
297 |
|
298 static inline bool |
|
299 IsDefinition(ParseNode *pn) |
|
300 { |
|
301 return pn->isKind(PNK_NAME) && pn->isDefn(); |
|
302 } |
|
303 |
|
304 static inline ParseNode * |
|
305 MaybeDefinitionInitializer(ParseNode *pn) |
|
306 { |
|
307 JS_ASSERT(IsDefinition(pn)); |
|
308 return pn->expr(); |
|
309 } |
|
310 |
|
311 static inline bool |
|
312 IsUseOfName(ParseNode *pn, PropertyName *name) |
|
313 { |
|
314 return pn->isKind(PNK_NAME) && pn->name() == name; |
|
315 } |
|
316 |
|
317 static inline bool |
|
318 IsEmptyStatement(ParseNode *pn) |
|
319 { |
|
320 return pn->isKind(PNK_SEMI) && !UnaryKid(pn); |
|
321 } |
|
322 |
|
323 static inline ParseNode * |
|
324 SkipEmptyStatements(ParseNode *pn) |
|
325 { |
|
326 while (pn && IsEmptyStatement(pn)) |
|
327 pn = pn->pn_next; |
|
328 return pn; |
|
329 } |
|
330 |
|
331 static inline ParseNode * |
|
332 NextNonEmptyStatement(ParseNode *pn) |
|
333 { |
|
334 return SkipEmptyStatements(pn->pn_next); |
|
335 } |
|
336 |
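// Peeks at the next significant token for asm.js validation, consuming any
// bare semicolons (empty statements) along the way so that callers see the
// token that begins the next real statement.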
|
337 static TokenKind |
|
338 PeekToken(AsmJSParser &parser) |
|
339 { |
|
340 TokenStream &ts = parser.tokenStream; |
|
341 while (ts.peekToken(TokenStream::Operand) == TOK_SEMI) |
|
342 ts.consumeKnownToken(TOK_SEMI); |
|
343 return ts.peekToken(TokenStream::Operand); |
|
344 } |
|
345 |
|
346 static bool |
|
347 ParseVarOrConstStatement(AsmJSParser &parser, ParseNode **var) |
|
348 { |
|
349 TokenKind tk = PeekToken(parser); |
|
350 if (tk != TOK_VAR && tk != TOK_CONST) { |
|
351 *var = nullptr; |
|
352 return true; |
|
353 } |
|
354 |
|
355 *var = parser.statement(); |
|
356 if (!*var) |
|
357 return false; |
|
358 |
|
359 JS_ASSERT((*var)->isKind(PNK_VAR) || (*var)->isKind(PNK_CONST)); |
|
360 return true; |
|
361 } |
|
362 |
|
363 /*****************************************************************************/ |
|
364 |
|
365 namespace { |
|
366 |
|
367 // Represents the type of a general asm.js expression. |
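// As a rough sketch, the predicates below encode the asm.js subtyping
// relations: fixnum <: signed, fixnum <: unsigned, signed/unsigned <: int,
// int <: intish, double <: double?, and float <: float? <: floatish.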
|
368 class Type |
|
369 { |
|
370 public: |
|
371 enum Which { |
|
372 Double, |
|
373 MaybeDouble, |
|
374 Float, |
|
375 MaybeFloat, |
|
376 Floatish, |
|
377 Fixnum, |
|
378 Int, |
|
379 Signed, |
|
380 Unsigned, |
|
381 Intish, |
|
382 Void |
|
383 }; |
|
384 |
|
385 private: |
|
386 Which which_; |
|
387 |
|
388 public: |
|
389 Type() : which_(Which(-1)) {} |
|
390 Type(Which w) : which_(w) {} |
|
391 |
|
392 bool operator==(Type rhs) const { return which_ == rhs.which_; } |
|
393 bool operator!=(Type rhs) const { return which_ != rhs.which_; } |
|
394 |
|
395 bool isSigned() const { |
|
396 return which_ == Signed || which_ == Fixnum; |
|
397 } |
|
398 |
|
399 bool isUnsigned() const { |
|
400 return which_ == Unsigned || which_ == Fixnum; |
|
401 } |
|
402 |
|
403 bool isInt() const { |
|
404 return isSigned() || isUnsigned() || which_ == Int; |
|
405 } |
|
406 |
|
407 bool isIntish() const { |
|
408 return isInt() || which_ == Intish; |
|
409 } |
|
410 |
|
411 bool isDouble() const { |
|
412 return which_ == Double; |
|
413 } |
|
414 |
|
415 bool isMaybeDouble() const { |
|
416 return isDouble() || which_ == MaybeDouble; |
|
417 } |
|
418 |
|
419 bool isFloat() const { |
|
420 return which_ == Float; |
|
421 } |
|
422 |
|
423 bool isMaybeFloat() const { |
|
424 return isFloat() || which_ == MaybeFloat; |
|
425 } |
|
426 |
|
427 bool isFloatish() const { |
|
428 return isMaybeFloat() || which_ == Floatish; |
|
429 } |
|
430 |
|
431 bool isVoid() const { |
|
432 return which_ == Void; |
|
433 } |
|
434 |
|
435 bool isExtern() const { |
|
436 return isDouble() || isSigned(); |
|
437 } |
|
438 |
|
439 bool isVarType() const { |
|
440 return isInt() || isDouble() || isFloat(); |
|
441 } |
|
442 |
|
443 MIRType toMIRType() const { |
|
444 switch (which_) { |
|
445 case Double: |
|
446 case MaybeDouble: |
|
447 return MIRType_Double; |
|
448 case Float: |
|
449 case Floatish: |
|
450 case MaybeFloat: |
|
451 return MIRType_Float32; |
|
452 case Fixnum: |
|
453 case Int: |
|
454 case Signed: |
|
455 case Unsigned: |
|
456 case Intish: |
|
457 return MIRType_Int32; |
|
458 case Void: |
|
459 return MIRType_None; |
|
460 } |
|
461 MOZ_ASSUME_UNREACHABLE("Invalid Type"); |
|
462 } |
|
463 |
|
464 const char *toChars() const { |
|
465 switch (which_) { |
|
466 case Double: return "double"; |
|
467 case MaybeDouble: return "double?"; |
|
468 case Float: return "float"; |
|
469 case Floatish: return "floatish"; |
|
470 case MaybeFloat: return "float?"; |
|
471 case Fixnum: return "fixnum"; |
|
472 case Int: return "int"; |
|
473 case Signed: return "signed"; |
|
474 case Unsigned: return "unsigned"; |
|
475 case Intish: return "intish"; |
|
476 case Void: return "void"; |
|
477 } |
|
478 MOZ_ASSUME_UNREACHABLE("Invalid Type"); |
|
479 } |
|
480 }; |
|
481 |
|
482 } /* anonymous namespace */ |
|
483 |
|
484 // Represents the subset of Type that can be used as the return type of a |
|
485 // function. |
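// For example (a sketch of the usual asm.js return annotations): a body
// ending in "return x|0;" yields Signed, "return +x;" yields Double,
// "return fround(x);" yields Float, and a function with no return
// expression yields Void.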
|
486 class RetType |
|
487 { |
|
488 public: |
|
489 enum Which { |
|
490 Void = Type::Void, |
|
491 Signed = Type::Signed, |
|
492 Double = Type::Double, |
|
493 Float = Type::Float |
|
494 }; |
|
495 |
|
496 private: |
|
497 Which which_; |
|
498 |
|
499 public: |
|
500 RetType() : which_(Which(-1)) {} |
|
501 RetType(Which w) : which_(w) {} |
|
502 RetType(AsmJSCoercion coercion) { |
|
503 switch (coercion) { |
|
504 case AsmJS_ToInt32: which_ = Signed; break; |
|
505 case AsmJS_ToNumber: which_ = Double; break; |
|
506 case AsmJS_FRound: which_ = Float; break; |
|
507 } |
|
508 } |
|
509 Which which() const { |
|
510 return which_; |
|
511 } |
|
512 Type toType() const { |
|
513 return Type::Which(which_); |
|
514 } |
|
515 AsmJSModule::ReturnType toModuleReturnType() const { |
|
516 switch (which_) { |
|
517 case Void: return AsmJSModule::Return_Void; |
|
518 case Signed: return AsmJSModule::Return_Int32; |
|
519 case Float: // will be converted to a Double |
|
520 case Double: return AsmJSModule::Return_Double; |
|
521 } |
|
522 MOZ_ASSUME_UNREACHABLE("Unexpected return type"); |
|
523 } |
|
524 MIRType toMIRType() const { |
|
525 switch (which_) { |
|
526 case Void: return MIRType_None; |
|
527 case Signed: return MIRType_Int32; |
|
528 case Double: return MIRType_Double; |
|
529 case Float: return MIRType_Float32; |
|
530 } |
|
531 MOZ_ASSUME_UNREACHABLE("Unexpected return type"); |
|
532 } |
|
533 bool operator==(RetType rhs) const { return which_ == rhs.which_; } |
|
534 bool operator!=(RetType rhs) const { return which_ != rhs.which_; } |
|
535 }; |
|
536 |
|
537 namespace { |
|
538 |
|
539 // Represents the subset of Type that can be used as a variable or |
|
540 // argument's type. Note: AsmJSCoercion and VarType are kept separate to |
|
541 // make very clear the signed/int distinction: a coercion may explicitly sign |
|
542 // an *expression* but, when stored as a variable, this signedness information |
|
543 // is explicitly thrown away by the asm.js type system. E.g., in |
|
544 // |
|
545 // function f(i) { |
|
546 // i = i | 0; (1) |
|
547 // if (...) |
|
548 // i = foo() >>> 0; |
|
549 // else |
|
550 // i = bar() | 0; |
|
551 // return i | 0; (2) |
|
552 // } |
|
553 // |
|
554 // the AsmJSCoercion of (1) is Signed (since | performs ToInt32) but, when |
|
555 // translated to a VarType, the result is a plain Int since, as shown, it |
|
556 // is legal to assign both Signed and Unsigned (or some other Int) values to |
|
557 // it. For (2), the AsmJSCoercion is also Signed but, when translated to a |
|
558 // RetType, the result is Signed since callers (asm.js and non-asm.js) can |
|
559 // rely on the return value being Signed. |
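// As a quick reference (derived from the constructors below rather than
// restated from the spec): the coercion "x|0" (AsmJS_ToInt32) maps to Int,
// "+x" (AsmJS_ToNumber) maps to Double, and "Math.fround(x)" (AsmJS_FRound)
// maps to Float.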
|
560 class VarType |
|
561 { |
|
562 public: |
|
563 enum Which { |
|
564 Int = Type::Int, |
|
565 Double = Type::Double, |
|
566 Float = Type::Float |
|
567 }; |
|
568 |
|
569 private: |
|
570 Which which_; |
|
571 |
|
572 public: |
|
573 VarType() |
|
574 : which_(Which(-1)) {} |
|
575 VarType(Which w) |
|
576 : which_(w) {} |
|
577 VarType(AsmJSCoercion coercion) { |
|
578 switch (coercion) { |
|
579 case AsmJS_ToInt32: which_ = Int; break; |
|
580 case AsmJS_ToNumber: which_ = Double; break; |
|
581 case AsmJS_FRound: which_ = Float; break; |
|
582 } |
|
583 } |
|
584 Which which() const { |
|
585 return which_; |
|
586 } |
|
587 Type toType() const { |
|
588 return Type::Which(which_); |
|
589 } |
|
590 MIRType toMIRType() const { |
|
591 switch(which_) { |
|
592 case Int: return MIRType_Int32; |
|
593 case Double: return MIRType_Double; |
|
594 case Float: return MIRType_Float32; |
|
595 } |
|
596 MOZ_ASSUME_UNREACHABLE("VarType can only be Int, Double or Float"); |
|
597 } |
|
598 AsmJSCoercion toCoercion() const { |
|
599 switch(which_) { |
|
600 case Int: return AsmJS_ToInt32; |
|
601 case Double: return AsmJS_ToNumber; |
|
602 case Float: return AsmJS_FRound; |
|
603 } |
|
604 MOZ_ASSUME_UNREACHABLE("VarType can only be Int, Double or Float"); |
|
605 } |
|
606 static VarType FromCheckedType(Type type) { |
|
607 JS_ASSERT(type.isInt() || type.isMaybeDouble() || type.isFloatish()); |
|
608 if (type.isMaybeDouble()) |
|
609 return Double; |
|
610 else if (type.isFloatish()) |
|
611 return Float; |
|
612 else |
|
613 return Int; |
|
614 } |
|
615 bool operator==(VarType rhs) const { return which_ == rhs.which_; } |
|
616 bool operator!=(VarType rhs) const { return which_ != rhs.which_; } |
|
617 }; |
|
618 |
|
619 } /* anonymous namespace */ |
|
620 |
|
621 // Implements the <: (subtype) operator when the rhs is a VarType. |
|
622 static inline bool |
|
623 operator<=(Type lhs, VarType rhs) |
|
624 { |
|
625 switch (rhs.which()) { |
|
626 case VarType::Int: return lhs.isInt(); |
|
627 case VarType::Double: return lhs.isDouble(); |
|
628 case VarType::Float: return lhs.isFloat(); |
|
629 } |
|
630 MOZ_ASSUME_UNREACHABLE("Unexpected rhs type"); |
|
631 } |
|
632 |
|
633 /*****************************************************************************/ |
|
634 |
|
635 static inline MIRType ToMIRType(MIRType t) { return t; } |
|
636 static inline MIRType ToMIRType(VarType t) { return t.toMIRType(); } |
|
637 |
|
638 namespace { |
|
639 |
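// ABIArgIter walks a vector of argument types and, for each argument, asks
// ABIArgGenerator where that argument lives under the native ABI (a register
// or a stack offset). A minimal usage sketch, assuming a Signature 'sig' as
// defined below ('useArg' is a hypothetical consumer of each ABIArg):
//
//   for (ABIArgTypeIter i(sig.args()); !i.done(); i++)
//       useArg(*i, i.mirType());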
|
640 template <class VecT> |
|
641 class ABIArgIter |
|
642 { |
|
643 ABIArgGenerator gen_; |
|
644 const VecT &types_; |
|
645 unsigned i_; |
|
646 |
|
647 void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); } |
|
648 |
|
649 public: |
|
650 ABIArgIter(const VecT &types) : types_(types), i_(0) { settle(); } |
|
651 void operator++(int) { JS_ASSERT(!done()); i_++; settle(); } |
|
652 bool done() const { return i_ == types_.length(); } |
|
653 |
|
654 ABIArg *operator->() { JS_ASSERT(!done()); return &gen_.current(); } |
|
655 ABIArg &operator*() { JS_ASSERT(!done()); return gen_.current(); } |
|
656 |
|
657 unsigned index() const { JS_ASSERT(!done()); return i_; } |
|
658 MIRType mirType() const { JS_ASSERT(!done()); return ToMIRType(types_[i_]); } |
|
659 uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); } |
|
660 }; |
|
661 |
|
662 typedef js::Vector<MIRType, 8> MIRTypeVector; |
|
663 typedef ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter; |
|
664 |
|
665 typedef js::Vector<VarType, 8, LifoAllocPolicy> VarTypeVector; |
|
666 typedef ABIArgIter<VarTypeVector> ABIArgTypeIter; |
|
667 |
|
668 class Signature |
|
669 { |
|
670 VarTypeVector argTypes_; |
|
671 RetType retType_; |
|
672 |
|
673 public: |
|
674 Signature(LifoAlloc &alloc) |
|
675 : argTypes_(alloc) {} |
|
676 Signature(LifoAlloc &alloc, RetType retType) |
|
677 : argTypes_(alloc), retType_(retType) {} |
|
678 Signature(VarTypeVector &&argTypes, RetType retType) |
|
679 : argTypes_(Move(argTypes)), retType_(Move(retType)) {} |
|
680 Signature(Signature &&rhs) |
|
681 : argTypes_(Move(rhs.argTypes_)), retType_(Move(rhs.retType_)) {} |
|
682 |
|
683 bool copy(const Signature &rhs) { |
|
684 if (!argTypes_.resize(rhs.argTypes_.length())) |
|
685 return false; |
|
686 for (unsigned i = 0; i < argTypes_.length(); i++) |
|
687 argTypes_[i] = rhs.argTypes_[i]; |
|
688 retType_ = rhs.retType_; |
|
689 return true; |
|
690 } |
|
691 |
|
692 bool appendArg(VarType type) { return argTypes_.append(type); } |
|
693 VarType arg(unsigned i) const { return argTypes_[i]; } |
|
694 const VarTypeVector &args() const { return argTypes_; } |
|
695 VarTypeVector &&extractArgs() { return Move(argTypes_); } |
|
696 |
|
697 RetType retType() const { return retType_; } |
|
698 }; |
|
699 |
|
700 } /* anonymous namespace */ |
|
701 |
|
702 static |
|
703 bool operator==(const Signature &lhs, const Signature &rhs) |
|
704 { |
|
705 if (lhs.retType() != rhs.retType()) |
|
706 return false; |
|
707 if (lhs.args().length() != rhs.args().length()) |
|
708 return false; |
|
709 for (unsigned i = 0; i < lhs.args().length(); i++) { |
|
710 if (lhs.arg(i) != rhs.arg(i)) |
|
711 return false; |
|
712 } |
|
713 return true; |
|
714 } |
|
715 |
|
716 static inline |
|
717 bool operator!=(const Signature &lhs, const Signature &rhs) |
|
718 { |
|
719 return !(lhs == rhs); |
|
720 } |
|
721 |
|
722 /*****************************************************************************/ |
|
723 // Typed array utilities |
|
724 |
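// A brief note on the mapping below: a load from an integer view (e.g.
// HEAP32[i>>2]) has type intish and is typically coerced right away with |0,
// while Float32Array and Float64Array loads get the "maybe" types float? and
// double?, which the validator distinguishes from plain float/double (see
// Type::isMaybeFloat/isMaybeDouble above).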
|
725 static Type |
|
726 TypedArrayLoadType(ArrayBufferView::ViewType viewType) |
|
727 { |
|
728 switch (viewType) { |
|
729 case ArrayBufferView::TYPE_INT8: |
|
730 case ArrayBufferView::TYPE_INT16: |
|
731 case ArrayBufferView::TYPE_INT32: |
|
732 case ArrayBufferView::TYPE_UINT8: |
|
733 case ArrayBufferView::TYPE_UINT16: |
|
734 case ArrayBufferView::TYPE_UINT32: |
|
735 return Type::Intish; |
|
736 case ArrayBufferView::TYPE_FLOAT32: |
|
737 return Type::MaybeFloat; |
|
738 case ArrayBufferView::TYPE_FLOAT64: |
|
739 return Type::MaybeDouble; |
|
740 default:; |
|
741 } |
|
742 MOZ_ASSUME_UNREACHABLE("Unexpected array type"); |
|
743 } |
|
744 |
|
745 enum NeedsBoundsCheck { |
|
746 NO_BOUNDS_CHECK, |
|
747 NEEDS_BOUNDS_CHECK |
|
748 }; |
|
749 |
|
750 namespace { |
|
751 |
|
752 typedef js::Vector<PropertyName*,1> LabelVector; |
|
753 typedef js::Vector<MBasicBlock*,8> BlockVector; |
|
754 |
|
755 // ModuleCompiler encapsulates the compilation of an entire asm.js module. Over |
|
756 // the course of a ModuleCompiler object's lifetime, many FunctionCompiler |
|
757 // objects will be created and destroyed in sequence, one for each function in |
|
758 // the module. |
|
759 // |
|
760 // *** asm.js FFI calls *** |
|
761 // |
|
762 // asm.js allows calling out to non-asm.js via "FFI calls". The asm.js type |
|
763 // system does not place any constraints on the FFI call. In particular: |
|
764 // - an FFI call's target is not known or speculated at module-compile time; |
|
765 // - a single external function can be called with different signatures. |
|
766 // |
|
767 // If performance didn't matter, all FFI calls could simply box their arguments |
|
768 // and call js::Invoke. However, we'd like to be able to specialize FFI calls |
|
769 // to be more efficient in several cases: |
|
770 // |
|
771 // - for calls to JS functions which have been jitted, we'd like to call |
|
772 // directly into JIT code without going through C++. |
|
773 // |
|
774 // - for calls to certain builtins, we'd like to call directly into the C++ |
|
775 // code for the builtin without going through the general call path. |
|
776 // |
|
777 // All of this requires dynamic specialization techniques which must happen |
|
778 // after module compilation. To support this, at module-compilation time, each |
|
779 // FFI call generates a call signature according to the system ABI, as if the |
|
780 // callee was a C++ function taking/returning the same types as the caller was |
|
781 // passing/expecting. The callee is loaded from a fixed offset in the global |
|
782 // data array which allows the callee to change at runtime. Initially, the |
|
783 // callee is a stub which boxes its arguments and calls js::Invoke. |
|
784 // |
|
785 // To do this, we need to generate a callee stub for each pairing of FFI callee |
|
786 // and signature. We call this pairing an "exit". For example, this code has |
|
787 // two external functions and three exits: |
|
788 // |
|
789 // function f(global, imports) { |
|
790 // "use asm"; |
|
791 // var foo = imports.foo; |
|
792 // var bar = imports.bar; |
|
793 // function g() { |
|
794 // foo(1); // Exit #1: (int) -> void |
|
795 // foo(1.5); // Exit #2: (double) -> void |
|
796 // bar(1)|0; // Exit #3: (int) -> int |
|
797 // bar(2)|0; // Exit #3: (int) -> int |
|
798 // } |
|
799 // } |
|
800 // |
|
801 // The ModuleCompiler maintains a hash table (ExitMap) which allows a call site |
|
802 // to add a new exit or reuse an existing one. The key is an ExitDescriptor |
|
803 // (which holds the exit pairing) and the value is an index into the |
|
804 // Vector<Exit> stored in the AsmJSModule. |
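// In the example above, the ExitMap ends up with three keys (a sketch of its
// contents): (foo, (int) -> void), (foo, (double) -> void) and
// (bar, (int) -> int); the two calls to bar share Exit #3 because their
// ExitDescriptors compare equal.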
|
805 // |
|
806 // Rooting note: ModuleCompiler is a stack class that contains unrooted |
|
807 // PropertyName (JSAtom) pointers. This is safe because it cannot be |
|
808 // constructed without a TokenStream reference. TokenStream is itself a stack |
|
809 // class that cannot be constructed without an AutoKeepAtoms being live on the |
|
810 // stack, which prevents collection of atoms. |
|
811 // |
|
812 // ModuleCompiler is marked as rooted in the rooting analysis. Don't add |
|
813 // non-JSAtom pointers, or this will break! |
|
814 class MOZ_STACK_CLASS ModuleCompiler |
|
815 { |
|
816 public: |
|
817 class Func |
|
818 { |
|
819 PropertyName *name_; |
|
820 bool defined_; |
|
821 uint32_t srcOffset_; |
|
822 uint32_t endOffset_; |
|
823 Signature sig_; |
|
824 Label *code_; |
|
825 unsigned compileTime_; |
|
826 |
|
827 public: |
|
828 Func(PropertyName *name, Signature &&sig, Label *code) |
|
829 : name_(name), defined_(false), srcOffset_(0), endOffset_(0), sig_(Move(sig)), |
|
830 code_(code), compileTime_(0) |
|
831 {} |
|
832 |
|
833 PropertyName *name() const { return name_; } |
|
834 |
|
835 bool defined() const { return defined_; } |
|
836 void finish(uint32_t start, uint32_t end) { |
|
837 JS_ASSERT(!defined_); |
|
838 defined_ = true; |
|
839 srcOffset_ = start; |
|
840 endOffset_ = end; |
|
841 } |
|
842 |
|
843 uint32_t srcOffset() const { JS_ASSERT(defined_); return srcOffset_; } |
|
844 uint32_t endOffset() const { JS_ASSERT(defined_); return endOffset_; } |
|
845 Signature &sig() { return sig_; } |
|
846 const Signature &sig() const { return sig_; } |
|
847 Label *code() const { return code_; } |
|
848 unsigned compileTime() const { return compileTime_; } |
|
849 void accumulateCompileTime(unsigned ms) { compileTime_ += ms; } |
|
850 }; |
|
851 |
|
852 class Global |
|
853 { |
|
854 public: |
|
855 enum Which { |
|
856 Variable, |
|
857 ConstantLiteral, |
|
858 ConstantImport, |
|
859 Function, |
|
860 FuncPtrTable, |
|
861 FFI, |
|
862 ArrayView, |
|
863 MathBuiltinFunction |
|
864 }; |
|
865 |
|
866 private: |
|
867 Which which_; |
|
868 union { |
|
869 struct { |
|
870 VarType::Which type_; |
|
871 uint32_t index_; |
|
872 Value literalValue_; |
|
873 } varOrConst; |
|
874 uint32_t funcIndex_; |
|
875 uint32_t funcPtrTableIndex_; |
|
876 uint32_t ffiIndex_; |
|
877 ArrayBufferView::ViewType viewType_; |
|
878 AsmJSMathBuiltinFunction mathBuiltinFunc_; |
|
879 } u; |
|
880 |
|
881 friend class ModuleCompiler; |
|
882 friend class js::LifoAlloc; |
|
883 |
|
884 Global(Which which) : which_(which) {} |
|
885 |
|
886 public: |
|
887 Which which() const { |
|
888 return which_; |
|
889 } |
|
890 VarType varOrConstType() const { |
|
891 JS_ASSERT(which_ == Variable || which_ == ConstantLiteral || which_ == ConstantImport); |
|
892 return VarType(u.varOrConst.type_); |
|
893 } |
|
894 uint32_t varOrConstIndex() const { |
|
895 JS_ASSERT(which_ == Variable || which_ == ConstantImport); |
|
896 return u.varOrConst.index_; |
|
897 } |
|
898 bool isConst() const { |
|
899 return which_ == ConstantLiteral || which_ == ConstantImport; |
|
900 } |
|
901 Value constLiteralValue() const { |
|
902 JS_ASSERT(which_ == ConstantLiteral); |
|
903 return u.varOrConst.literalValue_; |
|
904 } |
|
905 uint32_t funcIndex() const { |
|
906 JS_ASSERT(which_ == Function); |
|
907 return u.funcIndex_; |
|
908 } |
|
909 uint32_t funcPtrTableIndex() const { |
|
910 JS_ASSERT(which_ == FuncPtrTable); |
|
911 return u.funcPtrTableIndex_; |
|
912 } |
|
913 unsigned ffiIndex() const { |
|
914 JS_ASSERT(which_ == FFI); |
|
915 return u.ffiIndex_; |
|
916 } |
|
917 ArrayBufferView::ViewType viewType() const { |
|
918 JS_ASSERT(which_ == ArrayView); |
|
919 return u.viewType_; |
|
920 } |
|
921 AsmJSMathBuiltinFunction mathBuiltinFunction() const { |
|
922 JS_ASSERT(which_ == MathBuiltinFunction); |
|
923 return u.mathBuiltinFunc_; |
|
924 } |
|
925 }; |
|
926 |
|
927 typedef js::Vector<const Func*> FuncPtrVector; |
|
928 |
|
929 class FuncPtrTable |
|
930 { |
|
931 Signature sig_; |
|
932 uint32_t mask_; |
|
933 uint32_t globalDataOffset_; |
|
934 FuncPtrVector elems_; |
|
935 |
|
936 public: |
|
937 FuncPtrTable(ExclusiveContext *cx, Signature &&sig, uint32_t mask, uint32_t gdo) |
|
938 : sig_(Move(sig)), mask_(mask), globalDataOffset_(gdo), elems_(cx) |
|
939 {} |
|
940 |
|
941 FuncPtrTable(FuncPtrTable &&rhs) |
|
942 : sig_(Move(rhs.sig_)), mask_(rhs.mask_), globalDataOffset_(rhs.globalDataOffset_), |
|
943 elems_(Move(rhs.elems_)) |
|
944 {} |
|
945 |
|
946 Signature &sig() { return sig_; } |
|
947 const Signature &sig() const { return sig_; } |
|
948 unsigned mask() const { return mask_; } |
|
949 unsigned globalDataOffset() const { return globalDataOffset_; } |
|
950 |
|
951 bool initialized() const { return !elems_.empty(); } |
|
952 void initElems(FuncPtrVector &&elems) { elems_ = Move(elems); JS_ASSERT(initialized()); } |
|
953 unsigned numElems() const { JS_ASSERT(initialized()); return elems_.length(); } |
|
954 const Func &elem(unsigned i) const { return *elems_[i]; } |
|
955 }; |
|
956 |
|
957 typedef js::Vector<FuncPtrTable> FuncPtrTableVector; |
|
958 |
|
959 class ExitDescriptor |
|
960 { |
|
961 PropertyName *name_; |
|
962 Signature sig_; |
|
963 |
|
964 public: |
|
965 ExitDescriptor(PropertyName *name, Signature &&sig) |
|
966 : name_(name), sig_(Move(sig)) {} |
|
967 ExitDescriptor(ExitDescriptor &&rhs) |
|
968 : name_(rhs.name_), sig_(Move(rhs.sig_)) |
|
969 {} |
|
970 const Signature &sig() const { |
|
971 return sig_; |
|
972 } |
|
973 |
|
974 // ExitDescriptor is a HashPolicy: |
|
975 typedef ExitDescriptor Lookup; |
|
976 static HashNumber hash(const ExitDescriptor &d) { |
|
977 HashNumber hn = HashGeneric(d.name_, d.sig_.retType().which()); |
|
978 const VarTypeVector &args = d.sig_.args(); |
|
979 for (unsigned i = 0; i < args.length(); i++) |
|
980 hn = AddToHash(hn, args[i].which()); |
|
981 return hn; |
|
982 } |
|
983 static bool match(const ExitDescriptor &lhs, const ExitDescriptor &rhs) { |
|
984 return lhs.name_ == rhs.name_ && lhs.sig_ == rhs.sig_; |
|
985 } |
|
986 }; |
|
987 |
|
988 typedef HashMap<ExitDescriptor, unsigned, ExitDescriptor> ExitMap; |
|
989 |
|
990 struct MathBuiltin |
|
991 { |
|
992 enum Kind { Function, Constant }; |
|
993 Kind kind; |
|
994 |
|
995 union { |
|
996 double cst; |
|
997 AsmJSMathBuiltinFunction func; |
|
998 } u; |
|
999 |
|
1000 MathBuiltin() : kind(Kind(-1)) {} |
|
1001 MathBuiltin(double cst) : kind(Constant) { |
|
1002 u.cst = cst; |
|
1003 } |
|
1004 MathBuiltin(AsmJSMathBuiltinFunction func) : kind(Function) { |
|
1005 u.func = func; |
|
1006 } |
|
1007 }; |
|
1008 |
|
1009 private: |
|
1010 struct SlowFunction |
|
1011 { |
|
1012 PropertyName *name; |
|
1013 unsigned ms; |
|
1014 unsigned line; |
|
1015 unsigned column; |
|
1016 }; |
|
1017 |
|
1018 typedef HashMap<PropertyName*, MathBuiltin> MathNameMap; |
|
1019 typedef HashMap<PropertyName*, Global*> GlobalMap; |
|
1020 typedef js::Vector<Func*> FuncVector; |
|
1021 typedef js::Vector<AsmJSGlobalAccess> GlobalAccessVector; |
|
1022 typedef js::Vector<SlowFunction> SlowFunctionVector; |
|
1023 |
|
1024 ExclusiveContext * cx_; |
|
1025 AsmJSParser & parser_; |
|
1026 |
|
1027 MacroAssembler masm_; |
|
1028 |
|
1029 ScopedJSDeletePtr<AsmJSModule> module_; |
|
1030 LifoAlloc moduleLifo_; |
|
1031 ParseNode * moduleFunctionNode_; |
|
1032 PropertyName * moduleFunctionName_; |
|
1033 |
|
1034 GlobalMap globals_; |
|
1035 FuncVector functions_; |
|
1036 FuncPtrTableVector funcPtrTables_; |
|
1037 ExitMap exits_; |
|
1038 MathNameMap standardLibraryMathNames_; |
|
1039 Label stackOverflowLabel_; |
|
1040 Label interruptLabel_; |
|
1041 |
|
1042 char * errorString_; |
|
1043 uint32_t errorOffset_; |
|
1044 bool errorOverRecursed_; |
|
1045 |
|
1046 int64_t usecBefore_; |
|
1047 SlowFunctionVector slowFunctions_; |
|
1048 |
|
1049 DebugOnly<bool> finishedFunctionBodies_; |
|
1050 |
|
1051 bool addStandardLibraryMathName(const char *name, AsmJSMathBuiltinFunction func) { |
|
1052 JSAtom *atom = Atomize(cx_, name, strlen(name)); |
|
1053 if (!atom) |
|
1054 return false; |
|
1055 MathBuiltin builtin(func); |
|
1056 return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin); |
|
1057 } |
|
1058 bool addStandardLibraryMathName(const char *name, double cst) { |
|
1059 JSAtom *atom = Atomize(cx_, name, strlen(name)); |
|
1060 if (!atom) |
|
1061 return false; |
|
1062 MathBuiltin builtin(cst); |
|
1063 return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin); |
|
1064 } |
|
1065 |
|
1066 public: |
|
1067 ModuleCompiler(ExclusiveContext *cx, AsmJSParser &parser) |
|
1068 : cx_(cx), |
|
1069 parser_(parser), |
|
1070 masm_(MacroAssembler::AsmJSToken()), |
|
1071 moduleLifo_(LIFO_ALLOC_PRIMARY_CHUNK_SIZE), |
|
1072 moduleFunctionNode_(parser.pc->maybeFunction), |
|
1073 moduleFunctionName_(nullptr), |
|
1074 globals_(cx), |
|
1075 functions_(cx), |
|
1076 funcPtrTables_(cx), |
|
1077 exits_(cx), |
|
1078 standardLibraryMathNames_(cx), |
|
1079 errorString_(nullptr), |
|
1080 errorOffset_(UINT32_MAX), |
|
1081 errorOverRecursed_(false), |
|
1082 usecBefore_(PRMJ_Now()), |
|
1083 slowFunctions_(cx), |
|
1084 finishedFunctionBodies_(false) |
|
1085 { |
|
1086 JS_ASSERT(moduleFunctionNode_->pn_funbox == parser.pc->sc->asFunctionBox()); |
|
1087 } |
|
1088 |
|
1089 ~ModuleCompiler() { |
|
1090 if (errorString_) { |
|
1091 JS_ASSERT(errorOffset_ != UINT32_MAX); |
|
1092 tokenStream().reportAsmJSError(errorOffset_, |
|
1093 JSMSG_USE_ASM_TYPE_FAIL, |
|
1094 errorString_); |
|
1095 js_free(errorString_); |
|
1096 } |
|
1097 if (errorOverRecursed_) |
|
1098 js_ReportOverRecursed(cx_); |
|
1099 |
|
1100 // Avoid spurious Label assertions on compilation failure. |
|
1101 if (!stackOverflowLabel_.bound()) |
|
1102 stackOverflowLabel_.bind(0); |
|
1103 if (!interruptLabel_.bound()) |
|
1104 interruptLabel_.bind(0); |
|
1105 } |
|
1106 |
|
1107 bool init() { |
|
1108 if (!globals_.init() || !exits_.init()) |
|
1109 return false; |
|
1110 |
|
1111 if (!standardLibraryMathNames_.init() || |
|
1112 !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) || |
|
1113 !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) || |
|
1114 !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) || |
|
1115 !addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) || |
|
1116 !addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) || |
|
1117 !addStandardLibraryMathName("atan", AsmJSMathBuiltin_atan) || |
|
1118 !addStandardLibraryMathName("ceil", AsmJSMathBuiltin_ceil) || |
|
1119 !addStandardLibraryMathName("floor", AsmJSMathBuiltin_floor) || |
|
1120 !addStandardLibraryMathName("exp", AsmJSMathBuiltin_exp) || |
|
1121 !addStandardLibraryMathName("log", AsmJSMathBuiltin_log) || |
|
1122 !addStandardLibraryMathName("pow", AsmJSMathBuiltin_pow) || |
|
1123 !addStandardLibraryMathName("sqrt", AsmJSMathBuiltin_sqrt) || |
|
1124 !addStandardLibraryMathName("abs", AsmJSMathBuiltin_abs) || |
|
1125 !addStandardLibraryMathName("atan2", AsmJSMathBuiltin_atan2) || |
|
1126 !addStandardLibraryMathName("imul", AsmJSMathBuiltin_imul) || |
|
1127 !addStandardLibraryMathName("fround", AsmJSMathBuiltin_fround) || |
|
1128 !addStandardLibraryMathName("min", AsmJSMathBuiltin_min) || |
|
1129 !addStandardLibraryMathName("max", AsmJSMathBuiltin_max) || |
|
1130 !addStandardLibraryMathName("E", M_E) || |
|
1131 !addStandardLibraryMathName("LN10", M_LN10) || |
|
1132 !addStandardLibraryMathName("LN2", M_LN2) || |
|
1133 !addStandardLibraryMathName("LOG2E", M_LOG2E) || |
|
1134 !addStandardLibraryMathName("LOG10E", M_LOG10E) || |
|
1135 !addStandardLibraryMathName("PI", M_PI) || |
|
1136 !addStandardLibraryMathName("SQRT1_2", M_SQRT1_2) || |
|
1137 !addStandardLibraryMathName("SQRT2", M_SQRT2)) |
|
1138 { |
|
1139 return false; |
|
1140 } |
|
1141 |
|
1142 uint32_t funcStart = parser_.pc->maybeFunction->pn_body->pn_pos.begin; |
|
1143 uint32_t offsetToEndOfUseAsm = tokenStream().currentToken().pos.end; |
|
1144 |
|
1145 // "use strict" should be added to the source if we are in an implicit |
|
1146 // strict context; see also the comment above addUseStrict in |
|
1147 // js::FunctionToString. |
|
1148 bool strict = parser_.pc->sc->strict && !parser_.pc->sc->hasExplicitUseStrict(); |
|
1149 |
|
1150 module_ = cx_->new_<AsmJSModule>(parser_.ss, funcStart, offsetToEndOfUseAsm, strict); |
|
1151 if (!module_) |
|
1152 return false; |
|
1153 |
|
1154 return true; |
|
1155 } |
|
1156 |
|
1157 bool failOffset(uint32_t offset, const char *str) { |
|
1158 JS_ASSERT(!errorString_); |
|
1159 JS_ASSERT(errorOffset_ == UINT32_MAX); |
|
1160 JS_ASSERT(str); |
|
1161 errorOffset_ = offset; |
|
1162 errorString_ = js_strdup(cx_, str); |
|
1163 return false; |
|
1164 } |
|
1165 |
|
1166 bool fail(ParseNode *pn, const char *str) { |
|
1167 if (pn) |
|
1168 return failOffset(pn->pn_pos.begin, str); |
|
1169 |
|
1170 // The exact rooting static analysis does not perform dataflow analysis, so it believes |
|
1171 // that unrooted things on the stack during compilation may still be accessed after this. |
|
1172 // Since pn is typically only null under OOM, this suppression simply forces any GC to be |
|
1173 // delayed until the compilation is off the stack and more memory can be freed. |
|
1174 gc::AutoSuppressGC nogc(cx_); |
|
1175 return failOffset(tokenStream().peekTokenPos().begin, str); |
|
1176 } |
|
1177 |
|
1178 bool failfVA(ParseNode *pn, const char *fmt, va_list ap) { |
|
1179 JS_ASSERT(!errorString_); |
|
1180 JS_ASSERT(errorOffset_ == UINT32_MAX); |
|
1181 JS_ASSERT(fmt); |
|
1182 errorOffset_ = pn ? pn->pn_pos.begin : tokenStream().currentToken().pos.end; |
|
1183 errorString_ = JS_vsmprintf(fmt, ap); |
|
1184 return false; |
|
1185 } |
|
1186 |
|
1187 bool failf(ParseNode *pn, const char *fmt, ...) { |
|
1188 va_list ap; |
|
1189 va_start(ap, fmt); |
|
1190 failfVA(pn, fmt, ap); |
|
1191 va_end(ap); |
|
1192 return false; |
|
1193 } |
|
1194 |
|
1195 bool failName(ParseNode *pn, const char *fmt, PropertyName *name) { |
|
1196 // This function is invoked without the caller properly rooting its locals. |
|
1197 gc::AutoSuppressGC suppress(cx_); |
|
1198 JSAutoByteString bytes; |
|
1199 if (AtomToPrintableString(cx_, name, &bytes)) |
|
1200 failf(pn, fmt, bytes.ptr()); |
|
1201 return false; |
|
1202 } |
|
1203 |
|
1204 bool failOverRecursed() { |
|
1205 errorOverRecursed_ = true; |
|
1206 return false; |
|
1207 } |
|
1208 |
|
1209 static const unsigned SLOW_FUNCTION_THRESHOLD_MS = 250; |
|
1210 |
|
1211 bool maybeReportCompileTime(const Func &func) { |
|
1212 if (func.compileTime() < SLOW_FUNCTION_THRESHOLD_MS) |
|
1213 return true; |
|
1214 SlowFunction sf; |
|
1215 sf.name = func.name(); |
|
1216 sf.ms = func.compileTime(); |
|
1217 tokenStream().srcCoords.lineNumAndColumnIndex(func.srcOffset(), &sf.line, &sf.column); |
|
1218 return slowFunctions_.append(sf); |
|
1219 } |
|
1220 |
|
1221 /*************************************************** Read-only interface */ |
|
1222 |
|
1223 ExclusiveContext *cx() const { return cx_; } |
|
1224 AsmJSParser &parser() const { return parser_; } |
|
1225 TokenStream &tokenStream() const { return parser_.tokenStream; } |
|
1226 MacroAssembler &masm() { return masm_; } |
|
1227 Label &stackOverflowLabel() { return stackOverflowLabel_; } |
|
1228 Label &interruptLabel() { return interruptLabel_; } |
|
1229 bool hasError() const { return errorString_ != nullptr; } |
|
1230 const AsmJSModule &module() const { return *module_.get(); } |
|
1231 uint32_t moduleStart() const { return module_->funcStart(); } |
|
1232 |
|
1233 ParseNode *moduleFunctionNode() const { return moduleFunctionNode_; } |
|
1234 PropertyName *moduleFunctionName() const { return moduleFunctionName_; } |
|
1235 |
|
1236 const Global *lookupGlobal(PropertyName *name) const { |
|
1237 if (GlobalMap::Ptr p = globals_.lookup(name)) |
|
1238 return p->value(); |
|
1239 return nullptr; |
|
1240 } |
|
1241 Func *lookupFunction(PropertyName *name) { |
|
1242 if (GlobalMap::Ptr p = globals_.lookup(name)) { |
|
1243 Global *value = p->value(); |
|
1244 if (value->which() == Global::Function) |
|
1245 return functions_[value->funcIndex()]; |
|
1246 } |
|
1247 return nullptr; |
|
1248 } |
|
1249 unsigned numFunctions() const { |
|
1250 return functions_.length(); |
|
1251 } |
|
1252 Func &function(unsigned i) { |
|
1253 return *functions_[i]; |
|
1254 } |
|
1255 unsigned numFuncPtrTables() const { |
|
1256 return funcPtrTables_.length(); |
|
1257 } |
|
1258 FuncPtrTable &funcPtrTable(unsigned i) { |
|
1259 return funcPtrTables_[i]; |
|
1260 } |
|
1261 bool lookupStandardLibraryMathName(PropertyName *name, MathBuiltin *mathBuiltin) const { |
|
1262 if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) { |
|
1263 *mathBuiltin = p->value(); |
|
1264 return true; |
|
1265 } |
|
1266 return false; |
|
1267 } |
|
1268 ExitMap::Range allExits() const { |
|
1269 return exits_.all(); |
|
1270 } |
|
1271 |
|
1272 /***************************************************** Mutable interface */ |
|
1273 |
|
1274 void initModuleFunctionName(PropertyName *name) { moduleFunctionName_ = name; } |
|
1275 |
|
1276 void initGlobalArgumentName(PropertyName *n) { module_->initGlobalArgumentName(n); } |
|
1277 void initImportArgumentName(PropertyName *n) { module_->initImportArgumentName(n); } |
|
1278 void initBufferArgumentName(PropertyName *n) { module_->initBufferArgumentName(n); } |
|
1279 |
|
1280 bool addGlobalVarInit(PropertyName *varName, VarType type, const Value &v, bool isConst) { |
|
1281 uint32_t index; |
|
1282 if (!module_->addGlobalVarInit(v, type.toCoercion(), &index)) |
|
1283 return false; |
|
1284 |
|
1285 Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable; |
|
1286 Global *global = moduleLifo_.new_<Global>(which); |
|
1287 if (!global) |
|
1288 return false; |
|
1289 global->u.varOrConst.index_ = index; |
|
1290 global->u.varOrConst.type_ = type.which(); |
|
1291 if (isConst) |
|
1292 global->u.varOrConst.literalValue_ = v; |
|
1293 |
|
1294 return globals_.putNew(varName, global); |
|
1295 } |
|
1296 bool addGlobalVarImport(PropertyName *varName, PropertyName *fieldName, AsmJSCoercion coercion, |
|
1297 bool isConst) { |
|
1298 uint32_t index; |
|
1299 if (!module_->addGlobalVarImport(fieldName, coercion, &index)) |
|
1300 return false; |
|
1301 |
|
1302 Global::Which which = isConst ? Global::ConstantImport : Global::Variable; |
|
1303 Global *global = moduleLifo_.new_<Global>(which); |
|
1304 if (!global) |
|
1305 return false; |
|
1306 global->u.varOrConst.index_ = index; |
|
1307 global->u.varOrConst.type_ = VarType(coercion).which(); |
|
1308 |
|
1309 return globals_.putNew(varName, global); |
|
1310 } |
|
1311 bool addFunction(PropertyName *name, Signature &&sig, Func **func) { |
|
1312 JS_ASSERT(!finishedFunctionBodies_); |
|
1313 Global *global = moduleLifo_.new_<Global>(Global::Function); |
|
1314 if (!global) |
|
1315 return false; |
|
1316 global->u.funcIndex_ = functions_.length(); |
|
1317 if (!globals_.putNew(name, global)) |
|
1318 return false; |
|
1319 Label *code = moduleLifo_.new_<Label>(); |
|
1320 if (!code) |
|
1321 return false; |
|
1322 *func = moduleLifo_.new_<Func>(name, Move(sig), code); |
|
1323 if (!*func) |
|
1324 return false; |
|
1325 return functions_.append(*func); |
|
1326 } |
|
1327 bool addFuncPtrTable(PropertyName *name, Signature &&sig, uint32_t mask, FuncPtrTable **table) { |
|
1328 Global *global = moduleLifo_.new_<Global>(Global::FuncPtrTable); |
|
1329 if (!global) |
|
1330 return false; |
|
1331 global->u.funcPtrTableIndex_ = funcPtrTables_.length(); |
|
1332 if (!globals_.putNew(name, global)) |
|
1333 return false; |
|
1334 uint32_t globalDataOffset; |
|
1335 if (!module_->addFuncPtrTable(/* numElems = */ mask + 1, &globalDataOffset)) |
|
1336 return false; |
|
1337 FuncPtrTable tmpTable(cx_, Move(sig), mask, globalDataOffset); |
|
1338 if (!funcPtrTables_.append(Move(tmpTable))) |
|
1339 return false; |
|
1340 *table = &funcPtrTables_.back(); |
|
1341 return true; |
|
1342 } |
|
1343 bool addFFI(PropertyName *varName, PropertyName *field) { |
|
1344 Global *global = moduleLifo_.new_<Global>(Global::FFI); |
|
1345 if (!global) |
|
1346 return false; |
|
1347 uint32_t index; |
|
1348 if (!module_->addFFI(field, &index)) |
|
1349 return false; |
|
1350 global->u.ffiIndex_ = index; |
|
1351 return globals_.putNew(varName, global); |
|
1352 } |
|
1353 bool addArrayView(PropertyName *varName, ArrayBufferView::ViewType vt, PropertyName *fieldName) { |
|
1354 Global *global = moduleLifo_.new_<Global>(Global::ArrayView); |
|
1355 if (!global) |
|
1356 return false; |
|
1357 if (!module_->addArrayView(vt, fieldName)) |
|
1358 return false; |
|
1359 global->u.viewType_ = vt; |
|
1360 return globals_.putNew(varName, global); |
|
1361 } |
|
1362 bool addMathBuiltinFunction(PropertyName *varName, AsmJSMathBuiltinFunction func, PropertyName *fieldName) { |
|
1363 if (!module_->addMathBuiltinFunction(func, fieldName)) |
|
1364 return false; |
|
1365 Global *global = moduleLifo_.new_<Global>(Global::MathBuiltinFunction); |
|
1366 if (!global) |
|
1367 return false; |
|
1368 global->u.mathBuiltinFunc_ = func; |
|
1369 return globals_.putNew(varName, global); |
|
1370 } |
|
1371 private: |
|
1372 bool addGlobalDoubleConstant(PropertyName *varName, double constant) { |
|
1373 Global *global = moduleLifo_.new_<Global>(Global::ConstantLiteral); |
|
1374 if (!global) |
|
1375 return false; |
|
1376 global->u.varOrConst.literalValue_ = DoubleValue(constant); |
|
1377 global->u.varOrConst.type_ = VarType::Double; |
|
1378 return globals_.putNew(varName, global); |
|
1379 } |
|
1380 public: |
|
1381 bool addMathBuiltinConstant(PropertyName *varName, double constant, PropertyName *fieldName) { |
|
1382 if (!module_->addMathBuiltinConstant(constant, fieldName)) |
|
1383 return false; |
|
1384 return addGlobalDoubleConstant(varName, constant); |
|
1385 } |
|
1386 bool addGlobalConstant(PropertyName *varName, double constant, PropertyName *fieldName) { |
|
1387 if (!module_->addGlobalConstant(constant, fieldName)) |
|
1388 return false; |
|
1389 return addGlobalDoubleConstant(varName, constant); |
|
1390 } |
|
1391 bool addExportedFunction(const Func *func, PropertyName *maybeFieldName) { |
|
1392 AsmJSModule::ArgCoercionVector argCoercions; |
|
1393 const VarTypeVector &args = func->sig().args(); |
|
1394 if (!argCoercions.resize(args.length())) |
|
1395 return false; |
|
1396 for (unsigned i = 0; i < args.length(); i++) |
|
1397 argCoercions[i] = args[i].toCoercion(); |
|
1398 AsmJSModule::ReturnType retType = func->sig().retType().toModuleReturnType(); |
|
1399 return module_->addExportedFunction(func->name(), func->srcOffset(), func->endOffset(), |
|
1400 maybeFieldName, Move(argCoercions), retType); |
|
1401 } |
|
1402 bool addExit(unsigned ffiIndex, PropertyName *name, Signature &&sig, unsigned *exitIndex) { |
|
1403 ExitDescriptor exitDescriptor(name, Move(sig)); |
|
1404 ExitMap::AddPtr p = exits_.lookupForAdd(exitDescriptor); |
|
1405 if (p) { |
|
1406 *exitIndex = p->value(); |
|
1407 return true; |
|
1408 } |
|
1409 if (!module_->addExit(ffiIndex, exitIndex)) |
|
1410 return false; |
|
1411 return exits_.add(p, Move(exitDescriptor), *exitIndex); |
|
1412 } |
|
1413 bool addFunctionName(PropertyName *name, uint32_t *index) { |
|
1414 return module_->addFunctionName(name, index); |
|
1415 } |
|
1416 |
|
1417 // Note a constraint on the minimum size of the heap. The heap size is |
|
1418 // constrained when linking to be at least the maximum of all such constraints. |
|
1419 void requireHeapLengthToBeAtLeast(uint32_t len) { |
|
1420 module_->requireHeapLengthToBeAtLeast(len); |
|
1421 } |
|
1422 uint32_t minHeapLength() const { |
|
1423 return module_->minHeapLength(); |
|
1424 } |
|
1425 LifoAlloc &lifo() { |
|
1426 return moduleLifo_; |
|
1427 } |
|
1428 |
|
1429 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF) |
|
1430 bool trackProfiledFunction(const Func &func, unsigned endCodeOffset) { |
|
1431 unsigned lineno = 0U, columnIndex = 0U; |
|
1432 tokenStream().srcCoords.lineNumAndColumnIndex(func.srcOffset(), &lineno, &columnIndex); |
|
1433 unsigned startCodeOffset = func.code()->offset(); |
|
1434 return module_->trackProfiledFunction(func.name(), startCodeOffset, endCodeOffset, |
|
1435 lineno, columnIndex); |
|
1436 } |
|
1437 #endif |
|
1438 |
|
1439 #ifdef JS_ION_PERF |
|
1440 bool trackPerfProfiledBlocks(AsmJSPerfSpewer &perfSpewer, const Func &func, unsigned endCodeOffset) { |
|
1441 unsigned startCodeOffset = func.code()->offset(); |
|
1442 perfSpewer.noteBlocksOffsets(); |
|
1443 unsigned endInlineCodeOffset = perfSpewer.endInlineCode.offset(); |
|
1444 return module_->trackPerfProfiledBlocks(func.name(), startCodeOffset, endInlineCodeOffset, |
|
1445 endCodeOffset, perfSpewer.basicBlocks()); |
|
1446 } |
|
1447 #endif |
|
1448 |
|
1449 void finishFunctionBodies() { |
|
1450 JS_ASSERT(!finishedFunctionBodies_); |
|
1451 masm_.align(AsmJSPageSize); |
|
1452 finishedFunctionBodies_ = true; |
|
1453 module_->initFunctionBytes(masm_.currentOffset()); |
|
1454 } |
|
1455 |
|
1456 void setInterpExitOffset(unsigned exitIndex) { |
|
1457 module_->exit(exitIndex).initInterpOffset(masm_.currentOffset()); |
|
1458 } |
|
1459 void setIonExitOffset(unsigned exitIndex) { |
|
1460 module_->exit(exitIndex).initIonOffset(masm_.currentOffset()); |
|
1461 } |
|
1462 void setEntryOffset(unsigned exportIndex) { |
|
1463 module_->exportedFunction(exportIndex).initCodeOffset(masm_.currentOffset()); |
|
1464 } |
|
1465 |
|
1466 void buildCompilationTimeReport(bool storedInCache, ScopedJSFreePtr<char> *out) { |
|
1467 ScopedJSFreePtr<char> slowFuns; |
|
1468 #ifndef JS_MORE_DETERMINISTIC |
|
1469 int64_t usecAfter = PRMJ_Now(); |
|
1470 int msTotal = (usecAfter - usecBefore_) / PRMJ_USEC_PER_MSEC; |
|
1471 if (!slowFunctions_.empty()) { |
|
1472 slowFuns.reset(JS_smprintf("; %d functions compiled slowly: ", slowFunctions_.length())); |
|
1473 if (!slowFuns) |
|
1474 return; |
|
1475 for (unsigned i = 0; i < slowFunctions_.length(); i++) { |
|
1476 SlowFunction &func = slowFunctions_[i]; |
|
1477 JSAutoByteString name; |
|
1478 if (!AtomToPrintableString(cx_, func.name, &name)) |
|
1479 return; |
|
1480 slowFuns.reset(JS_smprintf("%s%s:%u:%u (%ums)%s", slowFuns.get(), |
|
1481 name.ptr(), func.line, func.column, func.ms, |
|
1482 i+1 < slowFunctions_.length() ? ", " : "")); |
|
1483 if (!slowFuns) |
|
1484 return; |
|
1485 } |
|
1486 } |
|
1487 out->reset(JS_smprintf("total compilation time %dms; %s%s", |
|
1488 msTotal, |
|
1489 storedInCache ? "stored in cache" : "not stored in cache", |
|
1490 slowFuns ? slowFuns.get() : "")); |
|
1491 #endif |
|
1492 } |
|
1493 |
|
1494 bool finish(ScopedJSDeletePtr<AsmJSModule> *module) |
|
1495 { |
|
1496 module_->initFuncEnd(tokenStream().currentToken().pos.end, |
|
1497 tokenStream().peekTokenPos().end); |
|
1498 masm_.finish(); |
|
1499 if (masm_.oom()) |
|
1500 return false; |
|
1501 |
|
1502 module_->assignCallSites(masm_.extractCallSites()); |
|
1503 module_->assignHeapAccesses(masm_.extractAsmJSHeapAccesses()); |
|
1504 |
|
1505 #if defined(JS_CODEGEN_ARM) |
|
1506 // Now that compilation has finished, we need to update offsets to |
|
1507 // reflect actual offsets (an ARM distinction). |
|
1508 for (unsigned i = 0; i < module_->numHeapAccesses(); i++) { |
|
1509 AsmJSHeapAccess &a = module_->heapAccess(i); |
|
1510 a.setOffset(masm_.actualOffset(a.offset())); |
|
1511 } |
|
1512 for (unsigned i = 0; i < module_->numExportedFunctions(); i++) |
|
1513 module_->exportedFunction(i).updateCodeOffset(masm_); |
|
1514 for (unsigned i = 0; i < module_->numExits(); i++) |
|
1515 module_->exit(i).updateOffsets(masm_); |
|
1516 for (unsigned i = 0; i < module_->numCallSites(); i++) { |
|
1517 CallSite &c = module_->callSite(i); |
|
1518 c.setReturnAddressOffset(masm_.actualOffset(c.returnAddressOffset())); |
|
1519 } |
|
1520 #endif |
|
1521 |
|
1522 // The returned memory is owned by module_. |
|
1523 if (!module_->allocateAndCopyCode(cx_, masm_)) |
|
1524 return false; |
|
1525 |
|
1526 module_->updateFunctionBytes(masm_); |
|
1527 // c.f. JitCode::copyFrom |
|
1528 JS_ASSERT(masm_.jumpRelocationTableBytes() == 0); |
|
1529 JS_ASSERT(masm_.dataRelocationTableBytes() == 0); |
|
1530 JS_ASSERT(masm_.preBarrierTableBytes() == 0); |
|
1531 JS_ASSERT(!masm_.hasEnteredExitFrame()); |
|
1532 |
|
1533 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF) |
|
1534 // Fix up the code offsets. |
|
1535 for (unsigned i = 0; i < module_->numProfiledFunctions(); i++) { |
|
1536 AsmJSModule::ProfiledFunction &func = module_->profiledFunction(i); |
|
1537 func.pod.startCodeOffset = masm_.actualOffset(func.pod.startCodeOffset); |
|
1538 func.pod.endCodeOffset = masm_.actualOffset(func.pod.endCodeOffset); |
|
1539 } |
|
1540 #endif |
|
1541 |
|
1542 #ifdef JS_ION_PERF |
|
1543 for (unsigned i = 0; i < module_->numPerfBlocksFunctions(); i++) { |
|
1544 AsmJSModule::ProfiledBlocksFunction &func = module_->perfProfiledBlocksFunction(i); |
|
1545 func.pod.startCodeOffset = masm_.actualOffset(func.pod.startCodeOffset); |
|
1546 func.endInlineCodeOffset = masm_.actualOffset(func.endInlineCodeOffset); |
|
1547 func.pod.endCodeOffset = masm_.actualOffset(func.pod.endCodeOffset); |
|
1548 BasicBlocksVector &basicBlocks = func.blocks; |
|
1549 for (uint32_t i = 0; i < basicBlocks.length(); i++) { |
|
1550 Record &r = basicBlocks[i]; |
|
1551 r.startOffset = masm_.actualOffset(r.startOffset); |
|
1552 r.endOffset = masm_.actualOffset(r.endOffset); |
|
1553 } |
|
1554 } |
|
1555 #endif |
|
1556 |
|
1557 module_->setInterruptOffset(masm_.actualOffset(interruptLabel_.offset())); |
|
1558 |
|
1559 // CodeLabels produced during codegen |
|
1560 for (size_t i = 0; i < masm_.numCodeLabels(); i++) { |
|
1561 CodeLabel src = masm_.codeLabel(i); |
|
1562 int32_t labelOffset = src.dest()->offset(); |
|
1563 int32_t targetOffset = masm_.actualOffset(src.src()->offset()); |
|
1564 // The patched uses of a label embed a linked list where the |
|
1565 // to-be-patched immediate is the offset of the next to-be-patched |
|
1566 // instruction. |
|
1567 while (labelOffset != LabelBase::INVALID_OFFSET) { |
|
1568 size_t patchAtOffset = masm_.labelOffsetToPatchOffset(labelOffset); |
|
1569 AsmJSModule::RelativeLink link; |
|
1570 link.patchAtOffset = patchAtOffset; |
|
1571 link.targetOffset = targetOffset; |
|
1572 if (!module_->addRelativeLink(link)) |
|
1573 return false; |
|
1574 labelOffset = *(uintptr_t *)(module_->codeBase() + patchAtOffset); |
|
1575 } |
|
1576 } |
|
1577 |
|
1578 // Function-pointer-table entries |
|
1579 for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) { |
|
1580 FuncPtrTable &table = funcPtrTables_[tableIndex]; |
|
1581 unsigned tableBaseOffset = module_->offsetOfGlobalData() + table.globalDataOffset(); |
|
1582 for (unsigned elemIndex = 0; elemIndex < table.numElems(); elemIndex++) { |
|
1583 AsmJSModule::RelativeLink link; |
|
1584 link.patchAtOffset = tableBaseOffset + elemIndex * sizeof(uint8_t*); |
|
1585 link.targetOffset = masm_.actualOffset(table.elem(elemIndex).code()->offset()); |
|
1586 if (!module_->addRelativeLink(link)) |
|
1587 return false; |
|
1588 } |
|
1589 } |
|
1590 |
|
1591 #if defined(JS_CODEGEN_X86) |
|
1592 // Global data accesses in x86 need to be patched with the absolute |
|
1593 // address of the global. Globals are allocated sequentially after the |
|
1594 // code section so we can just use a RelativeLink. |
|
1595 for (unsigned i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) { |
|
1596 AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i); |
|
1597 AsmJSModule::RelativeLink link; |
|
1598 link.patchAtOffset = masm_.labelOffsetToPatchOffset(a.patchAt.offset()); |
|
1599 link.targetOffset = module_->offsetOfGlobalData() + a.globalDataOffset; |
|
1600 if (!module_->addRelativeLink(link)) |
|
1601 return false; |
|
1602 } |
|
1603 #endif |
|
1604 |
|
1605 #if defined(JS_CODEGEN_X64) |
|
1606 // Global data accesses on x64 use rip-relative addressing and thus do |
|
1607 // not need patching after deserialization. |
|
1608 uint8_t *code = module_->codeBase(); |
|
1609 for (unsigned i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) { |
|
1610 AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i); |
|
1611 masm_.patchAsmJSGlobalAccess(a.patchAt, code, module_->globalData(), a.globalDataOffset); |
|
1612 } |
|
1613 #endif |
|
1614 |
|
1615 // Absolute links |
|
1616 for (size_t i = 0; i < masm_.numAsmJSAbsoluteLinks(); i++) { |
|
1617 AsmJSAbsoluteLink src = masm_.asmJSAbsoluteLink(i); |
|
1618 AsmJSModule::AbsoluteLink link; |
|
1619 link.patchAt = masm_.actualOffset(src.patchAt.offset()); |
|
1620 link.target = src.target; |
|
1621 if (!module_->addAbsoluteLink(link)) |
|
1622 return false; |
|
1623 } |
|
1624 |
|
1625 *module = module_.forget(); |
|
1626 return true; |
|
1627 } |
|
1628 }; |
|
1629 |
|
1630 } /* anonymous namespace */ |
|
1631 |
|
1632 /*****************************************************************************/ |
|
1633 // Numeric literal utilities |
|
1634 |
|
1635 namespace { |
|
1636 |
|
1637 // Represents the type and value of an asm.js numeric literal. |
|
1638 // |
|
1639 // A literal is a double iff the literal contains an exponent or decimal point |
|
1640 // (even if the fractional part is 0). Otherwise, integers may be classified: |
|
1641 // fixnum: [0, 2^31) |
|
1642 // negative int: [-2^31, 0) |
|
1643 // big unsigned: [2^31, 2^32) |
|
1644 // out of range: otherwise |
|
1645 // Lastly, a literal may be a float literal which is any double or integer |
|
1646 // literal coerced with Math.fround. |
|
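// A few illustrative classifications under the rules above (editorial
// examples, not normative text; 'fround' stands for a module-level name
// bound to the stdlib's Math.fround):
//
//   1.5          -> Double        (contains a decimal point)
//   -0           -> Double        (negative zero is forced to double)
//   42           -> Fixnum        (in [0, 2^31))
//   -7           -> NegativeInt   (in [-2^31, 0))
//   0x80000000   -> BigUnsigned   (2^31, in [2^31, 2^32))
//   0x100000000  -> OutOfRangeInt (does not fit in 32 bits)
//   fround(42)   -> Float         (integer literal coerced with fround)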
1647 class NumLit |
|
1648 { |
|
1649 public: |
|
1650 enum Which { |
|
1651 Fixnum = Type::Fixnum, |
|
1652 NegativeInt = Type::Signed, |
|
1653 BigUnsigned = Type::Unsigned, |
|
1654 Double = Type::Double, |
|
1655 Float = Type::Float, |
|
1656 OutOfRangeInt = -1 |
|
1657 }; |
|
1658 |
|
1659 private: |
|
1660 Which which_; |
|
1661 Value v_; |
|
1662 |
|
1663 public: |
|
1664 NumLit() {} |
|
1665 |
|
1666 NumLit(Which w, Value v) |
|
1667 : which_(w), v_(v) |
|
1668 {} |
|
1669 |
|
1670 Which which() const { |
|
1671 return which_; |
|
1672 } |
|
1673 |
|
1674 int32_t toInt32() const { |
|
1675 JS_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned); |
|
1676 return v_.toInt32(); |
|
1677 } |
|
1678 |
|
1679 double toDouble() const { |
|
1680 JS_ASSERT(which_ == Double); |
|
1681 return v_.toDouble(); |
|
1682 } |
|
1683 |
|
1684 float toFloat() const { |
|
1685 JS_ASSERT(which_ == Float); |
|
1686 return float(v_.toDouble()); |
|
1687 } |
|
1688 |
|
1689 Value value() const { |
|
1690 JS_ASSERT(which_ != OutOfRangeInt); |
|
1691 return v_; |
|
1692 } |
|
1693 |
|
1694 bool hasType() const { |
|
1695 return which_ != OutOfRangeInt; |
|
1696 } |
|
1697 |
|
1698 Type type() const { |
|
1699 JS_ASSERT(hasType()); |
|
1700 return Type::Which(which_); |
|
1701 } |
|
1702 |
|
1703 VarType varType() const { |
|
1704 JS_ASSERT(hasType()); |
|
1705 switch (which_) { |
|
1706 case NumLit::Fixnum: |
|
1707 case NumLit::NegativeInt: |
|
1708 case NumLit::BigUnsigned: |
|
1709 return VarType::Int; |
|
1710 case NumLit::Double: |
|
1711 return VarType::Double; |
|
1712 case NumLit::Float: |
|
1713 return VarType::Float; |
|
1714 case NumLit::OutOfRangeInt:; |
|
1715 } |
|
1716 MOZ_ASSUME_UNREACHABLE("Unexpected NumLit type"); |
|
1717 } |
|
1718 }; |
|
1719 |
|
1720 } /* anonymous namespace */ |
|
1721 |
|
1722 static bool |
|
1723 IsNumericNonFloatLiteral(ParseNode *pn) |
|
1724 { |
|
1725 // Note: '-' is never rolled into the number; numbers are always positive |
|
1726 // and negations must be applied manually. |
|
1727 return pn->isKind(PNK_NUMBER) || |
|
1728 (pn->isKind(PNK_NEG) && UnaryKid(pn)->isKind(PNK_NUMBER)); |
|
1729 } |
|
1730 |
|
1731 static bool |
|
1732 IsFloatCoercion(ModuleCompiler &m, ParseNode *pn, ParseNode **coercedExpr) |
|
1733 { |
|
1734 if (!pn->isKind(PNK_CALL)) |
|
1735 return false; |
|
1736 |
|
1737 ParseNode *callee = CallCallee(pn); |
|
1738 if (!callee->isKind(PNK_NAME)) |
|
1739 return false; |
|
1740 |
|
1741 const ModuleCompiler::Global *global = m.lookupGlobal(callee->name()); |
|
1742 if (!global || |
|
1743 global->which() != ModuleCompiler::Global::MathBuiltinFunction || |
|
1744 global->mathBuiltinFunction() != AsmJSMathBuiltin_fround) |
|
1745 { |
|
1746 return false; |
|
1747 } |
|
1748 |
|
1749 if (CallArgListLength(pn) != 1) |
|
1750 return false; |
|
1751 |
|
1752 if (coercedExpr) |
|
1753 *coercedExpr = CallArgList(pn); |
|
1754 |
|
1755 return true; |
|
1756 } |
|
1757 |
|
1758 static bool |
|
1759 IsNumericFloatLiteral(ModuleCompiler &m, ParseNode *pn) |
|
1760 { |
|
1761 ParseNode *coercedExpr; |
|
1762 if (!IsFloatCoercion(m, pn, &coercedExpr)) |
|
1763 return false; |
|
1764 |
|
1765 return IsNumericNonFloatLiteral(coercedExpr); |
|
1766 } |
|
1767 |
|
1768 static bool |
|
1769 IsNumericLiteral(ModuleCompiler &m, ParseNode *pn) |
|
1770 { |
|
1771 return IsNumericNonFloatLiteral(pn) || |
|
1772 IsNumericFloatLiteral(m, pn); |
|
1773 } |
|
1774 |
|
1775 // The JS grammar treats -42 as -(42) (i.e., with separate grammar |
|
1776 // productions for the unary - and the literal 42). However, the asm.js spec |
|
1777 // recognizes -42 (modulo parens, so -(42) and -((42))) as a single literal, |
|
1778 // so fold the two potential parse nodes into a single double value. |
|
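// For example, the source text "-42" parses as PNK_NEG wrapping
// PNK_NUMBER(42); the extraction below steps into the PNK_NUMBER child and
// returns -42.0 as a single value (editorial illustration of the folding
// described above).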
1779 static double |
|
1780 ExtractNumericNonFloatValue(ParseNode **pn) |
|
1781 { |
|
1782 JS_ASSERT(IsNumericNonFloatLiteral(*pn)); |
|
1783 |
|
1784 if ((*pn)->isKind(PNK_NEG)) { |
|
1785 *pn = UnaryKid(*pn); |
|
1786 return -NumberNodeValue(*pn); |
|
1787 } |
|
1788 |
|
1789 return NumberNodeValue(*pn); |
|
1790 } |
|
1791 |
|
1792 static NumLit |
|
1793 ExtractNumericLiteral(ModuleCompiler &m, ParseNode *pn) |
|
1794 { |
|
1795 JS_ASSERT(IsNumericLiteral(m, pn)); |
|
1796 |
|
1797 // Float literals are explicitly coerced and thus the coerced literal may be |
|
1798 // any valid (non-float) numeric literal. |
|
1799 if (pn->isKind(PNK_CALL)) { |
|
1800 pn = CallArgList(pn); |
|
1801 double d = ExtractNumericNonFloatValue(&pn); |
|
1802 return NumLit(NumLit::Float, DoubleValue(d)); |
|
1803 } |
|
1804 |
|
1805 double d = ExtractNumericNonFloatValue(&pn); |
|
1806 |
|
1807 // The asm.js spec syntactically classifies any literal containing a |
|
1808 // decimal point, as well as the literal -0, as having double type. |
|
1809 if (NumberNodeHasFrac(pn) || IsNegativeZero(d)) |
|
1810 return NumLit(NumLit::Double, DoubleValue(d)); |
|
1811 |
|
1812 // The syntactic checks above rule out these double values. |
|
1813 JS_ASSERT(!IsNegativeZero(d)); |
|
1814 JS_ASSERT(!IsNaN(d)); |
|
1815 |
|
1816 // Although doubles can only *precisely* represent 53-bit integers, they |
|
1817 // can *imprecisely* represent integers much bigger than an int64_t. |
|
1818 // Furthermore, d may be inf or -inf. In both cases, casting to an int64_t |
|
1819 // is undefined, so test against the integer bounds using doubles. |
|
1820 if (d < double(INT32_MIN) || d > double(UINT32_MAX)) |
|
1821 return NumLit(NumLit::OutOfRangeInt, UndefinedValue()); |
|
1822 |
|
1823 // With the above syntactic and range limitations, d is definitely an |
|
1824 // integer in the range [INT32_MIN, UINT32_MAX]. |
|
1825 int64_t i64 = int64_t(d); |
|
1826 if (i64 >= 0) { |
|
1827 if (i64 <= INT32_MAX) |
|
1828 return NumLit(NumLit::Fixnum, Int32Value(i64)); |
|
1829 JS_ASSERT(i64 <= UINT32_MAX); |
|
1830 return NumLit(NumLit::BigUnsigned, Int32Value(uint32_t(i64))); |
|
1831 } |
|
1832 JS_ASSERT(i64 >= INT32_MIN); |
|
1833 return NumLit(NumLit::NegativeInt, Int32Value(i64)); |
|
1834 } |
|
1835 |
|
1836 static inline bool |
|
1837 IsLiteralInt(ModuleCompiler &m, ParseNode *pn, uint32_t *u32) |
|
1838 { |
|
1839 if (!IsNumericLiteral(m, pn)) |
|
1840 return false; |
|
1841 |
|
1842 NumLit literal = ExtractNumericLiteral(m, pn); |
|
1843 switch (literal.which()) { |
|
1844 case NumLit::Fixnum: |
|
1845 case NumLit::BigUnsigned: |
|
1846 case NumLit::NegativeInt: |
|
1847 *u32 = uint32_t(literal.toInt32()); |
|
1848 return true; |
|
1849 case NumLit::Double: |
|
1850 case NumLit::Float: |
|
1851 case NumLit::OutOfRangeInt: |
|
1852 return false; |
|
1853 } |
|
1854 |
|
1855 MOZ_ASSUME_UNREACHABLE("Bad literal type"); |
|
1856 } |
|
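// Editorial note: IsLiteralInt() reports the literal's raw 32-bit pattern,
// so the NegativeInt literal -1 yields *u32 == 0xFFFFFFFF and the
// BigUnsigned literal 0x80000000 yields *u32 == 0x80000000.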
1857 |
|
1858 /*****************************************************************************/ |
|
1859 |
|
1860 namespace { |
|
1861 |
|
1862 // Encapsulates the compilation of a single function in an asm.js module. The |
|
1863 // function compiler handles the creation and final backend compilation of the |
|
1864 // MIR graph. Also see ModuleCompiler comment. |
|
1865 class FunctionCompiler |
|
1866 { |
|
1867 public: |
|
1868 struct Local |
|
1869 { |
|
1870 VarType type; |
|
1871 unsigned slot; |
|
1872 Local(VarType t, unsigned slot) : type(t), slot(slot) {} |
|
1873 }; |
|
1874 |
|
1875 struct TypedValue |
|
1876 { |
|
1877 VarType type; |
|
1878 Value value; |
|
1879 TypedValue(VarType t, const Value &v) : type(t), value(v) {} |
|
1880 }; |
|
1881 |
|
1882 private: |
|
1883 typedef HashMap<PropertyName*, Local> LocalMap; |
|
1884 typedef js::Vector<TypedValue> VarInitializerVector; |
|
1885 typedef HashMap<PropertyName*, BlockVector> LabeledBlockMap; |
|
1886 typedef HashMap<ParseNode*, BlockVector> UnlabeledBlockMap; |
|
1887 typedef js::Vector<ParseNode*, 4> NodeStack; |
|
1888 |
|
1889 ModuleCompiler & m_; |
|
1890 LifoAlloc & lifo_; |
|
1891 ParseNode * fn_; |
|
1892 uint32_t functionNameIndex_; |
|
1893 |
|
1894 LocalMap locals_; |
|
1895 VarInitializerVector varInitializers_; |
|
1896 Maybe<RetType> alreadyReturned_; |
|
1897 |
|
1898 TempAllocator * alloc_; |
|
1899 MIRGraph * graph_; |
|
1900 CompileInfo * info_; |
|
1901 MIRGenerator * mirGen_; |
|
1902 Maybe<IonContext> ionContext_; |
|
1903 |
|
1904 MBasicBlock * curBlock_; |
|
1905 |
|
1906 NodeStack loopStack_; |
|
1907 NodeStack breakableStack_; |
|
1908 UnlabeledBlockMap unlabeledBreaks_; |
|
1909 UnlabeledBlockMap unlabeledContinues_; |
|
1910 LabeledBlockMap labeledBreaks_; |
|
1911 LabeledBlockMap labeledContinues_; |
|
1912 |
|
1913 static const uint32_t NO_FUNCTION_NAME_INDEX = UINT32_MAX; |
|
1914 JS_STATIC_ASSERT(NO_FUNCTION_NAME_INDEX > CallSiteDesc::FUNCTION_NAME_INDEX_MAX); |
|
1915 |
|
1916 public: |
|
1917 FunctionCompiler(ModuleCompiler &m, ParseNode *fn, LifoAlloc &lifo) |
|
1918 : m_(m), |
|
1919 lifo_(lifo), |
|
1920 fn_(fn), |
|
1921 functionNameIndex_(NO_FUNCTION_NAME_INDEX), |
|
1922 locals_(m.cx()), |
|
1923 varInitializers_(m.cx()), |
|
1924 alloc_(nullptr), |
|
1925 graph_(nullptr), |
|
1926 info_(nullptr), |
|
1927 mirGen_(nullptr), |
|
1928 curBlock_(nullptr), |
|
1929 loopStack_(m.cx()), |
|
1930 breakableStack_(m.cx()), |
|
1931 unlabeledBreaks_(m.cx()), |
|
1932 unlabeledContinues_(m.cx()), |
|
1933 labeledBreaks_(m.cx()), |
|
1934 labeledContinues_(m.cx()) |
|
1935 {} |
|
1936 |
|
1937 ModuleCompiler & m() const { return m_; } |
|
1938 TempAllocator & alloc() const { return *alloc_; } |
|
1939 LifoAlloc & lifo() const { return lifo_; } |
|
1940 ParseNode * fn() const { return fn_; } |
|
1941 ExclusiveContext * cx() const { return m_.cx(); } |
|
1942 const AsmJSModule & module() const { return m_.module(); } |
|
1943 |
|
1944 bool init() |
|
1945 { |
|
1946 return locals_.init() && |
|
1947 unlabeledBreaks_.init() && |
|
1948 unlabeledContinues_.init() && |
|
1949 labeledBreaks_.init() && |
|
1950 labeledContinues_.init(); |
|
1951 } |
|
1952 |
|
1953 bool fail(ParseNode *pn, const char *str) |
|
1954 { |
|
1955 return m_.fail(pn, str); |
|
1956 } |
|
1957 |
|
1958 bool failf(ParseNode *pn, const char *fmt, ...) |
|
1959 { |
|
1960 va_list ap; |
|
1961 va_start(ap, fmt); |
|
1962 m_.failfVA(pn, fmt, ap); |
|
1963 va_end(ap); |
|
1964 return false; |
|
1965 } |
|
1966 |
|
1967 bool failName(ParseNode *pn, const char *fmt, PropertyName *name) |
|
1968 { |
|
1969 return m_.failName(pn, fmt, name); |
|
1970 } |
|
1971 |
|
1972 ~FunctionCompiler() |
|
1973 { |
|
1974 #ifdef DEBUG |
|
1975 if (!m().hasError() && cx()->isJSContext() && !cx()->asJSContext()->isExceptionPending()) { |
|
1976 JS_ASSERT(loopStack_.empty()); |
|
1977 JS_ASSERT(unlabeledBreaks_.empty()); |
|
1978 JS_ASSERT(unlabeledContinues_.empty()); |
|
1979 JS_ASSERT(labeledBreaks_.empty()); |
|
1980 JS_ASSERT(labeledContinues_.empty()); |
|
1981 JS_ASSERT(inDeadCode()); |
|
1982 } |
|
1983 #endif |
|
1984 } |
|
1985 |
|
1986 /***************************************************** Local scope setup */ |
|
1987 |
|
1988 bool addFormal(ParseNode *pn, PropertyName *name, VarType type) |
|
1989 { |
|
1990 LocalMap::AddPtr p = locals_.lookupForAdd(name); |
|
1991 if (p) |
|
1992 return failName(pn, "duplicate local name '%s' not allowed", name); |
|
1993 return locals_.add(p, name, Local(type, locals_.count())); |
|
1994 } |
|
1995 |
|
1996 bool addVariable(ParseNode *pn, PropertyName *name, VarType type, const Value &init) |
|
1997 { |
|
1998 LocalMap::AddPtr p = locals_.lookupForAdd(name); |
|
1999 if (p) |
|
2000 return failName(pn, "duplicate local name '%s' not allowed", name); |
|
2001 if (!locals_.add(p, name, Local(type, locals_.count()))) |
|
2002 return false; |
|
2003 return varInitializers_.append(TypedValue(type, init)); |
|
2004 } |
|
2005 |
|
2006 bool prepareToEmitMIR(const VarTypeVector &argTypes) |
|
2007 { |
|
2008 JS_ASSERT(locals_.count() == argTypes.length() + varInitializers_.length()); |
|
2009 |
|
2010 alloc_ = lifo_.new_<TempAllocator>(&lifo_); |
|
2011 ionContext_.construct(m_.cx(), alloc_); |
|
2012 |
|
2013 graph_ = lifo_.new_<MIRGraph>(alloc_); |
|
2014 info_ = lifo_.new_<CompileInfo>(locals_.count(), SequentialExecution); |
|
2015 const OptimizationInfo *optimizationInfo = js_IonOptimizations.get(Optimization_AsmJS); |
|
2016 const JitCompileOptions options; |
|
2017 mirGen_ = lifo_.new_<MIRGenerator>(CompileCompartment::get(cx()->compartment()), |
|
2018 options, alloc_, |
|
2019 graph_, info_, optimizationInfo); |
|
2020 |
|
2021 if (!newBlock(/* pred = */ nullptr, &curBlock_, fn_)) |
|
2022 return false; |
|
2023 |
|
2024 for (ABIArgTypeIter i = argTypes; !i.done(); i++) { |
|
2025 MAsmJSParameter *ins = MAsmJSParameter::New(alloc(), *i, i.mirType()); |
|
2026 curBlock_->add(ins); |
|
2027 curBlock_->initSlot(info().localSlot(i.index()), ins); |
|
2028 if (!mirGen_->ensureBallast()) |
|
2029 return false; |
|
2030 } |
|
2031 unsigned firstLocalSlot = argTypes.length(); |
|
2032 for (unsigned i = 0; i < varInitializers_.length(); i++) { |
|
2033 MConstant *ins = MConstant::NewAsmJS(alloc(), varInitializers_[i].value, |
|
2034 varInitializers_[i].type.toMIRType()); |
|
2035 curBlock_->add(ins); |
|
2036 curBlock_->initSlot(info().localSlot(firstLocalSlot + i), ins); |
|
2037 if (!mirGen_->ensureBallast()) |
|
2038 return false; |
|
2039 } |
|
2040 return true; |
|
2041 } |
|
2042 |
|
2043 /******************************* For consistency of returns in a function */ |
|
2044 |
|
2045 bool hasAlreadyReturned() const { |
|
2046 return !alreadyReturned_.empty(); |
|
2047 } |
|
2048 |
|
2049 RetType returnedType() const { |
|
2050 return alreadyReturned_.ref(); |
|
2051 } |
|
2052 |
|
2053 void setReturnedType(RetType retType) { |
|
2054 alreadyReturned_.construct(retType); |
|
2055 } |
|
2056 |
|
2057 /************************* Read-only interface (after local scope setup) */ |
|
2058 |
|
2059 MIRGenerator & mirGen() const { JS_ASSERT(mirGen_); return *mirGen_; } |
|
2060 MIRGraph & mirGraph() const { JS_ASSERT(graph_); return *graph_; } |
|
2061 CompileInfo & info() const { JS_ASSERT(info_); return *info_; } |
|
2062 |
|
2063 const Local *lookupLocal(PropertyName *name) const |
|
2064 { |
|
2065 if (LocalMap::Ptr p = locals_.lookup(name)) |
|
2066 return &p->value(); |
|
2067 return nullptr; |
|
2068 } |
|
2069 |
|
2070 MDefinition *getLocalDef(const Local &local) |
|
2071 { |
|
2072 if (inDeadCode()) |
|
2073 return nullptr; |
|
2074 return curBlock_->getSlot(info().localSlot(local.slot)); |
|
2075 } |
|
2076 |
|
2077 const ModuleCompiler::Global *lookupGlobal(PropertyName *name) const |
|
2078 { |
|
2079 if (locals_.has(name)) |
|
2080 return nullptr; |
|
2081 return m_.lookupGlobal(name); |
|
2082 } |
|
2083 |
|
2084 /***************************** Code generation (after local scope setup) */ |
|
2085 |
|
2086 MDefinition *constant(Value v, Type t) |
|
2087 { |
|
2088 if (inDeadCode()) |
|
2089 return nullptr; |
|
2090 MConstant *constant = MConstant::NewAsmJS(alloc(), v, t.toMIRType()); |
|
2091 curBlock_->add(constant); |
|
2092 return constant; |
|
2093 } |
|
2094 |
|
2095 template <class T> |
|
2096 MDefinition *unary(MDefinition *op) |
|
2097 { |
|
2098 if (inDeadCode()) |
|
2099 return nullptr; |
|
2100 T *ins = T::NewAsmJS(alloc(), op); |
|
2101 curBlock_->add(ins); |
|
2102 return ins; |
|
2103 } |
|
2104 |
|
2105 template <class T> |
|
2106 MDefinition *unary(MDefinition *op, MIRType type) |
|
2107 { |
|
2108 if (inDeadCode()) |
|
2109 return nullptr; |
|
2110 T *ins = T::NewAsmJS(alloc(), op, type); |
|
2111 curBlock_->add(ins); |
|
2112 return ins; |
|
2113 } |
|
2114 |
|
2115 template <class T> |
|
2116 MDefinition *binary(MDefinition *lhs, MDefinition *rhs) |
|
2117 { |
|
2118 if (inDeadCode()) |
|
2119 return nullptr; |
|
2120 T *ins = T::New(alloc(), lhs, rhs); |
|
2121 curBlock_->add(ins); |
|
2122 return ins; |
|
2123 } |
|
2124 |
|
2125 template <class T> |
|
2126 MDefinition *binary(MDefinition *lhs, MDefinition *rhs, MIRType type) |
|
2127 { |
|
2128 if (inDeadCode()) |
|
2129 return nullptr; |
|
2130 T *ins = T::NewAsmJS(alloc(), lhs, rhs, type); |
|
2131 curBlock_->add(ins); |
|
2132 return ins; |
|
2133 } |
|
2134 |
|
2135 MDefinition *minMax(MDefinition *lhs, MDefinition *rhs, MIRType type, bool isMax) { |
|
2136 if (inDeadCode()) |
|
2137 return nullptr; |
|
2138 MMinMax *ins = MMinMax::New(alloc(), lhs, rhs, type, isMax); |
|
2139 curBlock_->add(ins); |
|
2140 return ins; |
|
2141 } |
|
2142 |
|
2143 MDefinition *mul(MDefinition *lhs, MDefinition *rhs, MIRType type, MMul::Mode mode) |
|
2144 { |
|
2145 if (inDeadCode()) |
|
2146 return nullptr; |
|
2147 MMul *ins = MMul::New(alloc(), lhs, rhs, type, mode); |
|
2148 curBlock_->add(ins); |
|
2149 return ins; |
|
2150 } |
|
2151 |
|
2152 MDefinition *div(MDefinition *lhs, MDefinition *rhs, MIRType type, bool unsignd) |
|
2153 { |
|
2154 if (inDeadCode()) |
|
2155 return nullptr; |
|
2156 MDiv *ins = MDiv::NewAsmJS(alloc(), lhs, rhs, type, unsignd); |
|
2157 curBlock_->add(ins); |
|
2158 return ins; |
|
2159 } |
|
2160 |
|
2161 MDefinition *mod(MDefinition *lhs, MDefinition *rhs, MIRType type, bool unsignd) |
|
2162 { |
|
2163 if (inDeadCode()) |
|
2164 return nullptr; |
|
2165 MMod *ins = MMod::NewAsmJS(alloc(), lhs, rhs, type, unsignd); |
|
2166 curBlock_->add(ins); |
|
2167 return ins; |
|
2168 } |
|
2169 |
|
2170 template <class T> |
|
2171 MDefinition *bitwise(MDefinition *lhs, MDefinition *rhs) |
|
2172 { |
|
2173 if (inDeadCode()) |
|
2174 return nullptr; |
|
2175 T *ins = T::NewAsmJS(alloc(), lhs, rhs); |
|
2176 curBlock_->add(ins); |
|
2177 return ins; |
|
2178 } |
|
2179 |
|
2180 template <class T> |
|
2181 MDefinition *bitwise(MDefinition *op) |
|
2182 { |
|
2183 if (inDeadCode()) |
|
2184 return nullptr; |
|
2185 T *ins = T::NewAsmJS(alloc(), op); |
|
2186 curBlock_->add(ins); |
|
2187 return ins; |
|
2188 } |
|
2189 |
|
2190 MDefinition *compare(MDefinition *lhs, MDefinition *rhs, JSOp op, MCompare::CompareType type) |
|
2191 { |
|
2192 if (inDeadCode()) |
|
2193 return nullptr; |
|
2194 MCompare *ins = MCompare::NewAsmJS(alloc(), lhs, rhs, op, type); |
|
2195 curBlock_->add(ins); |
|
2196 return ins; |
|
2197 } |
|
2198 |
|
2199 void assign(const Local &local, MDefinition *def) |
|
2200 { |
|
2201 if (inDeadCode()) |
|
2202 return; |
|
2203 curBlock_->setSlot(info().localSlot(local.slot), def); |
|
2204 } |
|
2205 |
|
2206 MDefinition *loadHeap(ArrayBufferView::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk) |
|
2207 { |
|
2208 if (inDeadCode()) |
|
2209 return nullptr; |
|
2210 MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), vt, ptr); |
|
2211 curBlock_->add(load); |
|
2212 if (chk == NO_BOUNDS_CHECK) |
|
2213 load->setSkipBoundsCheck(true); |
|
2214 return load; |
|
2215 } |
|
2216 |
|
2217 void storeHeap(ArrayBufferView::ViewType vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk) |
|
2218 { |
|
2219 if (inDeadCode()) |
|
2220 return; |
|
2221 MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v); |
|
2222 curBlock_->add(store); |
|
2223 if (chk == NO_BOUNDS_CHECK) |
|
2224 store->setSkipBoundsCheck(true); |
|
2225 } |
|
2226 |
|
2227 MDefinition *loadGlobalVar(const ModuleCompiler::Global &global) |
|
2228 { |
|
2229 if (inDeadCode()) |
|
2230 return nullptr; |
|
2231 |
|
2232 uint32_t index = global.varOrConstIndex(); |
|
2233 unsigned globalDataOffset = module().globalVarIndexToGlobalDataOffset(index); |
|
2234 MIRType type = global.varOrConstType().toMIRType(); |
|
2235 MAsmJSLoadGlobalVar *load = MAsmJSLoadGlobalVar::New(alloc(), type, globalDataOffset, |
|
2236 global.isConst()); |
|
2237 curBlock_->add(load); |
|
2238 return load; |
|
2239 } |
|
2240 |
|
2241 void storeGlobalVar(const ModuleCompiler::Global &global, MDefinition *v) |
|
2242 { |
|
2243 if (inDeadCode()) |
|
2244 return; |
|
2245 JS_ASSERT(!global.isConst()); |
|
2246 unsigned globalDataOffset = module().globalVarIndexToGlobalDataOffset(global.varOrConstIndex()); |
|
2247 curBlock_->add(MAsmJSStoreGlobalVar::New(alloc(), globalDataOffset, v)); |
|
2248 } |
|
2249 |
|
2250 /***************************************************************** Calls */ |
|
2251 |
|
2252 // The IonMonkey backend maintains a single stack offset (from the stack |
|
2253 // pointer to the base of the frame) by adding the total amount of spill |
|
2254 // space required plus the maximum stack required for argument passing. |
|
2255 // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must |
|
2256 // manually accumulate, for the entire function, the maximum required stack |
|
2257 // space for argument passing. (This is passed to the CodeGenerator via |
|
2258 // MIRGenerator::maxAsmJSStackArgBytes.) Naively, this would just be the |
|
2259 // maximum of the stack space required for each individual call (as |
|
2260 // determined by the call ABI). However, as an optimization, arguments are |
|
2261 // stored to the stack immediately after evaluation (to decrease live |
|
2262 // ranges and reduce spilling). This introduces the complexity that, |
|
2263 // between evaluating an argument and making the call, another argument |
|
2264 // evaluation could perform a call that also needs to store to the stack. |
|
2265 // When this occurs childClobbers_ = true and the parent expression's |
|
2266 // arguments are stored above the maximum depth clobbered by a child |
|
2267 // expression. |
|
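// Editorial example of the child-clobber case described above: in a call
// such as
//
//   g(+a, +h(+b))
//
// the outer call may already have stored a stack argument for '+a' before
// evaluating 'h(+b)', whose own call also writes arguments at the bottom of
// the frame. When that happens childClobbers_ is set and finishCallArgs()
// shifts the outer call's stack arguments up by spIncrement_ so the two
// argument areas cannot overlap.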
2268 |
|
2269 class Call |
|
2270 { |
|
2271 ParseNode *node_; |
|
2272 ABIArgGenerator abi_; |
|
2273 uint32_t prevMaxStackBytes_; |
|
2274 uint32_t maxChildStackBytes_; |
|
2275 uint32_t spIncrement_; |
|
2276 Signature sig_; |
|
2277 MAsmJSCall::Args regArgs_; |
|
2278 js::Vector<MAsmJSPassStackArg*> stackArgs_; |
|
2279 bool childClobbers_; |
|
2280 |
|
2281 friend class FunctionCompiler; |
|
2282 |
|
2283 public: |
|
2284 Call(FunctionCompiler &f, ParseNode *callNode, RetType retType) |
|
2285 : node_(callNode), |
|
2286 prevMaxStackBytes_(0), |
|
2287 maxChildStackBytes_(0), |
|
2288 spIncrement_(0), |
|
2289 sig_(f.m().lifo(), retType), |
|
2290 regArgs_(f.cx()), |
|
2291 stackArgs_(f.cx()), |
|
2292 childClobbers_(false) |
|
2293 { } |
|
2294 Signature &sig() { return sig_; } |
|
2295 const Signature &sig() const { return sig_; } |
|
2296 }; |
|
2297 |
|
2298 void startCallArgs(Call *call) |
|
2299 { |
|
2300 if (inDeadCode()) |
|
2301 return; |
|
2302 call->prevMaxStackBytes_ = mirGen().resetAsmJSMaxStackArgBytes(); |
|
2303 } |
|
2304 |
|
2305 bool passArg(MDefinition *argDef, VarType type, Call *call) |
|
2306 { |
|
2307 if (!call->sig().appendArg(type)) |
|
2308 return false; |
|
2309 |
|
2310 if (inDeadCode()) |
|
2311 return true; |
|
2312 |
|
2313 uint32_t childStackBytes = mirGen().resetAsmJSMaxStackArgBytes(); |
|
2314 call->maxChildStackBytes_ = Max(call->maxChildStackBytes_, childStackBytes); |
|
2315 if (childStackBytes > 0 && !call->stackArgs_.empty()) |
|
2316 call->childClobbers_ = true; |
|
2317 |
|
2318 ABIArg arg = call->abi_.next(type.toMIRType()); |
|
2319 if (arg.kind() == ABIArg::Stack) { |
|
2320 MAsmJSPassStackArg *mir = MAsmJSPassStackArg::New(alloc(), arg.offsetFromArgBase(), |
|
2321 argDef); |
|
2322 curBlock_->add(mir); |
|
2323 if (!call->stackArgs_.append(mir)) |
|
2324 return false; |
|
2325 } else { |
|
2326 if (!call->regArgs_.append(MAsmJSCall::Arg(arg.reg(), argDef))) |
|
2327 return false; |
|
2328 } |
|
2329 return true; |
|
2330 } |
|
2331 |
|
2332 void finishCallArgs(Call *call) |
|
2333 { |
|
2334 if (inDeadCode()) |
|
2335 return; |
|
2336 uint32_t parentStackBytes = call->abi_.stackBytesConsumedSoFar(); |
|
2337 uint32_t newStackBytes; |
|
2338 if (call->childClobbers_) { |
|
2339 call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, StackAlignment); |
|
2340 for (unsigned i = 0; i < call->stackArgs_.length(); i++) |
|
2341 call->stackArgs_[i]->incrementOffset(call->spIncrement_); |
|
2342 newStackBytes = Max(call->prevMaxStackBytes_, |
|
2343 call->spIncrement_ + parentStackBytes); |
|
2344 } else { |
|
2345 call->spIncrement_ = 0; |
|
2346 newStackBytes = Max(call->prevMaxStackBytes_, |
|
2347 Max(call->maxChildStackBytes_, parentStackBytes)); |
|
2348 } |
|
2349 mirGen_->setAsmJSMaxStackArgBytes(newStackBytes); |
|
2350 } |
|
2351 |
|
2352 private: |
|
2353 bool callPrivate(MAsmJSCall::Callee callee, const Call &call, MIRType returnType, MDefinition **def) |
|
2354 { |
|
2355 if (inDeadCode()) { |
|
2356 *def = nullptr; |
|
2357 return true; |
|
2358 } |
|
2359 |
|
2360 uint32_t line, column; |
|
2361 m_.tokenStream().srcCoords.lineNumAndColumnIndex(call.node_->pn_pos.begin, &line, &column); |
|
2362 |
|
2363 if (functionNameIndex_ == NO_FUNCTION_NAME_INDEX) { |
|
2364 if (!m_.addFunctionName(FunctionName(fn_), &functionNameIndex_)) |
|
2365 return false; |
|
2366 } |
|
2367 |
|
2368 CallSiteDesc desc(line, column, functionNameIndex_); |
|
2369 MAsmJSCall *ins = MAsmJSCall::New(alloc(), desc, callee, call.regArgs_, returnType, |
|
2370 call.spIncrement_); |
|
2371 if (!ins) |
|
2372 return false; |
|
2373 |
|
2374 curBlock_->add(ins); |
|
2375 *def = ins; |
|
2376 return true; |
|
2377 } |
|
2378 |
|
2379 public: |
|
2380 bool internalCall(const ModuleCompiler::Func &func, const Call &call, MDefinition **def) |
|
2381 { |
|
2382 MIRType returnType = func.sig().retType().toMIRType(); |
|
2383 return callPrivate(MAsmJSCall::Callee(func.code()), call, returnType, def); |
|
2384 } |
|
2385 |
|
2386 bool funcPtrCall(const ModuleCompiler::FuncPtrTable &table, MDefinition *index, |
|
2387 const Call &call, MDefinition **def) |
|
2388 { |
|
2389 if (inDeadCode()) { |
|
2390 *def = nullptr; |
|
2391 return true; |
|
2392 } |
|
2393 |
|
2394 MConstant *mask = MConstant::New(alloc(), Int32Value(table.mask())); |
|
2395 curBlock_->add(mask); |
|
2396 MBitAnd *maskedIndex = MBitAnd::NewAsmJS(alloc(), index, mask); |
|
2397 curBlock_->add(maskedIndex); |
|
2398 MAsmJSLoadFuncPtr *ptrFun = MAsmJSLoadFuncPtr::New(alloc(), table.globalDataOffset(), maskedIndex); |
|
2399 curBlock_->add(ptrFun); |
|
2400 |
|
2401 MIRType returnType = table.sig().retType().toMIRType(); |
|
2402 return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def); |
|
2403 } |
|
2404 |
|
2405 bool ffiCall(unsigned exitIndex, const Call &call, MIRType returnType, MDefinition **def) |
|
2406 { |
|
2407 if (inDeadCode()) { |
|
2408 *def = nullptr; |
|
2409 return true; |
|
2410 } |
|
2411 |
|
2412 JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0); |
|
2413 unsigned globalDataOffset = module().exitIndexToGlobalDataOffset(exitIndex); |
|
2414 |
|
2415 MAsmJSLoadFFIFunc *ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset); |
|
2416 curBlock_->add(ptrFun); |
|
2417 |
|
2418 return callPrivate(MAsmJSCall::Callee(ptrFun), call, returnType, def); |
|
2419 } |
|
2420 |
|
2421 bool builtinCall(AsmJSImmKind builtin, const Call &call, MIRType returnType, MDefinition **def) |
|
2422 { |
|
2423 return callPrivate(MAsmJSCall::Callee(builtin), call, returnType, def); |
|
2424 } |
|
2425 |
|
2426 /*********************************************** Control flow generation */ |
|
2427 |
|
2428 inline bool inDeadCode() const { |
|
2429 return curBlock_ == nullptr; |
|
2430 } |
|
2431 |
|
2432 void returnExpr(MDefinition *expr) |
|
2433 { |
|
2434 if (inDeadCode()) |
|
2435 return; |
|
2436 MAsmJSReturn *ins = MAsmJSReturn::New(alloc(), expr); |
|
2437 curBlock_->end(ins); |
|
2438 curBlock_ = nullptr; |
|
2439 } |
|
2440 |
|
2441 void returnVoid() |
|
2442 { |
|
2443 if (inDeadCode()) |
|
2444 return; |
|
2445 MAsmJSVoidReturn *ins = MAsmJSVoidReturn::New(alloc()); |
|
2446 curBlock_->end(ins); |
|
2447 curBlock_ = nullptr; |
|
2448 } |
|
2449 |
|
2450 bool branchAndStartThen(MDefinition *cond, MBasicBlock **thenBlock, MBasicBlock **elseBlock, |
|
2451 ParseNode *thenPn, ParseNode* elsePn) |
|
2452 { |
|
2453 if (inDeadCode()) |
|
2454 return true; |
|
2455 |
|
2456 bool hasThenBlock = *thenBlock != nullptr; |
|
2457 bool hasElseBlock = *elseBlock != nullptr; |
|
2458 |
|
2459 if (!hasThenBlock && !newBlock(curBlock_, thenBlock, thenPn)) |
|
2460 return false; |
|
2461 if (!hasElseBlock && !newBlock(curBlock_, elseBlock, thenPn)) |
|
2462 return false; |
|
2463 |
|
2464 curBlock_->end(MTest::New(alloc(), cond, *thenBlock, *elseBlock)); |
|
2465 |
|
2466 // Only add as a predecessor if newBlock hasn't been called (newBlock adds the predecessor for us). |
|
2467 if (hasThenBlock && !(*thenBlock)->addPredecessor(alloc(), curBlock_)) |
|
2468 return false; |
|
2469 if (hasElseBlock && !(*elseBlock)->addPredecessor(alloc(), curBlock_)) |
|
2470 return false; |
|
2471 |
|
2472 curBlock_ = *thenBlock; |
|
2473 mirGraph().moveBlockToEnd(curBlock_); |
|
2474 return true; |
|
2475 } |
|
2476 |
|
2477 void assertCurrentBlockIs(MBasicBlock *block) { |
|
2478 if (inDeadCode()) |
|
2479 return; |
|
2480 JS_ASSERT(curBlock_ == block); |
|
2481 } |
|
2482 |
|
2483 bool appendThenBlock(BlockVector *thenBlocks) |
|
2484 { |
|
2485 if (inDeadCode()) |
|
2486 return true; |
|
2487 return thenBlocks->append(curBlock_); |
|
2488 } |
|
2489 |
|
2490 bool joinIf(const BlockVector &thenBlocks, MBasicBlock *joinBlock) |
|
2491 { |
|
2492 if (!joinBlock) |
|
2493 return true; |
|
2494 JS_ASSERT_IF(curBlock_, thenBlocks.back() == curBlock_); |
|
2495 for (size_t i = 0; i < thenBlocks.length(); i++) { |
|
2496 thenBlocks[i]->end(MGoto::New(alloc(), joinBlock)); |
|
2497 if (!joinBlock->addPredecessor(alloc(), thenBlocks[i])) |
|
2498 return false; |
|
2499 } |
|
2500 curBlock_ = joinBlock; |
|
2501 mirGraph().moveBlockToEnd(curBlock_); |
|
2502 return true; |
|
2503 } |
|
2504 |
|
2505 void switchToElse(MBasicBlock *elseBlock) |
|
2506 { |
|
2507 if (!elseBlock) |
|
2508 return; |
|
2509 curBlock_ = elseBlock; |
|
2510 mirGraph().moveBlockToEnd(curBlock_); |
|
2511 } |
|
2512 |
|
2513 bool joinIfElse(const BlockVector &thenBlocks, ParseNode *pn) |
|
2514 { |
|
2515 if (inDeadCode() && thenBlocks.empty()) |
|
2516 return true; |
|
2517 MBasicBlock *pred = curBlock_ ? curBlock_ : thenBlocks[0]; |
|
2518 MBasicBlock *join; |
|
2519 if (!newBlock(pred, &join, pn)) |
|
2520 return false; |
|
2521 if (curBlock_) |
|
2522 curBlock_->end(MGoto::New(alloc(), join)); |
|
2523 for (size_t i = 0; i < thenBlocks.length(); i++) { |
|
2524 thenBlocks[i]->end(MGoto::New(alloc(), join)); |
|
2525 if (pred == curBlock_ || i > 0) { |
|
2526 if (!join->addPredecessor(alloc(), thenBlocks[i])) |
|
2527 return false; |
|
2528 } |
|
2529 } |
|
2530 curBlock_ = join; |
|
2531 return true; |
|
2532 } |
|
2533 |
|
2534 void pushPhiInput(MDefinition *def) |
|
2535 { |
|
2536 if (inDeadCode()) |
|
2537 return; |
|
2538 JS_ASSERT(curBlock_->stackDepth() == info().firstStackSlot()); |
|
2539 curBlock_->push(def); |
|
2540 } |
|
2541 |
|
2542 MDefinition *popPhiOutput() |
|
2543 { |
|
2544 if (inDeadCode()) |
|
2545 return nullptr; |
|
2546 JS_ASSERT(curBlock_->stackDepth() == info().firstStackSlot() + 1); |
|
2547 return curBlock_->pop(); |
|
2548 } |
|
2549 |
|
2550 bool startPendingLoop(ParseNode *pn, MBasicBlock **loopEntry, ParseNode *bodyStmt) |
|
2551 { |
|
2552 if (!loopStack_.append(pn) || !breakableStack_.append(pn)) |
|
2553 return false; |
|
2554 JS_ASSERT_IF(curBlock_, curBlock_->loopDepth() == loopStack_.length() - 1); |
|
2555 if (inDeadCode()) { |
|
2556 *loopEntry = nullptr; |
|
2557 return true; |
|
2558 } |
|
2559 *loopEntry = MBasicBlock::NewAsmJS(mirGraph(), info(), curBlock_, |
|
2560 MBasicBlock::PENDING_LOOP_HEADER); |
|
2561 if (!*loopEntry) |
|
2562 return false; |
|
2563 mirGraph().addBlock(*loopEntry); |
|
2564 noteBasicBlockPosition(*loopEntry, bodyStmt); |
|
2565 (*loopEntry)->setLoopDepth(loopStack_.length()); |
|
2566 curBlock_->end(MGoto::New(alloc(), *loopEntry)); |
|
2567 curBlock_ = *loopEntry; |
|
2568 return true; |
|
2569 } |
|
2570 |
|
2571 bool branchAndStartLoopBody(MDefinition *cond, MBasicBlock **afterLoop, ParseNode *bodyPn, ParseNode *afterPn) |
|
2572 { |
|
2573 if (inDeadCode()) { |
|
2574 *afterLoop = nullptr; |
|
2575 return true; |
|
2576 } |
|
2577 JS_ASSERT(curBlock_->loopDepth() > 0); |
|
2578 MBasicBlock *body; |
|
2579 if (!newBlock(curBlock_, &body, bodyPn)) |
|
2580 return false; |
|
2581 if (cond->isConstant() && cond->toConstant()->valueToBoolean()) { |
|
2582 *afterLoop = nullptr; |
|
2583 curBlock_->end(MGoto::New(alloc(), body)); |
|
2584 } else { |
|
2585 if (!newBlockWithDepth(curBlock_, curBlock_->loopDepth() - 1, afterLoop, afterPn)) |
|
2586 return false; |
|
2587 curBlock_->end(MTest::New(alloc(), cond, body, *afterLoop)); |
|
2588 } |
|
2589 curBlock_ = body; |
|
2590 return true; |
|
2591 } |
|
2592 |
|
2593 private: |
|
2594 ParseNode *popLoop() |
|
2595 { |
|
2596 ParseNode *pn = loopStack_.popCopy(); |
|
2597 JS_ASSERT(!unlabeledContinues_.has(pn)); |
|
2598 breakableStack_.popBack(); |
|
2599 return pn; |
|
2600 } |
|
2601 |
|
2602 public: |
|
2603 bool closeLoop(MBasicBlock *loopEntry, MBasicBlock *afterLoop) |
|
2604 { |
|
2605 ParseNode *pn = popLoop(); |
|
2606 if (!loopEntry) { |
|
2607 JS_ASSERT(!afterLoop); |
|
2608 JS_ASSERT(inDeadCode()); |
|
2609 JS_ASSERT(!unlabeledBreaks_.has(pn)); |
|
2610 return true; |
|
2611 } |
|
2612 JS_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1); |
|
2613 JS_ASSERT_IF(afterLoop, afterLoop->loopDepth() == loopStack_.length()); |
|
2614 if (curBlock_) { |
|
2615 JS_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1); |
|
2616 curBlock_->end(MGoto::New(alloc(), loopEntry)); |
|
2617 if (!loopEntry->setBackedgeAsmJS(curBlock_)) |
|
2618 return false; |
|
2619 } |
|
2620 curBlock_ = afterLoop; |
|
2621 if (curBlock_) |
|
2622 mirGraph().moveBlockToEnd(curBlock_); |
|
2623 return bindUnlabeledBreaks(pn); |
|
2624 } |
|
2625 |
|
2626 bool branchAndCloseDoWhileLoop(MDefinition *cond, MBasicBlock *loopEntry, ParseNode *afterLoopStmt) |
|
2627 { |
|
2628 ParseNode *pn = popLoop(); |
|
2629 if (!loopEntry) { |
|
2630 JS_ASSERT(inDeadCode()); |
|
2631 JS_ASSERT(!unlabeledBreaks_.has(pn)); |
|
2632 return true; |
|
2633 } |
|
2634 JS_ASSERT(loopEntry->loopDepth() == loopStack_.length() + 1); |
|
2635 if (curBlock_) { |
|
2636 JS_ASSERT(curBlock_->loopDepth() == loopStack_.length() + 1); |
|
2637 if (cond->isConstant()) { |
|
2638 if (cond->toConstant()->valueToBoolean()) { |
|
2639 curBlock_->end(MGoto::New(alloc(), loopEntry)); |
|
2640 if (!loopEntry->setBackedgeAsmJS(curBlock_)) |
|
2641 return false; |
|
2642 curBlock_ = nullptr; |
|
2643 } else { |
|
2644 MBasicBlock *afterLoop; |
|
2645 if (!newBlock(curBlock_, &afterLoop, afterLoopStmt)) |
|
2646 return false; |
|
2647 curBlock_->end(MGoto::New(alloc(), afterLoop)); |
|
2648 curBlock_ = afterLoop; |
|
2649 } |
|
2650 } else { |
|
2651 MBasicBlock *afterLoop; |
|
2652 if (!newBlock(curBlock_, &afterLoop, afterLoopStmt)) |
|
2653 return false; |
|
2654 curBlock_->end(MTest::New(alloc(), cond, loopEntry, afterLoop)); |
|
2655 if (!loopEntry->setBackedgeAsmJS(curBlock_)) |
|
2656 return false; |
|
2657 curBlock_ = afterLoop; |
|
2658 } |
|
2659 } |
|
2660 return bindUnlabeledBreaks(pn); |
|
2661 } |
|
2662 |
|
2663 bool bindContinues(ParseNode *pn, const LabelVector *maybeLabels) |
|
2664 { |
|
2665 bool createdJoinBlock = false; |
|
2666 if (UnlabeledBlockMap::Ptr p = unlabeledContinues_.lookup(pn)) { |
|
2667 if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock, pn)) |
|
2668 return false; |
|
2669 unlabeledContinues_.remove(p); |
|
2670 } |
|
2671 return bindLabeledBreaksOrContinues(maybeLabels, &labeledContinues_, &createdJoinBlock, pn); |
|
2672 } |
|
2673 |
|
2674 bool bindLabeledBreaks(const LabelVector *maybeLabels, ParseNode *pn) |
|
2675 { |
|
2676 bool createdJoinBlock = false; |
|
2677 return bindLabeledBreaksOrContinues(maybeLabels, &labeledBreaks_, &createdJoinBlock, pn); |
|
2678 } |
|
2679 |
|
2680 bool addBreak(PropertyName *maybeLabel) { |
|
2681 if (maybeLabel) |
|
2682 return addBreakOrContinue(maybeLabel, &labeledBreaks_); |
|
2683 return addBreakOrContinue(breakableStack_.back(), &unlabeledBreaks_); |
|
2684 } |
|
2685 |
|
2686 bool addContinue(PropertyName *maybeLabel) { |
|
2687 if (maybeLabel) |
|
2688 return addBreakOrContinue(maybeLabel, &labeledContinues_); |
|
2689 return addBreakOrContinue(loopStack_.back(), &unlabeledContinues_); |
|
2690 } |
|
2691 |
|
2692 bool startSwitch(ParseNode *pn, MDefinition *expr, int32_t low, int32_t high, |
|
2693 MBasicBlock **switchBlock) |
|
2694 { |
|
2695 if (!breakableStack_.append(pn)) |
|
2696 return false; |
|
2697 if (inDeadCode()) { |
|
2698 *switchBlock = nullptr; |
|
2699 return true; |
|
2700 } |
|
2701 curBlock_->end(MTableSwitch::New(alloc(), expr, low, high)); |
|
2702 *switchBlock = curBlock_; |
|
2703 curBlock_ = nullptr; |
|
2704 return true; |
|
2705 } |
|
2706 |
|
2707 bool startSwitchCase(MBasicBlock *switchBlock, MBasicBlock **next, ParseNode *pn) |
|
2708 { |
|
2709 if (!switchBlock) { |
|
2710 *next = nullptr; |
|
2711 return true; |
|
2712 } |
|
2713 if (!newBlock(switchBlock, next, pn)) |
|
2714 return false; |
|
2715 if (curBlock_) { |
|
2716 curBlock_->end(MGoto::New(alloc(), *next)); |
|
2717 if (!(*next)->addPredecessor(alloc(), curBlock_)) |
|
2718 return false; |
|
2719 } |
|
2720 curBlock_ = *next; |
|
2721 return true; |
|
2722 } |
|
2723 |
|
2724 bool startSwitchDefault(MBasicBlock *switchBlock, BlockVector *cases, MBasicBlock **defaultBlock, ParseNode *pn) |
|
2725 { |
|
2726 if (!startSwitchCase(switchBlock, defaultBlock, pn)) |
|
2727 return false; |
|
2728 if (!*defaultBlock) |
|
2729 return true; |
|
2730 mirGraph().moveBlockToEnd(*defaultBlock); |
|
2731 return true; |
|
2732 } |
|
2733 |
|
2734 bool joinSwitch(MBasicBlock *switchBlock, const BlockVector &cases, MBasicBlock *defaultBlock) |
|
2735 { |
|
2736 ParseNode *pn = breakableStack_.popCopy(); |
|
2737 if (!switchBlock) |
|
2738 return true; |
|
2739 MTableSwitch *mir = switchBlock->lastIns()->toTableSwitch(); |
|
2740 size_t defaultIndex = mir->addDefault(defaultBlock); |
|
2741 for (unsigned i = 0; i < cases.length(); i++) { |
|
2742 if (!cases[i]) |
|
2743 mir->addCase(defaultIndex); |
|
2744 else |
|
2745 mir->addCase(mir->addSuccessor(cases[i])); |
|
2746 } |
|
2747 if (curBlock_) { |
|
2748 MBasicBlock *next; |
|
2749 if (!newBlock(curBlock_, &next, pn)) |
|
2750 return false; |
|
2751 curBlock_->end(MGoto::New(alloc(), next)); |
|
2752 curBlock_ = next; |
|
2753 } |
|
2754 return bindUnlabeledBreaks(pn); |
|
2755 } |
|
2756 |
|
2757 /*************************************************************************/ |
|
2758 |
|
2759 MIRGenerator *extractMIR() |
|
2760 { |
|
2761 JS_ASSERT(mirGen_ != nullptr); |
|
2762 MIRGenerator *mirGen = mirGen_; |
|
2763 mirGen_ = nullptr; |
|
2764 return mirGen; |
|
2765 } |
|
2766 |
|
2767 /*************************************************************************/ |
|
2768 private: |
|
2769 void noteBasicBlockPosition(MBasicBlock *blk, ParseNode *pn) |
|
2770 { |
|
2771 #if defined(JS_ION_PERF) |
|
2772 if (pn) { |
|
2773 unsigned line = 0U, column = 0U; |
|
2774 m().tokenStream().srcCoords.lineNumAndColumnIndex(pn->pn_pos.begin, &line, &column); |
|
2775 blk->setLineno(line); |
|
2776 blk->setColumnIndex(column); |
|
2777 } |
|
2778 #endif |
|
2779 } |
|
2780 |
|
2781 bool newBlockWithDepth(MBasicBlock *pred, unsigned loopDepth, MBasicBlock **block, ParseNode *pn) |
|
2782 { |
|
2783 *block = MBasicBlock::NewAsmJS(mirGraph(), info(), pred, MBasicBlock::NORMAL); |
|
2784 if (!*block) |
|
2785 return false; |
|
2786 noteBasicBlockPosition(*block, pn); |
|
2787 mirGraph().addBlock(*block); |
|
2788 (*block)->setLoopDepth(loopDepth); |
|
2789 return true; |
|
2790 } |
|
2791 |
|
2792 bool newBlock(MBasicBlock *pred, MBasicBlock **block, ParseNode *pn) |
|
2793 { |
|
2794 return newBlockWithDepth(pred, loopStack_.length(), block, pn); |
|
2795 } |
|
2796 |
|
2797 bool bindBreaksOrContinues(BlockVector *preds, bool *createdJoinBlock, ParseNode *pn) |
|
2798 { |
|
2799 for (unsigned i = 0; i < preds->length(); i++) { |
|
2800 MBasicBlock *pred = (*preds)[i]; |
|
2801 if (*createdJoinBlock) { |
|
2802 pred->end(MGoto::New(alloc(), curBlock_)); |
|
2803 if (!curBlock_->addPredecessor(alloc(), pred)) |
|
2804 return false; |
|
2805 } else { |
|
2806 MBasicBlock *next; |
|
2807 if (!newBlock(pred, &next, pn)) |
|
2808 return false; |
|
2809 pred->end(MGoto::New(alloc(), next)); |
|
2810 if (curBlock_) { |
|
2811 curBlock_->end(MGoto::New(alloc(), next)); |
|
2812 if (!next->addPredecessor(alloc(), curBlock_)) |
|
2813 return false; |
|
2814 } |
|
2815 curBlock_ = next; |
|
2816 *createdJoinBlock = true; |
|
2817 } |
|
2818 JS_ASSERT(curBlock_->begin() == curBlock_->end()); |
|
2819 if (!mirGen_->ensureBallast()) |
|
2820 return false; |
|
2821 } |
|
2822 preds->clear(); |
|
2823 return true; |
|
2824 } |
|
2825 |
|
2826 bool bindLabeledBreaksOrContinues(const LabelVector *maybeLabels, LabeledBlockMap *map, |
|
2827 bool *createdJoinBlock, ParseNode *pn) |
|
2828 { |
|
2829 if (!maybeLabels) |
|
2830 return true; |
|
2831 const LabelVector &labels = *maybeLabels; |
|
2832 for (unsigned i = 0; i < labels.length(); i++) { |
|
2833 if (LabeledBlockMap::Ptr p = map->lookup(labels[i])) { |
|
2834 if (!bindBreaksOrContinues(&p->value(), createdJoinBlock, pn)) |
|
2835 return false; |
|
2836 map->remove(p); |
|
2837 } |
|
2838 if (!mirGen_->ensureBallast()) |
|
2839 return false; |
|
2840 } |
|
2841 return true; |
|
2842 } |
|
2843 |
|
2844 template <class Key, class Map> |
|
2845 bool addBreakOrContinue(Key key, Map *map) |
|
2846 { |
|
2847 if (inDeadCode()) |
|
2848 return true; |
|
2849 typename Map::AddPtr p = map->lookupForAdd(key); |
|
2850 if (!p) { |
|
2851 BlockVector empty(m().cx()); |
|
2852 if (!map->add(p, key, Move(empty))) |
|
2853 return false; |
|
2854 } |
|
2855 if (!p->value().append(curBlock_)) |
|
2856 return false; |
|
2857 curBlock_ = nullptr; |
|
2858 return true; |
|
2859 } |
|
2860 |
|
2861 bool bindUnlabeledBreaks(ParseNode *pn) |
|
2862 { |
|
2863 bool createdJoinBlock = false; |
|
2864 if (UnlabeledBlockMap::Ptr p = unlabeledBreaks_.lookup(pn)) { |
|
2865 if (!bindBreaksOrContinues(&p->value(), &createdJoinBlock, pn)) |
|
2866 return false; |
|
2867 unlabeledBreaks_.remove(p); |
|
2868 } |
|
2869 return true; |
|
2870 } |
|
2871 }; |
|
2872 |
|
2873 } /* anonymous namespace */ |
|
2874 |
|
2875 /*****************************************************************************/ |
|
2876 // asm.js type-checking and code-generation algorithm |
|
2877 |
|
2878 static bool |
|
2879 CheckIdentifier(ModuleCompiler &m, ParseNode *usepn, PropertyName *name) |
|
2880 { |
|
2881 if (name == m.cx()->names().arguments || name == m.cx()->names().eval) |
|
2882 return m.failName(usepn, "'%s' is not an allowed identifier", name); |
|
2883 return true; |
|
2884 } |
|
2885 |
|
2886 static bool |
|
2887 CheckModuleLevelName(ModuleCompiler &m, ParseNode *usepn, PropertyName *name) |
|
2888 { |
|
2889 if (!CheckIdentifier(m, usepn, name)) |
|
2890 return false; |
|
2891 |
|
2892 if (name == m.moduleFunctionName() || |
|
2893 name == m.module().globalArgumentName() || |
|
2894 name == m.module().importArgumentName() || |
|
2895 name == m.module().bufferArgumentName() || |
|
2896 m.lookupGlobal(name)) |
|
2897 { |
|
2898 return m.failName(usepn, "duplicate name '%s' not allowed", name); |
|
2899 } |
|
2900 |
|
2901 return true; |
|
2902 } |
|
2903 |
|
2904 static bool |
|
2905 CheckFunctionHead(ModuleCompiler &m, ParseNode *fn) |
|
2906 { |
|
2907 JSFunction *fun = FunctionObject(fn); |
|
2908 if (fun->hasRest()) |
|
2909 return m.fail(fn, "rest args not allowed"); |
|
2910 if (fun->isExprClosure()) |
|
2911 return m.fail(fn, "expression closures not allowed"); |
|
2912 if (fn->pn_funbox->hasDestructuringArgs) |
|
2913 return m.fail(fn, "destructuring args not allowed"); |
|
2914 return true; |
|
2915 } |
|
2916 |
|
2917 static bool |
|
2918 CheckArgument(ModuleCompiler &m, ParseNode *arg, PropertyName **name) |
|
2919 { |
|
2920 if (!IsDefinition(arg)) |
|
2921 return m.fail(arg, "duplicate argument name not allowed"); |
|
2922 |
|
2923 if (arg->pn_dflags & PND_DEFAULT) |
|
2924 return m.fail(arg, "default arguments not allowed"); |
|
2925 |
|
2926 if (!CheckIdentifier(m, arg, arg->name())) |
|
2927 return false; |
|
2928 |
|
2929 *name = arg->name(); |
|
2930 return true; |
|
2931 } |
|
2932 |
|
2933 static bool |
|
2934 CheckModuleArgument(ModuleCompiler &m, ParseNode *arg, PropertyName **name) |
|
2935 { |
|
2936 if (!CheckArgument(m, arg, name)) |
|
2937 return false; |
|
2938 |
|
2939 if (!CheckModuleLevelName(m, arg, *name)) |
|
2940 return false; |
|
2941 |
|
2942 return true; |
|
2943 } |
|
2944 |
|
2945 static bool |
|
2946 CheckModuleArguments(ModuleCompiler &m, ParseNode *fn) |
|
2947 { |
|
2948 unsigned numFormals; |
|
2949 ParseNode *arg1 = FunctionArgsList(fn, &numFormals); |
|
2950 ParseNode *arg2 = arg1 ? NextNode(arg1) : nullptr; |
|
2951 ParseNode *arg3 = arg2 ? NextNode(arg2) : nullptr; |
|
2952 |
|
2953 if (numFormals > 3) |
|
2954 return m.fail(fn, "asm.js modules takes at most 3 argument"); |
|
2955 |
|
2956 PropertyName *arg1Name = nullptr; |
|
2957 if (numFormals >= 1 && !CheckModuleArgument(m, arg1, &arg1Name)) |
|
2958 return false; |
|
2959 m.initGlobalArgumentName(arg1Name); |
|
2960 |
|
2961 PropertyName *arg2Name = nullptr; |
|
2962 if (numFormals >= 2 && !CheckModuleArgument(m, arg2, &arg2Name)) |
|
2963 return false; |
|
2964 m.initImportArgumentName(arg2Name); |
|
2965 |
|
2966 PropertyName *arg3Name = nullptr; |
|
2967 if (numFormals >= 3 && !CheckModuleArgument(m, arg3, &arg3Name)) |
|
2968 return false; |
|
2969 m.initBufferArgumentName(arg3Name); |
|
2970 |
|
2971 return true; |
|
2972 } |
|
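// Editorial example of a well-formed module header with the (at most three)
// formals checked above; the parameter names themselves are arbitrary:
//
//   function MyModule(stdlib, foreign, heap) {
//       "use asm";
//       ...
//   }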
2973 |
|
2974 static bool |
|
2975 CheckPrecedingStatements(ModuleCompiler &m, ParseNode *stmtList) |
|
2976 { |
|
2977 JS_ASSERT(stmtList->isKind(PNK_STATEMENTLIST)); |
|
2978 |
|
2979 if (ListLength(stmtList) != 0) |
|
2980 return m.fail(ListHead(stmtList), "invalid asm.js statement"); |
|
2981 |
|
2982 return true; |
|
2983 } |
|
2984 |
|
2985 static bool |
|
2986 CheckGlobalVariableInitConstant(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode, |
|
2987 bool isConst) |
|
2988 { |
|
2989 NumLit literal = ExtractNumericLiteral(m, initNode); |
|
2990 if (!literal.hasType()) |
|
2991 return m.fail(initNode, "global initializer is out of representable integer range"); |
|
2992 |
|
2993 return m.addGlobalVarInit(varName, literal.varType(), literal.value(), isConst); |
|
2994 } |
|
2995 |
|
2996 static bool |
|
2997 CheckTypeAnnotation(ModuleCompiler &m, ParseNode *coercionNode, AsmJSCoercion *coercion, |
|
2998 ParseNode **coercedExpr = nullptr) |
|
2999 { |
|
3000 switch (coercionNode->getKind()) { |
|
3001 case PNK_BITOR: { |
|
3002 ParseNode *rhs = BinaryRight(coercionNode); |
|
3003 uint32_t i; |
|
3004 if (!IsLiteralInt(m, rhs, &i) || i != 0) |
|
3005 return m.fail(rhs, "must use |0 for argument/return coercion"); |
|
3006 *coercion = AsmJS_ToInt32; |
|
3007 if (coercedExpr) |
|
3008 *coercedExpr = BinaryLeft(coercionNode); |
|
3009 return true; |
|
3010 } |
|
3011 case PNK_POS: { |
|
3012 *coercion = AsmJS_ToNumber; |
|
3013 if (coercedExpr) |
|
3014 *coercedExpr = UnaryKid(coercionNode); |
|
3015 return true; |
|
3016 } |
|
3017 case PNK_CALL: { |
|
3018 *coercion = AsmJS_FRound; |
|
3019 if (!IsFloatCoercion(m, coercionNode, coercedExpr)) |
|
3020 return m.fail(coercionNode, "call must be to fround coercion"); |
|
3021 return true; |
|
3022 } |
|
3023 default:; |
|
3024 } |
|
3025 |
|
3026 return m.fail(coercionNode, "must be of the form +x, fround(x) or x|0"); |
|
3027 } |
|
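// For reference, the three type-annotation forms accepted above (editorial
// summary of CheckTypeAnnotation):
//
//   x|0        -> AsmJS_ToInt32  (int)
//   +x         -> AsmJS_ToNumber (double)
//   fround(x)  -> AsmJS_FRound   (float), where 'fround' is a module-level
//                 name bound to the stdlib's Math.fround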
3028 |
|
3029 static bool |
|
3030 CheckGlobalVariableImportExpr(ModuleCompiler &m, PropertyName *varName, AsmJSCoercion coercion, |
|
3031 ParseNode *coercedExpr, bool isConst) |
|
3032 { |
|
3033 if (!coercedExpr->isKind(PNK_DOT)) |
|
3034 return m.failName(coercedExpr, "invalid import expression for global '%s'", varName); |
|
3035 |
|
3036 ParseNode *base = DotBase(coercedExpr); |
|
3037 PropertyName *field = DotMember(coercedExpr); |
|
3038 |
|
3039 PropertyName *importName = m.module().importArgumentName(); |
|
3040 if (!importName) |
|
3041 return m.fail(coercedExpr, "cannot import without an asm.js foreign parameter"); |
|
3042 if (!IsUseOfName(base, importName)) |
|
3043 return m.failName(coercedExpr, "base of import expression must be '%s'", importName); |
|
3044 |
|
3045 return m.addGlobalVarImport(varName, field, coercion, isConst); |
|
3046 } |
|
3047 |
|
3048 static bool |
|
3049 CheckGlobalVariableInitImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode, |
|
3050 bool isConst) |
|
3051 { |
|
3052 AsmJSCoercion coercion; |
|
3053 ParseNode *coercedExpr; |
|
3054 if (!CheckTypeAnnotation(m, initNode, &coercion, &coercedExpr)) |
|
3055 return false; |
|
3056 return CheckGlobalVariableImportExpr(m, varName, coercion, coercedExpr, isConst); |
|
3057 } |
|
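// Editorial examples of import initializers handled above, assuming the
// module's foreign parameter is named 'foreign' and 'fround' is bound to
// the stdlib's Math.fround:
//
//   var n = foreign.count|0;     // int import
//   var s = +foreign.scale;      // double import
//   var t = fround(foreign.t);   // float import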
3058 |
|
3059 static bool |
|
3060 CheckNewArrayView(ModuleCompiler &m, PropertyName *varName, ParseNode *newExpr) |
|
3061 { |
|
3062 ParseNode *ctorExpr = ListHead(newExpr); |
|
3063 if (!ctorExpr->isKind(PNK_DOT)) |
|
3064 return m.fail(ctorExpr, "only valid 'new' import is 'new global.*Array(buf)'"); |
|
3065 |
|
3066 ParseNode *base = DotBase(ctorExpr); |
|
3067 PropertyName *field = DotMember(ctorExpr); |
|
3068 |
|
3069 PropertyName *globalName = m.module().globalArgumentName(); |
|
3070 if (!globalName) |
|
3071 return m.fail(base, "cannot create array view without an asm.js global parameter"); |
|
3072 if (!IsUseOfName(base, globalName)) |
|
3073 return m.failName(base, "expecting '%s.*Array", globalName); |
|
3074 |
|
3075 ParseNode *bufArg = NextNode(ctorExpr); |
|
3076 if (!bufArg || NextNode(bufArg) != nullptr) |
|
3077 return m.fail(ctorExpr, "array view constructor takes exactly one argument"); |
|
3078 |
|
3079 PropertyName *bufferName = m.module().bufferArgumentName(); |
|
3080 if (!bufferName) |
|
3081 return m.fail(bufArg, "cannot create array view without an asm.js heap parameter"); |
|
3082 if (!IsUseOfName(bufArg, bufferName)) |
|
3083 return m.failName(bufArg, "argument to array view constructor must be '%s'", bufferName); |
|
3084 |
|
3085 JSAtomState &names = m.cx()->names(); |
|
3086 ArrayBufferView::ViewType type; |
|
3087 if (field == names.Int8Array) |
|
3088 type = ArrayBufferView::TYPE_INT8; |
|
3089 else if (field == names.Uint8Array) |
|
3090 type = ArrayBufferView::TYPE_UINT8; |
|
3091 else if (field == names.Int16Array) |
|
3092 type = ArrayBufferView::TYPE_INT16; |
|
3093 else if (field == names.Uint16Array) |
|
3094 type = ArrayBufferView::TYPE_UINT16; |
|
3095 else if (field == names.Int32Array) |
|
3096 type = ArrayBufferView::TYPE_INT32; |
|
3097 else if (field == names.Uint32Array) |
|
3098 type = ArrayBufferView::TYPE_UINT32; |
|
3099 else if (field == names.Float32Array) |
|
3100 type = ArrayBufferView::TYPE_FLOAT32; |
|
3101 else if (field == names.Float64Array) |
|
3102 type = ArrayBufferView::TYPE_FLOAT64; |
|
3103 else |
|
3104 return m.fail(ctorExpr, "could not match typed array name"); |
|
3105 |
|
3106 return m.addArrayView(varName, type, field); |
|
3107 } |
|
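// Editorial example of a valid array-view declaration, assuming module
// formals named (stdlib, foreign, heap):
//
//   var i32 = new stdlib.Int32Array(heap);
//   var f64 = new stdlib.Float64Array(heap);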
3108 |
|
3109 static bool |
|
3110 CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode) |
|
3111 { |
|
3112 ParseNode *base = DotBase(initNode); |
|
3113 PropertyName *field = DotMember(initNode); |
|
3114 |
|
3115 if (base->isKind(PNK_DOT)) { |
|
3116 ParseNode *global = DotBase(base); |
|
3117 PropertyName *math = DotMember(base); |
|
3118 if (!IsUseOfName(global, m.module().globalArgumentName()) || math != m.cx()->names().Math) |
|
3119 return m.fail(base, "expecting global.Math"); |
|
3120 |
|
3121 ModuleCompiler::MathBuiltin mathBuiltin; |
|
3122 if (!m.lookupStandardLibraryMathName(field, &mathBuiltin)) |
|
3123 return m.failName(initNode, "'%s' is not a standard Math builtin", field); |
|
3124 |
|
3125 switch (mathBuiltin.kind) { |
|
3126 case ModuleCompiler::MathBuiltin::Function: |
|
3127 return m.addMathBuiltinFunction(varName, mathBuiltin.u.func, field); |
|
3128 case ModuleCompiler::MathBuiltin::Constant: |
|
3129 return m.addMathBuiltinConstant(varName, mathBuiltin.u.cst, field); |
|
3130 default: |
|
3131 break; |
|
3132 } |
|
3133 MOZ_ASSUME_UNREACHABLE("unexpected or uninitialized math builtin type"); |
|
3134 } |
|
3135 |
|
3136 if (IsUseOfName(base, m.module().globalArgumentName())) { |
|
3137 if (field == m.cx()->names().NaN) |
|
3138 return m.addGlobalConstant(varName, GenericNaN(), field); |
|
3139 if (field == m.cx()->names().Infinity) |
|
3140 return m.addGlobalConstant(varName, PositiveInfinity<double>(), field); |
|
3141 return m.failName(initNode, "'%s' is not a standard global constant", field); |
|
3142 } |
|
3143 |
|
3144 if (IsUseOfName(base, m.module().importArgumentName())) |
|
3145 return m.addFFI(varName, field); |
|
3146 |
|
3147 return m.fail(initNode, "expecting c.y where c is either the global or foreign parameter"); |
|
3148 } |
|
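// Editorial examples of the dotted import forms handled above, assuming
// module formals named (stdlib, foreign, heap); whether a particular Math
// name is accepted depends on lookupStandardLibraryMathName():
//
//   var imul = stdlib.Math.imul;   // Math builtin function
//   var pi   = stdlib.Math.PI;     // Math builtin constant
//   var inf  = stdlib.Infinity;    // standard global constant
//   var log  = foreign.log;        // FFI import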
3149 |
|
3150 static bool |
|
3151 CheckModuleGlobal(ModuleCompiler &m, ParseNode *var, bool isConst) |
|
3152 { |
|
3153 if (!IsDefinition(var)) |
|
3154 return m.fail(var, "import variable names must be unique"); |
|
3155 |
|
3156 if (!CheckModuleLevelName(m, var, var->name())) |
|
3157 return false; |
|
3158 |
|
3159 ParseNode *initNode = MaybeDefinitionInitializer(var); |
|
3160 if (!initNode) |
|
3161 return m.fail(var, "module import needs initializer"); |
|
3162 |
|
3163 if (IsNumericLiteral(m, initNode)) |
|
3164 return CheckGlobalVariableInitConstant(m, var->name(), initNode, isConst); |
|
3165 |
|
3166 if (initNode->isKind(PNK_BITOR) || initNode->isKind(PNK_POS) || initNode->isKind(PNK_CALL)) |
|
3167 return CheckGlobalVariableInitImport(m, var->name(), initNode, isConst); |
|
3168 |
|
3169 if (initNode->isKind(PNK_NEW)) |
|
3170 return CheckNewArrayView(m, var->name(), initNode); |
|
3171 |
|
3172 if (initNode->isKind(PNK_DOT)) |
|
3173 return CheckGlobalDotImport(m, var->name(), initNode); |
|
3174 |
|
3175 return m.fail(initNode, "unsupported import expression"); |
|
3176 } |
|
3177 |
|
3178 static bool |
|
3179 CheckModuleGlobals(ModuleCompiler &m) |
|
3180 { |
|
3181 while (true) { |
|
3182 ParseNode *varStmt; |
|
3183 if (!ParseVarOrConstStatement(m.parser(), &varStmt)) |
|
3184 return false; |
|
3185 if (!varStmt) |
|
3186 break; |
|
3187 for (ParseNode *var = VarListHead(varStmt); var; var = NextNode(var)) { |
|
3188 if (!CheckModuleGlobal(m, var, varStmt->isKind(PNK_CONST))) |
|
3189 return false; |
|
3190 } |
|
3191 } |
|
3192 |
|
3193 return true; |
|
3194 } |
|
3195 |
|
3196 static bool |
|
3197 ArgFail(FunctionCompiler &f, PropertyName *argName, ParseNode *stmt) |
|
3198 { |
|
3199 return f.failName(stmt, "expecting argument type declaration for '%s' of the " |
|
3200 "form 'arg = arg|0' or 'arg = +arg' or 'arg = fround(arg)'", argName); |
|
3201 } |
|
3202 |
|
3203 static bool |
|
3204 CheckArgumentType(FunctionCompiler &f, ParseNode *stmt, PropertyName *name, VarType *type) |
|
3205 { |
|
3206 if (!stmt || !IsExpressionStatement(stmt)) |
|
3207 return ArgFail(f, name, stmt ? stmt : f.fn()); |
|
3208 |
|
3209 ParseNode *initNode = ExpressionStatementExpr(stmt); |
|
3210 if (!initNode || !initNode->isKind(PNK_ASSIGN)) |
|
3211 return ArgFail(f, name, stmt); |
|
3212 |
|
3213 ParseNode *argNode = BinaryLeft(initNode); |
|
3214 ParseNode *coercionNode = BinaryRight(initNode); |
|
3215 |
|
3216 if (!IsUseOfName(argNode, name)) |
|
3217 return ArgFail(f, name, stmt); |
|
3218 |
|
3219 ParseNode *coercedExpr; |
|
3220 AsmJSCoercion coercion; |
|
3221 if (!CheckTypeAnnotation(f.m(), coercionNode, &coercion, &coercedExpr)) |
|
3222 return false; |
|
3223 |
|
3224 if (!IsUseOfName(coercedExpr, name)) |
|
3225 return ArgFail(f, name, stmt); |
|
3226 |
|
3227 *type = VarType(coercion); |
|
3228 return true; |
|
3229 } |
|
3230 |
|
3231 static bool |
|
3232 CheckArguments(FunctionCompiler &f, ParseNode **stmtIter, VarTypeVector *argTypes) |
|
3233 { |
|
3234 ParseNode *stmt = *stmtIter; |
|
3235 |
|
3236 unsigned numFormals; |
|
3237 ParseNode *argpn = FunctionArgsList(f.fn(), &numFormals); |
|
3238 |
|
3239 for (unsigned i = 0; i < numFormals; i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) { |
|
3240 PropertyName *name; |
|
3241 if (!CheckArgument(f.m(), argpn, &name)) |
|
3242 return false; |
|
3243 |
|
3244 VarType type; |
|
3245 if (!CheckArgumentType(f, stmt, name, &type)) |
|
3246 return false; |
|
3247 |
|
3248 if (!argTypes->append(type)) |
|
3249 return false; |
|
3250 |
|
3251 if (!f.addFormal(argpn, name, type)) |
|
3252 return false; |
|
3253 } |
|
3254 |
|
3255 *stmtIter = stmt; |
|
3256 return true; |
|
3257 } |
|
3258 |
|
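// Infer the function's return type from its final statement: a 'return' of a
// numeric literal or coerced expression fixes the type (signed, double or float),
// while a bare 'return' or a missing return statement makes the function void.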
3259 static bool |
|
3260 CheckFinalReturn(FunctionCompiler &f, ParseNode *stmt, RetType *retType) |
|
3261 { |
|
3262 if (stmt && stmt->isKind(PNK_RETURN)) { |
|
3263 if (ParseNode *coercionNode = UnaryKid(stmt)) { |
|
3264 if (IsNumericLiteral(f.m(), coercionNode)) { |
|
3265 switch (ExtractNumericLiteral(f.m(), coercionNode).which()) { |
|
3266 case NumLit::BigUnsigned: |
|
3267 case NumLit::OutOfRangeInt: |
|
3268 return f.fail(coercionNode, "returned literal is out of integer range"); |
|
3269 case NumLit::Fixnum: |
|
3270 case NumLit::NegativeInt: |
|
3271 *retType = RetType::Signed; |
|
3272 break; |
|
3273 case NumLit::Double: |
|
3274 *retType = RetType::Double; |
|
3275 break; |
|
3276 case NumLit::Float: |
|
3277 *retType = RetType::Float; |
|
3278 break; |
|
3279 } |
|
3280 return true; |
|
3281 } |
|
3282 |
|
3283 AsmJSCoercion coercion; |
|
3284 if (!CheckTypeAnnotation(f.m(), coercionNode, &coercion)) |
|
3285 return false; |
|
3286 |
|
3287 *retType = RetType(coercion); |
|
3288 return true; |
|
3289 } |
|
3290 |
|
3291 *retType = RetType::Void; |
|
3292 return true; |
|
3293 } |
|
3294 |
|
3295 *retType = RetType::Void; |
|
3296 f.returnVoid(); |
|
3297 return true; |
|
3298 } |
|
3299 |
|
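// A local 'var' declarator must have an initializer that is either a numeric
// literal or the name of a const global literal; that initializer determines the
// variable's type for the rest of the function.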
3300 static bool |
|
3301 CheckVariable(FunctionCompiler &f, ParseNode *var) |
|
3302 { |
|
3303 if (!IsDefinition(var)) |
|
3304 return f.fail(var, "local variable names must not restate argument names"); |
|
3305 |
|
3306 PropertyName *name = var->name(); |
|
3307 |
|
3308 if (!CheckIdentifier(f.m(), var, name)) |
|
3309 return false; |
|
3310 |
|
3311 ParseNode *initNode = MaybeDefinitionInitializer(var); |
|
3312 if (!initNode) |
|
3313 return f.failName(var, "var '%s' needs explicit type declaration via an initial value", name); |
|
3314 |
|
3315 if (initNode->isKind(PNK_NAME)) { |
|
3316 PropertyName *initName = initNode->name(); |
|
3317 if (const ModuleCompiler::Global *global = f.lookupGlobal(initName)) { |
|
3318 if (global->which() != ModuleCompiler::Global::ConstantLiteral) |
|
3319 return f.failName(initNode, "'%s' isn't a possible global variable initializer, " |
|
3320 "needs to be a const numeric literal", initName); |
|
3321 return f.addVariable(var, name, global->varOrConstType(), global->constLiteralValue()); |
|
3322 } |
|
3323 return f.failName(initNode, "'%s' needs to be a global name", initName); |
|
3324 } |
|
3325 |
|
3326 if (!IsNumericLiteral(f.m(), initNode)) |
|
3327 return f.failName(initNode, "initializer for '%s' needs to be a numeric literal or a global const literal", name); |
|
3328 |
|
3329 NumLit literal = ExtractNumericLiteral(f.m(), initNode); |
|
3330 if (!literal.hasType()) |
|
3331 return f.failName(initNode, "initializer for '%s' is out of range", name); |
|
3332 |
|
3333 return f.addVariable(var, name, literal.varType(), literal.value()); |
|
3334 } |
|
3335 |
|
3336 static bool |
|
3337 CheckVariables(FunctionCompiler &f, ParseNode **stmtIter) |
|
3338 { |
|
3339 ParseNode *stmt = *stmtIter; |
|
3340 |
|
3341 for (; stmt && stmt->isKind(PNK_VAR); stmt = NextNonEmptyStatement(stmt)) { |
|
3342 for (ParseNode *var = VarListHead(stmt); var; var = NextNode(var)) { |
|
3343 if (!CheckVariable(f, var)) |
|
3344 return false; |
|
3345 } |
|
3346 } |
|
3347 |
|
3348 *stmtIter = stmt; |
|
3349 return true; |
|
3350 } |
|
3351 |
|
3352 static bool |
|
3353 CheckExpr(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type); |
|
3354 |
|
3355 static bool |
|
3356 CheckNumericLiteral(FunctionCompiler &f, ParseNode *num, MDefinition **def, Type *type) |
|
3357 { |
|
3358 NumLit literal = ExtractNumericLiteral(f.m(), num); |
|
3359 if (!literal.hasType()) |
|
3360 return f.fail(num, "numeric literal out of representable integer range"); |
|
3361 |
|
3362 *type = literal.type(); |
|
3363 *def = f.constant(literal.value(), literal.type()); |
|
3364 return true; |
|
3365 } |
|
3366 |
|
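// A bare name used as an expression may denote a local, a constant (literal or
// imported) or a mutable global variable; functions, FFIs, math builtins,
// function-pointer tables and array views are not first-class values.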
3367 static bool |
|
3368 CheckVarRef(FunctionCompiler &f, ParseNode *varRef, MDefinition **def, Type *type) |
|
3369 { |
|
3370 PropertyName *name = varRef->name(); |
|
3371 |
|
3372 if (const FunctionCompiler::Local *local = f.lookupLocal(name)) { |
|
3373 *def = f.getLocalDef(*local); |
|
3374 *type = local->type.toType(); |
|
3375 return true; |
|
3376 } |
|
3377 |
|
3378 if (const ModuleCompiler::Global *global = f.lookupGlobal(name)) { |
|
3379 switch (global->which()) { |
|
3380 case ModuleCompiler::Global::ConstantLiteral: |
|
3381 *def = f.constant(global->constLiteralValue(), global->varOrConstType().toType()); |
|
3382 *type = global->varOrConstType().toType(); |
|
3383 break; |
|
3384 case ModuleCompiler::Global::ConstantImport: |
|
3385 case ModuleCompiler::Global::Variable: |
|
3386 *def = f.loadGlobalVar(*global); |
|
3387 *type = global->varOrConstType().toType(); |
|
3388 break; |
|
3389 case ModuleCompiler::Global::Function: |
|
3390 case ModuleCompiler::Global::FFI: |
|
3391 case ModuleCompiler::Global::MathBuiltinFunction: |
|
3392 case ModuleCompiler::Global::FuncPtrTable: |
|
3393 case ModuleCompiler::Global::ArrayView: |
|
3394 return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name); |
|
3395 } |
|
3396 return true; |
|
3397 } |
|
3398 |
|
3399 return f.failName(varRef, "'%s' not found in local or asm.js module scope", name); |
|
3400 } |
|
3401 |
|
3402 static inline bool |
|
3403 IsLiteralOrConstInt(FunctionCompiler &f, ParseNode *pn, uint32_t *u32) |
|
3404 { |
|
3405 if (IsLiteralInt(f.m(), pn, u32)) |
|
3406 return true; |
|
3407 |
|
3408 if (pn->getKind() != PNK_NAME) |
|
3409 return false; |
|
3410 |
|
3411 PropertyName *name = pn->name(); |
|
3412 const ModuleCompiler::Global *global = f.lookupGlobal(name); |
|
3413 if (!global || global->which() != ModuleCompiler::Global::ConstantLiteral) |
|
3414 return false; |
|
3415 |
|
3416 const Value &v = global->constLiteralValue(); |
|
3417 if (!v.isInt32()) |
|
3418 return false; |
|
3419 |
|
3420 *u32 = (uint32_t) v.toInt32(); |
|
3421 return true; |
|
3422 } |
|
3423 |
|
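// For an index of the form 'expr & mask' with a constant mask, fold the mask into
// the access's combined mask and strip the '&' node from the index expression.
// When the mask alone already keeps the index within the minimum heap length, the
// access is additionally flagged as not needing a bounds check.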
3424 static bool |
|
3425 FoldMaskedArrayIndex(FunctionCompiler &f, ParseNode **indexExpr, int32_t *mask, |
|
3426 NeedsBoundsCheck *needsBoundsCheck) |
|
3427 { |
|
3428 ParseNode *indexNode = BinaryLeft(*indexExpr); |
|
3429 ParseNode *maskNode = BinaryRight(*indexExpr); |
|
3430 |
|
3431 uint32_t mask2; |
|
3432 if (IsLiteralOrConstInt(f, maskNode, &mask2)) { |
|
3433 // Flag the access to skip the bounds check if the mask ensures that an 'out of |
|
3434 // bounds' access cannot occur based on the current heap length constraint. |
|
3435 if (mask2 == 0 || |
|
3436 CountLeadingZeroes32(f.m().minHeapLength() - 1) <= CountLeadingZeroes32(mask2)) { |
|
3437 *needsBoundsCheck = NO_BOUNDS_CHECK; |
|
3438 } |
|
3439 *mask &= mask2; |
|
3440 *indexExpr = indexNode; |
|
3441 return true; |
|
3442 } |
|
3443 |
|
3444 return false; |
|
3445 } |
|
3446 |
|
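// Validate a heap access 'view[index]'. The index is either a constant or an
// expression shifted right by the view's element-size shift (no shift for 8-bit
// views), optionally pre-masked, e.g. HEAP32[(i & 0xffff) >> 2]. Constant indices
// raise the required minimum heap length and skip the bounds check.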
3447 static bool |
|
3448 CheckArrayAccess(FunctionCompiler &f, ParseNode *elem, ArrayBufferView::ViewType *viewType, |
|
3449 MDefinition **def, NeedsBoundsCheck *needsBoundsCheck) |
|
3450 { |
|
3451 ParseNode *viewName = ElemBase(elem); |
|
3452 ParseNode *indexExpr = ElemIndex(elem); |
|
3453 *needsBoundsCheck = NEEDS_BOUNDS_CHECK; |
|
3454 |
|
3455 if (!viewName->isKind(PNK_NAME)) |
|
3456 return f.fail(viewName, "base of array access must be a typed array view name"); |
|
3457 |
|
3458 const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name()); |
|
3459 if (!global || global->which() != ModuleCompiler::Global::ArrayView) |
|
3460 return f.fail(viewName, "base of array access must be a typed array view name"); |
|
3461 |
|
3462 *viewType = global->viewType(); |
|
3463 |
|
3464 uint32_t pointer; |
|
3465 if (IsLiteralOrConstInt(f, indexExpr, &pointer)) { |
|
3466 if (pointer > (uint32_t(INT32_MAX) >> TypedArrayShift(*viewType))) |
|
3467 return f.fail(indexExpr, "constant index out of range"); |
|
3468 pointer <<= TypedArrayShift(*viewType); |
|
3469 // It is adequate to note pointer+1 rather than rounding up to the next |
|
3470 // access-size boundary because access is always aligned and the constraint |
|
3471 // will be rounded up to a larger alignment later. |
|
3472 f.m().requireHeapLengthToBeAtLeast(uint32_t(pointer) + 1); |
|
3473 *needsBoundsCheck = NO_BOUNDS_CHECK; |
|
3474 *def = f.constant(Int32Value(pointer), Type::Int); |
|
3475 return true; |
|
3476 } |
|
3477 |
|
3478 // Mask off the low bits to account for the clearing effect of a right shift |
|
3479 // followed by the left shift implicit in the array access. E.g., H32[i>>2] |
|
3480 // loses the low two bits. |
|
3481 int32_t mask = ~((uint32_t(1) << TypedArrayShift(*viewType)) - 1); |
|
3482 |
|
3483 MDefinition *pointerDef; |
|
3484 if (indexExpr->isKind(PNK_RSH)) { |
|
3485 ParseNode *shiftNode = BinaryRight(indexExpr); |
|
3486 ParseNode *pointerNode = BinaryLeft(indexExpr); |
|
3487 |
|
3488 uint32_t shift; |
|
3489 if (!IsLiteralInt(f.m(), shiftNode, &shift)) |
|
3490 return f.failf(shiftNode, "shift amount must be constant"); |
|
3491 |
|
3492 unsigned requiredShift = TypedArrayShift(*viewType); |
|
3493 if (shift != requiredShift) |
|
3494 return f.failf(shiftNode, "shift amount must be %u", requiredShift); |
|
3495 |
|
3496 if (pointerNode->isKind(PNK_BITAND)) |
|
3497 FoldMaskedArrayIndex(f, &pointerNode, &mask, needsBoundsCheck); |
|
3498 |
|
3499 // Fold a 'literal constant right shifted' now, and skip the bounds check if |
|
3500 // currently possible. This handles the optimization of many of these uses without |
|
3501 // the need for range analysis, and saves the generation of a MBitAnd op. |
|
3502 if (IsLiteralOrConstInt(f, pointerNode, &pointer) && pointer <= uint32_t(INT32_MAX)) { |
|
3503 // Cases: b[c>>n], and b[(c&m)>>n] |
|
3504 pointer &= mask; |
|
3505 if (pointer < f.m().minHeapLength()) |
|
3506 *needsBoundsCheck = NO_BOUNDS_CHECK; |
|
3507 *def = f.constant(Int32Value(pointer), Type::Int); |
|
3508 return true; |
|
3509 } |
|
3510 |
|
3511 Type pointerType; |
|
3512 if (!CheckExpr(f, pointerNode, &pointerDef, &pointerType)) |
|
3513 return false; |
|
3514 |
|
3515 if (!pointerType.isIntish()) |
|
3516 return f.failf(indexExpr, "%s is not a subtype of int", pointerType.toChars()); |
|
3517 } else { |
|
3518 if (TypedArrayShift(*viewType) != 0) |
|
3519 return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access"); |
|
3520 |
|
3521 JS_ASSERT(mask == -1); |
|
3522 bool folded = false; |
|
3523 |
|
3524 if (indexExpr->isKind(PNK_BITAND)) |
|
3525 folded = FoldMaskedArrayIndex(f, &indexExpr, &mask, needsBoundsCheck); |
|
3526 |
|
3527 Type pointerType; |
|
3528 if (!CheckExpr(f, indexExpr, &pointerDef, &pointerType)) |
|
3529 return false; |
|
3530 |
|
3531 if (folded) { |
|
3532 if (!pointerType.isIntish()) |
|
3533 return f.failf(indexExpr, "%s is not a subtype of intish", pointerType.toChars()); |
|
3534 } else { |
|
3535 if (!pointerType.isInt()) |
|
3536 return f.failf(indexExpr, "%s is not a subtype of int", pointerType.toChars()); |
|
3537 } |
|
3538 } |
|
3539 |
|
3540 // Don't generate the mask op if there is no need for it, which can happen for |
|
3541 // a shift of zero. |
|
3542 if (mask == -1) |
|
3543 *def = pointerDef; |
|
3544 else |
|
3545 *def = f.bitwise<MBitAnd>(pointerDef, f.constant(Int32Value(mask), Type::Int)); |
|
3546 |
|
3547 return true; |
|
3548 } |
|
3549 |
|
3550 static bool |
|
3551 CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type) |
|
3552 { |
|
3553 ArrayBufferView::ViewType viewType; |
|
3554 MDefinition *pointerDef; |
|
3555 NeedsBoundsCheck needsBoundsCheck; |
|
3556 if (!CheckArrayAccess(f, elem, &viewType, &pointerDef, &needsBoundsCheck)) |
|
3557 return false; |
|
3558 |
|
3559 *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck); |
|
3560 *type = TypedArrayLoadType(viewType); |
|
3561 return true; |
|
3562 } |
|
3563 |
|
3564 static bool |
|
3565 CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type) |
|
3566 { |
|
3567 ArrayBufferView::ViewType viewType; |
|
3568 MDefinition *pointerDef; |
|
3569 NeedsBoundsCheck needsBoundsCheck; |
|
3570 if (!CheckArrayAccess(f, lhs, &viewType, &pointerDef, &needsBoundsCheck)) |
|
3571 return false; |
|
3572 |
|
3573 MDefinition *rhsDef; |
|
3574 Type rhsType; |
|
3575 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
3576 return false; |
|
3577 |
|
3578 switch (viewType) { |
|
3579 case ArrayBufferView::TYPE_INT8: |
|
3580 case ArrayBufferView::TYPE_INT16: |
|
3581 case ArrayBufferView::TYPE_INT32: |
|
3582 case ArrayBufferView::TYPE_UINT8: |
|
3583 case ArrayBufferView::TYPE_UINT16: |
|
3584 case ArrayBufferView::TYPE_UINT32: |
|
3585 if (!rhsType.isIntish()) |
|
3586 return f.failf(lhs, "%s is not a subtype of intish", rhsType.toChars()); |
|
3587 break; |
|
3588 case ArrayBufferView::TYPE_FLOAT32: |
|
3589 if (rhsType.isMaybeDouble()) |
|
3590 rhsDef = f.unary<MToFloat32>(rhsDef); |
|
3591 else if (!rhsType.isFloatish()) |
|
3592 return f.failf(lhs, "%s is not a subtype of double? or floatish", rhsType.toChars()); |
|
3593 break; |
|
3594 case ArrayBufferView::TYPE_FLOAT64: |
|
3595 if (rhsType.isFloat()) |
|
3596 rhsDef = f.unary<MToDouble>(rhsDef); |
|
3597 else if (!rhsType.isMaybeDouble()) |
|
3598 return f.failf(lhs, "%s is not a subtype of float or double?", rhsType.toChars()); |
|
3599 break; |
|
3600 default: |
|
3601 MOZ_ASSUME_UNREACHABLE("Unexpected view type"); |
|
3602 } |
|
3603 |
|
3604 f.storeHeap(viewType, pointerDef, rhsDef, needsBoundsCheck); |
|
3605 |
|
3606 *def = rhsDef; |
|
3607 *type = rhsType; |
|
3608 return true; |
|
3609 } |
|
3610 |
|
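// Assignment to a name: the right-hand side's type must be a subtype of the
// target's declared type, and only locals and mutable module globals may be
// written to.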
3611 static bool |
|
3612 CheckAssignName(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type) |
|
3613 { |
|
3614 Rooted<PropertyName *> name(f.cx(), lhs->name()); |
|
3615 |
|
3616 MDefinition *rhsDef; |
|
3617 Type rhsType; |
|
3618 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
3619 return false; |
|
3620 |
|
3621 if (const FunctionCompiler::Local *lhsVar = f.lookupLocal(name)) { |
|
3622 if (!(rhsType <= lhsVar->type)) { |
|
3623 return f.failf(lhs, "%s is not a subtype of %s", |
|
3624 rhsType.toChars(), lhsVar->type.toType().toChars()); |
|
3625 } |
|
3626 f.assign(*lhsVar, rhsDef); |
|
3627 } else if (const ModuleCompiler::Global *global = f.lookupGlobal(name)) { |
|
3628 if (global->which() != ModuleCompiler::Global::Variable) |
|
3629 return f.failName(lhs, "'%s' is not a mutable variable", name); |
|
3630 if (!(rhsType <= global->varOrConstType())) { |
|
3631 return f.failf(lhs, "%s is not a subtype of %s", |
|
3632 rhsType.toChars(), global->varOrConstType().toType().toChars()); |
|
3633 } |
|
3634 f.storeGlobalVar(*global, rhsDef); |
|
3635 } else { |
|
3636 return f.failName(lhs, "'%s' not found in local or asm.js module scope", name); |
|
3637 } |
|
3638 |
|
3639 *def = rhsDef; |
|
3640 *type = rhsType; |
|
3641 return true; |
|
3642 } |
|
3643 |
|
3644 static bool |
|
3645 CheckAssign(FunctionCompiler &f, ParseNode *assign, MDefinition **def, Type *type) |
|
3646 { |
|
3647 JS_ASSERT(assign->isKind(PNK_ASSIGN)); |
|
3648 ParseNode *lhs = BinaryLeft(assign); |
|
3649 ParseNode *rhs = BinaryRight(assign); |
|
3650 |
|
3651 if (lhs->getKind() == PNK_ELEM) |
|
3652 return CheckStoreArray(f, lhs, rhs, def, type); |
|
3653 |
|
3654 if (lhs->getKind() == PNK_NAME) |
|
3655 return CheckAssignName(f, lhs, rhs, def, type); |
|
3656 |
|
3657 return f.fail(assign, "left-hand side of assignment must be a variable or array access"); |
|
3658 } |
|
3659 |
|
3660 static bool |
|
3661 CheckMathIMul(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type) |
|
3662 { |
|
3663 if (CallArgListLength(call) != 2) |
|
3664 return f.fail(call, "Math.imul must be passed 2 arguments"); |
|
3665 |
|
3666 ParseNode *lhs = CallArgList(call); |
|
3667 ParseNode *rhs = NextNode(lhs); |
|
3668 |
|
3669 MDefinition *lhsDef; |
|
3670 Type lhsType; |
|
3671 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
3672 return false; |
|
3673 |
|
3674 MDefinition *rhsDef; |
|
3675 Type rhsType; |
|
3676 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
3677 return false; |
|
3678 |
|
3679 if (!lhsType.isIntish()) |
|
3680 return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars()); |
|
3681 if (!rhsType.isIntish()) |
|
3682 return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars()); |
|
3683 if (retType != RetType::Signed) |
|
3684 return f.failf(call, "return type is signed, used as %s", retType.toType().toChars()); |
|
3685 |
|
3686 *def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer); |
|
3687 *type = Type::Signed; |
|
3688 return true; |
|
3689 } |
|
3690 |
|
3691 static bool |
|
3692 CheckMathAbs(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type) |
|
3693 { |
|
3694 if (CallArgListLength(call) != 1) |
|
3695 return f.fail(call, "Math.abs must be passed 1 argument"); |
|
3696 |
|
3697 ParseNode *arg = CallArgList(call); |
|
3698 |
|
3699 MDefinition *argDef; |
|
3700 Type argType; |
|
3701 if (!CheckExpr(f, arg, &argDef, &argType)) |
|
3702 return false; |
|
3703 |
|
3704 if (argType.isSigned()) { |
|
3705 if (retType != RetType::Signed) |
|
3706 return f.failf(call, "return type is signed, used as %s", retType.toType().toChars()); |
|
3707 *def = f.unary<MAbs>(argDef, MIRType_Int32); |
|
3708 *type = Type::Signed; |
|
3709 return true; |
|
3710 } |
|
3711 |
|
3712 if (argType.isMaybeDouble()) { |
|
3713 if (retType != RetType::Double) |
|
3714 return f.failf(call, "return type is double, used as %s", retType.toType().toChars()); |
|
3715 *def = f.unary<MAbs>(argDef, MIRType_Double); |
|
3716 *type = Type::Double; |
|
3717 return true; |
|
3718 } |
|
3719 |
|
3720 if (argType.isMaybeFloat()) { |
|
3721 if (retType != RetType::Float) |
|
3722 return f.failf(call, "return type is float, used as %s", retType.toType().toChars()); |
|
3723 *def = f.unary<MAbs>(argDef, MIRType_Float32); |
|
3724 *type = Type::Float; |
|
3725 return true; |
|
3726 } |
|
3727 |
|
3728 return f.failf(call, "%s is not a subtype of signed, float? or double?", argType.toChars()); |
|
3729 } |
|
3730 |
|
3731 static bool |
|
3732 CheckMathSqrt(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type) |
|
3733 { |
|
3734 if (CallArgListLength(call) != 1) |
|
3735 return f.fail(call, "Math.sqrt must be passed 1 argument"); |
|
3736 |
|
3737 ParseNode *arg = CallArgList(call); |
|
3738 |
|
3739 MDefinition *argDef; |
|
3740 Type argType; |
|
3741 if (!CheckExpr(f, arg, &argDef, &argType)) |
|
3742 return false; |
|
3743 |
|
3744 if (argType.isMaybeDouble()) { |
|
3745 if (retType != RetType::Double) |
|
3746 return f.failf(call, "return type is double, used as %s", retType.toType().toChars()); |
|
3747 *def = f.unary<MSqrt>(argDef, MIRType_Double); |
|
3748 *type = Type::Double; |
|
3749 return true; |
|
3750 } |
|
3751 |
|
3752 if (argType.isMaybeFloat()) { |
|
3753 if (retType != RetType::Float) |
|
3754 return f.failf(call, "return type is float, used as %s", retType.toType().toChars()); |
|
3755 *def = f.unary<MSqrt>(argDef, MIRType_Float32); |
|
3756 *type = Type::Float; |
|
3757 return true; |
|
3758 } |
|
3759 |
|
3760 return f.failf(call, "%s is neither a subtype of double? nor float?", argType.toChars()); |
|
3761 } |
|
3762 |
|
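// Math.min/Math.max take two or more arguments that must all be int or all be
// double?; the result is built up as a chain of pairwise min/max operations.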
3763 static bool |
|
3764 CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, RetType retType, MDefinition **def, Type *type, bool isMax) |
|
3765 { |
|
3766 if (CallArgListLength(callNode) < 2) |
|
3767 return f.fail(callNode, "Math.min/max must be passed at least 2 arguments"); |
|
3768 |
|
3769 ParseNode *firstArg = CallArgList(callNode); |
|
3770 MDefinition *firstDef; |
|
3771 Type firstType; |
|
3772 if (!CheckExpr(f, firstArg, &firstDef, &firstType)) |
|
3773 return false; |
|
3774 |
|
3775 bool opIsDouble = firstType.isMaybeDouble(); |
|
3776 bool opIsInteger = firstType.isInt(); |
|
3777 MIRType opType = firstType.toMIRType(); |
|
3778 |
|
3779 if (!opIsDouble && !opIsInteger) |
|
3780 return f.failf(firstArg, "%s is not a subtype of double? or int", firstType.toChars()); |
|
3781 |
|
3782 MDefinition *lastDef = firstDef; |
|
3783 ParseNode *nextArg = NextNode(firstArg); |
|
3784 for (unsigned i = 1; i < CallArgListLength(callNode); i++, nextArg = NextNode(nextArg)) { |
|
3785 MDefinition *nextDef; |
|
3786 Type nextType; |
|
3787 if (!CheckExpr(f, nextArg, &nextDef, &nextType)) |
|
3788 return false; |
|
3789 |
|
3790 if (opIsDouble && !nextType.isMaybeDouble()) |
|
3791 return f.failf(nextArg, "%s is not a subtype of double?", nextType.toChars()); |
|
3792 if (opIsInteger && !nextType.isInt()) |
|
3793 return f.failf(nextArg, "%s is not a subtype of int", nextType.toChars()); |
|
3794 |
|
3795 lastDef = f.minMax(lastDef, nextDef, opType, isMax); |
|
3796 } |
|
3797 |
|
3798 if (opIsDouble && retType != RetType::Double) |
|
3799 return f.failf(callNode, "return type is double, used as %s", retType.toType().toChars()); |
|
3800 if (opIsInteger && retType != RetType::Signed) |
|
3801 return f.failf(callNode, "return type is int, used as %s", retType.toType().toChars()); |
|
3802 |
|
3803 *type = opIsDouble ? Type::Double : Type::Signed; |
|
3804 *def = lastDef; |
|
3805 return true; |
|
3806 } |
|
3807 |
|
3808 typedef bool (*CheckArgType)(FunctionCompiler &f, ParseNode *argNode, Type type); |
|
3809 |
|
3810 static bool |
|
3811 CheckCallArgs(FunctionCompiler &f, ParseNode *callNode, CheckArgType checkArg, |
|
3812 FunctionCompiler::Call *call) |
|
3813 { |
|
3814 f.startCallArgs(call); |
|
3815 |
|
3816 ParseNode *argNode = CallArgList(callNode); |
|
3817 for (unsigned i = 0; i < CallArgListLength(callNode); i++, argNode = NextNode(argNode)) { |
|
3818 MDefinition *def; |
|
3819 Type type; |
|
3820 if (!CheckExpr(f, argNode, &def, &type)) |
|
3821 return false; |
|
3822 |
|
3823 if (!checkArg(f, argNode, type)) |
|
3824 return false; |
|
3825 |
|
3826 if (!f.passArg(def, VarType::FromCheckedType(type), call)) |
|
3827 return false; |
|
3828 } |
|
3829 |
|
3830 f.finishCallArgs(call); |
|
3831 return true; |
|
3832 } |
|
3833 |
|
3834 static bool |
|
3835 CheckSignatureAgainstExisting(ModuleCompiler &m, ParseNode *usepn, const Signature &sig, |
|
3836 const Signature &existing) |
|
3837 { |
|
3838 if (sig.args().length() != existing.args().length()) { |
|
3839 return m.failf(usepn, "incompatible number of arguments (%u here vs. %u before)", |
|
3840 sig.args().length(), existing.args().length()); |
|
3841 } |
|
3842 |
|
3843 for (unsigned i = 0; i < sig.args().length(); i++) { |
|
3844 if (sig.arg(i) != existing.arg(i)) { |
|
3845 return m.failf(usepn, "incompatible type for argument %u: (%s here vs. %s before)", |
|
3846 i, sig.arg(i).toType().toChars(), existing.arg(i).toType().toChars()); |
|
3847 } |
|
3848 } |
|
3849 |
|
3850 if (sig.retType() != existing.retType()) { |
|
3851 return m.failf(usepn, "%s incompatible with previous return of type %s", |
|
3852 sig.retType().toType().toChars(), existing.retType().toType().toChars()); |
|
3853 } |
|
3854 |
|
3855 JS_ASSERT(sig == existing); |
|
3856 return true; |
|
3857 } |
|
3858 |
|
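// The first use of an internal function (call or definition) registers its
// signature; every subsequent use must match it exactly, both in argument types
// and in return type.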
3859 static bool |
|
3860 CheckFunctionSignature(ModuleCompiler &m, ParseNode *usepn, Signature &&sig, PropertyName *name, |
|
3861 ModuleCompiler::Func **func) |
|
3862 { |
|
3863 ModuleCompiler::Func *existing = m.lookupFunction(name); |
|
3864 if (!existing) { |
|
3865 if (!CheckModuleLevelName(m, usepn, name)) |
|
3866 return false; |
|
3867 return m.addFunction(name, Move(sig), func); |
|
3868 } |
|
3869 |
|
3870 if (!CheckSignatureAgainstExisting(m, usepn, sig, existing->sig())) |
|
3871 return false; |
|
3872 |
|
3873 *func = existing; |
|
3874 return true; |
|
3875 } |
|
3876 |
|
3877 static bool |
|
3878 CheckIsVarType(FunctionCompiler &f, ParseNode *argNode, Type type) |
|
3879 { |
|
3880 if (!type.isVarType()) |
|
3881 return f.failf(argNode, "%s is not a subtype of int, float or double", type.toChars()); |
|
3882 return true; |
|
3883 } |
|
3884 |
|
3885 static bool |
|
3886 CheckInternalCall(FunctionCompiler &f, ParseNode *callNode, PropertyName *calleeName, |
|
3887 RetType retType, MDefinition **def, Type *type) |
|
3888 { |
|
3889 FunctionCompiler::Call call(f, callNode, retType); |
|
3890 |
|
3891 if (!CheckCallArgs(f, callNode, CheckIsVarType, &call)) |
|
3892 return false; |
|
3893 |
|
3894 ModuleCompiler::Func *callee; |
|
3895 if (!CheckFunctionSignature(f.m(), callNode, Move(call.sig()), calleeName, &callee)) |
|
3896 return false; |
|
3897 |
|
3898 if (!f.internalCall(*callee, call, def)) |
|
3899 return false; |
|
3900 |
|
3901 *type = retType.toType(); |
|
3902 return true; |
|
3903 } |
|
3904 |
|
3905 static bool |
|
3906 CheckFuncPtrTableAgainstExisting(ModuleCompiler &m, ParseNode *usepn, |
|
3907 PropertyName *name, Signature &&sig, unsigned mask, |
|
3908 ModuleCompiler::FuncPtrTable **tableOut) |
|
3909 { |
|
3910 if (const ModuleCompiler::Global *existing = m.lookupGlobal(name)) { |
|
3911 if (existing->which() != ModuleCompiler::Global::FuncPtrTable) |
|
3912 return m.failName(usepn, "'%s' is not a function-pointer table", name); |
|
3913 |
|
3914 ModuleCompiler::FuncPtrTable &table = m.funcPtrTable(existing->funcPtrTableIndex()); |
|
3915 if (mask != table.mask()) |
|
3916 return m.failf(usepn, "mask does not match previous value (%u)", table.mask()); |
|
3917 |
|
3918 if (!CheckSignatureAgainstExisting(m, usepn, sig, table.sig())) |
|
3919 return false; |
|
3920 |
|
3921 *tableOut = &table; |
|
3922 return true; |
|
3923 } |
|
3924 |
|
3925 if (!CheckModuleLevelName(m, usepn, name)) |
|
3926 return false; |
|
3927 |
|
3928 return m.addFuncPtrTable(name, Move(sig), mask, tableOut); |
|
3929 } |
|
3930 |
|
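// A function-pointer call has the form 'table[expr & mask](args)'. The mask must
// be a constant of the form 2^n-1 and, like the call's signature, must agree with
// any previous use of the same table.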
3931 static bool |
|
3932 CheckFuncPtrCall(FunctionCompiler &f, ParseNode *callNode, RetType retType, MDefinition **def, Type *type) |
|
3933 { |
|
3934 ParseNode *callee = CallCallee(callNode); |
|
3935 ParseNode *tableNode = ElemBase(callee); |
|
3936 ParseNode *indexExpr = ElemIndex(callee); |
|
3937 |
|
3938 if (!tableNode->isKind(PNK_NAME)) |
|
3939 return f.fail(tableNode, "expecting name of function-pointer array"); |
|
3940 |
|
3941 PropertyName *name = tableNode->name(); |
|
3942 if (const ModuleCompiler::Global *existing = f.lookupGlobal(name)) { |
|
3943 if (existing->which() != ModuleCompiler::Global::FuncPtrTable) |
|
3944 return f.failName(tableNode, "'%s' is not the name of a function-pointer array", name); |
|
3945 } |
|
3946 |
|
3947 if (!indexExpr->isKind(PNK_BITAND)) |
|
3948 return f.fail(indexExpr, "function-pointer table index expression needs & mask"); |
|
3949 |
|
3950 ParseNode *indexNode = BinaryLeft(indexExpr); |
|
3951 ParseNode *maskNode = BinaryRight(indexExpr); |
|
3952 |
|
3953 uint32_t mask; |
|
3954 if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX || !IsPowerOfTwo(mask + 1)) |
|
3955 return f.fail(maskNode, "function-pointer table index mask value must be one less than a power of two"); |
|
3956 |
|
3957 MDefinition *indexDef; |
|
3958 Type indexType; |
|
3959 if (!CheckExpr(f, indexNode, &indexDef, &indexType)) |
|
3960 return false; |
|
3961 |
|
3962 if (!indexType.isIntish()) |
|
3963 return f.failf(indexNode, "%s is not a subtype of intish", indexType.toChars()); |
|
3964 |
|
3965 FunctionCompiler::Call call(f, callNode, retType); |
|
3966 |
|
3967 if (!CheckCallArgs(f, callNode, CheckIsVarType, &call)) |
|
3968 return false; |
|
3969 |
|
3970 ModuleCompiler::FuncPtrTable *table; |
|
3971 if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(call.sig()), mask, &table)) |
|
3972 return false; |
|
3973 |
|
3974 if (!f.funcPtrCall(*table, indexDef, call, def)) |
|
3975 return false; |
|
3976 |
|
3977 *type = retType.toType(); |
|
3978 return true; |
|
3979 } |
|
3980 |
|
3981 static bool |
|
3982 CheckIsExternType(FunctionCompiler &f, ParseNode *argNode, Type type) |
|
3983 { |
|
3984 if (!type.isExtern()) |
|
3985 return f.failf(argNode, "%s is not a subtype of extern", type.toChars()); |
|
3986 return true; |
|
3987 } |
|
3988 |
|
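// Calls through the foreign-function interface accept only extern arguments and
// may be coerced to signed or double (or have their result ignored), but never to
// float.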
3989 static bool |
|
3990 CheckFFICall(FunctionCompiler &f, ParseNode *callNode, unsigned ffiIndex, RetType retType, |
|
3991 MDefinition **def, Type *type) |
|
3992 { |
|
3993 PropertyName *calleeName = CallCallee(callNode)->name(); |
|
3994 |
|
3995 if (retType == RetType::Float) |
|
3996 return f.fail(callNode, "FFI calls can't return float"); |
|
3997 |
|
3998 FunctionCompiler::Call call(f, callNode, retType); |
|
3999 if (!CheckCallArgs(f, callNode, CheckIsExternType, &call)) |
|
4000 return false; |
|
4001 |
|
4002 unsigned exitIndex; |
|
4003 if (!f.m().addExit(ffiIndex, calleeName, Move(call.sig()), &exitIndex)) |
|
4004 return false; |
|
4005 |
|
4006 if (!f.ffiCall(exitIndex, call, retType.toMIRType(), def)) |
|
4007 return false; |
|
4008 |
|
4009 *type = retType.toType(); |
|
4010 return true; |
|
4011 } |
|
4012 |
|
4013 static bool CheckCall(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type); |
|
4014 |
|
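// The argument of fround may itself be a call (compiled with a float return type)
// or any signed, unsigned, double? or floatish expression, which is converted to
// float as needed.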
4015 static bool |
|
4016 CheckFRoundArg(FunctionCompiler &f, ParseNode *arg, MDefinition **def, Type *type) |
|
4017 { |
|
4018 if (arg->isKind(PNK_CALL)) |
|
4019 return CheckCall(f, arg, RetType::Float, def, type); |
|
4020 |
|
4021 MDefinition *inputDef; |
|
4022 Type inputType; |
|
4023 if (!CheckExpr(f, arg, &inputDef, &inputType)) |
|
4024 return false; |
|
4025 |
|
4026 if (inputType.isMaybeDouble() || inputType.isSigned()) |
|
4027 *def = f.unary<MToFloat32>(inputDef); |
|
4028 else if (inputType.isUnsigned()) |
|
4029 *def = f.unary<MAsmJSUnsignedToFloat32>(inputDef); |
|
4030 else if (inputType.isFloatish()) |
|
4031 *def = inputDef; |
|
4032 else |
|
4033 return f.failf(arg, "%s is not a subtype of signed, unsigned, double? or floatish", inputType.toChars()); |
|
4034 |
|
4035 *type = Type::Float; |
|
4036 return true; |
|
4037 } |
|
4038 |
|
4039 static bool |
|
4040 CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, RetType retType, MDefinition **def, Type *type) |
|
4041 { |
|
4042 ParseNode *argNode = nullptr; |
|
4043 if (!IsFloatCoercion(f.m(), callNode, &argNode)) |
|
4044 return f.fail(callNode, "invalid call to fround"); |
|
4045 |
|
4046 MDefinition *operand; |
|
4047 Type operandType; |
|
4048 if (!CheckFRoundArg(f, argNode, &operand, &operandType)) |
|
4049 return false; |
|
4050 |
|
4051 JS_ASSERT(operandType == Type::Float); |
|
4052 |
|
4053 switch (retType.which()) { |
|
4054 case RetType::Double: |
|
4055 *def = f.unary<MToDouble>(operand); |
|
4056 *type = Type::Double; |
|
4057 return true; |
|
4058 case RetType::Signed: |
|
4059 *def = f.unary<MTruncateToInt32>(operand); |
|
4060 *type = Type::Signed; |
|
4061 return true; |
|
4062 case RetType::Float: |
|
4063 *def = operand; |
|
4064 *type = Type::Float; |
|
4065 return true; |
|
4066 case RetType::Void: |
|
4067 // definition and return types should be ignored by the caller |
|
4068 return true; |
|
4069 } |
|
4070 |
|
4071 MOZ_ASSUME_UNREACHABLE("unexpected return type for fround"); |
|
4072 } |
|
4073 |
|
4074 static bool |
|
4075 CheckIsMaybeDouble(FunctionCompiler &f, ParseNode *argNode, Type type) |
|
4076 { |
|
4077 if (!type.isMaybeDouble()) |
|
4078 return f.failf(argNode, "%s is not a subtype of double?", type.toChars()); |
|
4079 return true; |
|
4080 } |
|
4081 |
|
4082 static bool |
|
4083 CheckIsMaybeFloat(FunctionCompiler &f, ParseNode *argNode, Type type) |
|
4084 { |
|
4085 if (!type.isMaybeFloat()) |
|
4086 return f.failf(argNode, "%s is not a subtype of float?", type.toChars()); |
|
4087 return true; |
|
4088 } |
|
4089 |
|
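// Math builtins split into two groups: imul, abs, sqrt, fround, min and max get
// the dedicated checks above, while the remaining unary and binary functions are
// emitted as calls to out-of-line builtins, with float variants available only
// for ceil and floor.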
4090 static bool |
|
4091 CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinFunction func, |
|
4092 RetType retType, MDefinition **def, Type *type) |
|
4093 { |
|
4094 unsigned arity = 0; |
|
4095 AsmJSImmKind doubleCallee, floatCallee; |
|
4096 switch (func) { |
|
4097 case AsmJSMathBuiltin_imul: return CheckMathIMul(f, callNode, retType, def, type); |
|
4098 case AsmJSMathBuiltin_abs: return CheckMathAbs(f, callNode, retType, def, type); |
|
4099 case AsmJSMathBuiltin_sqrt: return CheckMathSqrt(f, callNode, retType, def, type); |
|
4100 case AsmJSMathBuiltin_fround: return CheckMathFRound(f, callNode, retType, def, type); |
|
4101 case AsmJSMathBuiltin_min: return CheckMathMinMax(f, callNode, retType, def, type, /* isMax = */ false); |
|
4102 case AsmJSMathBuiltin_max: return CheckMathMinMax(f, callNode, retType, def, type, /* isMax = */ true); |
|
4103 case AsmJSMathBuiltin_ceil: arity = 1; doubleCallee = AsmJSImm_CeilD; floatCallee = AsmJSImm_CeilF; break; |
|
4104 case AsmJSMathBuiltin_floor: arity = 1; doubleCallee = AsmJSImm_FloorD; floatCallee = AsmJSImm_FloorF; break; |
|
4105 case AsmJSMathBuiltin_sin: arity = 1; doubleCallee = AsmJSImm_SinD; floatCallee = AsmJSImm_Invalid; break; |
|
4106 case AsmJSMathBuiltin_cos: arity = 1; doubleCallee = AsmJSImm_CosD; floatCallee = AsmJSImm_Invalid; break; |
|
4107 case AsmJSMathBuiltin_tan: arity = 1; doubleCallee = AsmJSImm_TanD; floatCallee = AsmJSImm_Invalid; break; |
|
4108 case AsmJSMathBuiltin_asin: arity = 1; doubleCallee = AsmJSImm_ASinD; floatCallee = AsmJSImm_Invalid; break; |
|
4109 case AsmJSMathBuiltin_acos: arity = 1; doubleCallee = AsmJSImm_ACosD; floatCallee = AsmJSImm_Invalid; break; |
|
4110 case AsmJSMathBuiltin_atan: arity = 1; doubleCallee = AsmJSImm_ATanD; floatCallee = AsmJSImm_Invalid; break; |
|
4111 case AsmJSMathBuiltin_exp: arity = 1; doubleCallee = AsmJSImm_ExpD; floatCallee = AsmJSImm_Invalid; break; |
|
4112 case AsmJSMathBuiltin_log: arity = 1; doubleCallee = AsmJSImm_LogD; floatCallee = AsmJSImm_Invalid; break; |
|
4113 case AsmJSMathBuiltin_pow: arity = 2; doubleCallee = AsmJSImm_PowD; floatCallee = AsmJSImm_Invalid; break; |
|
4114 case AsmJSMathBuiltin_atan2: arity = 2; doubleCallee = AsmJSImm_ATan2D; floatCallee = AsmJSImm_Invalid; break; |
|
4115 default: MOZ_ASSUME_UNREACHABLE("unexpected mathBuiltin function"); |
|
4116 } |
|
4117 |
|
4118 if (retType == RetType::Float && floatCallee == AsmJSImm_Invalid) |
|
4119 return f.fail(callNode, "math builtin cannot be used as float"); |
|
4120 if (retType != RetType::Double && retType != RetType::Float) |
|
4121 return f.failf(callNode, "return type of math function is double or float, used as %s", retType.toType().toChars()); |
|
4122 |
|
4123 FunctionCompiler::Call call(f, callNode, retType); |
|
4124 if (retType == RetType::Float && !CheckCallArgs(f, callNode, CheckIsMaybeFloat, &call)) |
|
4125 return false; |
|
4126 if (retType == RetType::Double && !CheckCallArgs(f, callNode, CheckIsMaybeDouble, &call)) |
|
4127 return false; |
|
4128 |
|
4129 if (call.sig().args().length() != arity) |
|
4130 return f.failf(callNode, "call passed %u arguments, expected %u", call.sig().args().length(), arity); |
|
4131 |
|
4132 if (retType == RetType::Float && !f.builtinCall(floatCallee, call, retType.toMIRType(), def)) |
|
4133 return false; |
|
4134 if (retType == RetType::Double && !f.builtinCall(doubleCallee, call, retType.toMIRType(), def)) |
|
4135 return false; |
|
4136 |
|
4137 *type = retType.toType(); |
|
4138 return true; |
|
4139 } |
|
4140 |
|
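// Dispatch a call on its callee: an element access is a function-pointer table
// call, an FFI or math-builtin name is the corresponding special form, and any
// other name is an internal call whose signature is recorded or checked here.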
4141 static bool |
|
4142 CheckCall(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type) |
|
4143 { |
|
4144 JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); |
|
4145 |
|
4146 ParseNode *callee = CallCallee(call); |
|
4147 |
|
4148 if (callee->isKind(PNK_ELEM)) |
|
4149 return CheckFuncPtrCall(f, call, retType, def, type); |
|
4150 |
|
4151 if (!callee->isKind(PNK_NAME)) |
|
4152 return f.fail(callee, "unexpected callee expression type"); |
|
4153 |
|
4154 PropertyName *calleeName = callee->name(); |
|
4155 |
|
4156 if (const ModuleCompiler::Global *global = f.lookupGlobal(calleeName)) { |
|
4157 switch (global->which()) { |
|
4158 case ModuleCompiler::Global::FFI: |
|
4159 return CheckFFICall(f, call, global->ffiIndex(), retType, def, type); |
|
4160 case ModuleCompiler::Global::MathBuiltinFunction: |
|
4161 return CheckMathBuiltinCall(f, call, global->mathBuiltinFunction(), retType, def, type); |
|
4162 case ModuleCompiler::Global::ConstantLiteral: |
|
4163 case ModuleCompiler::Global::ConstantImport: |
|
4164 case ModuleCompiler::Global::Variable: |
|
4165 case ModuleCompiler::Global::FuncPtrTable: |
|
4166 case ModuleCompiler::Global::ArrayView: |
|
4167 return f.failName(callee, "'%s' is not a callable function", callee->name()); |
|
4168 case ModuleCompiler::Global::Function: |
|
4169 break; |
|
4170 } |
|
4171 } |
|
4172 |
|
4173 return CheckInternalCall(f, call, calleeName, retType, def, type); |
|
4174 } |
|
4175 |
|
4176 static bool |
|
4177 CheckPos(FunctionCompiler &f, ParseNode *pos, MDefinition **def, Type *type) |
|
4178 { |
|
4179 JS_ASSERT(pos->isKind(PNK_POS)); |
|
4180 ParseNode *operand = UnaryKid(pos); |
|
4181 |
|
4182 if (operand->isKind(PNK_CALL)) |
|
4183 return CheckCall(f, operand, RetType::Double, def, type); |
|
4184 |
|
4185 MDefinition *operandDef; |
|
4186 Type operandType; |
|
4187 if (!CheckExpr(f, operand, &operandDef, &operandType)) |
|
4188 return false; |
|
4189 |
|
4190 if (operandType.isMaybeFloat() || operandType.isSigned()) |
|
4191 *def = f.unary<MToDouble>(operandDef); |
|
4192 else if (operandType.isUnsigned()) |
|
4193 *def = f.unary<MAsmJSUnsignedToDouble>(operandDef); |
|
4194 else if (operandType.isMaybeDouble()) |
|
4195 *def = operandDef; |
|
4196 else |
|
4197 return f.failf(operand, "%s is not a subtype of signed, unsigned, float or double?", operandType.toChars()); |
|
4198 |
|
4199 *type = Type::Double; |
|
4200 return true; |
|
4201 } |
|
4202 |
|
4203 static bool |
|
4204 CheckNot(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4205 { |
|
4206 JS_ASSERT(expr->isKind(PNK_NOT)); |
|
4207 ParseNode *operand = UnaryKid(expr); |
|
4208 |
|
4209 MDefinition *operandDef; |
|
4210 Type operandType; |
|
4211 if (!CheckExpr(f, operand, &operandDef, &operandType)) |
|
4212 return false; |
|
4213 |
|
4214 if (!operandType.isInt()) |
|
4215 return f.failf(operand, "%s is not a subtype of int", operandType.toChars()); |
|
4216 |
|
4217 *def = f.unary<MNot>(operandDef); |
|
4218 *type = Type::Int; |
|
4219 return true; |
|
4220 } |
|
4221 |
|
4222 static bool |
|
4223 CheckNeg(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4224 { |
|
4225 JS_ASSERT(expr->isKind(PNK_NEG)); |
|
4226 ParseNode *operand = UnaryKid(expr); |
|
4227 |
|
4228 MDefinition *operandDef; |
|
4229 Type operandType; |
|
4230 if (!CheckExpr(f, operand, &operandDef, &operandType)) |
|
4231 return false; |
|
4232 |
|
4233 if (operandType.isInt()) { |
|
4234 *def = f.unary<MAsmJSNeg>(operandDef, MIRType_Int32); |
|
4235 *type = Type::Intish; |
|
4236 return true; |
|
4237 } |
|
4238 |
|
4239 if (operandType.isMaybeDouble()) { |
|
4240 *def = f.unary<MAsmJSNeg>(operandDef, MIRType_Double); |
|
4241 *type = Type::Double; |
|
4242 return true; |
|
4243 } |
|
4244 |
|
4245 if (operandType.isMaybeFloat()) { |
|
4246 *def = f.unary<MAsmJSNeg>(operandDef, MIRType_Float32); |
|
4247 *type = Type::Floatish; |
|
4248 return true; |
|
4249 } |
|
4250 |
|
4251 return f.failf(operand, "%s is not a subtype of int, float? or double?", operandType.toChars()); |
|
4252 } |
|
4253 |
|
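// '~~x' is the asm.js idiom for truncating a double or float to a signed integer;
// a single '~' (handled by CheckBitNot below) is an ordinary bitwise-not on an
// intish operand.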
4254 static bool |
|
4255 CheckCoerceToInt(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4256 { |
|
4257 JS_ASSERT(expr->isKind(PNK_BITNOT)); |
|
4258 ParseNode *operand = UnaryKid(expr); |
|
4259 |
|
4260 MDefinition *operandDef; |
|
4261 Type operandType; |
|
4262 if (!CheckExpr(f, operand, &operandDef, &operandType)) |
|
4263 return false; |
|
4264 |
|
4265 if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) { |
|
4266 *def = f.unary<MTruncateToInt32>(operandDef); |
|
4267 *type = Type::Signed; |
|
4268 return true; |
|
4269 } |
|
4270 |
|
4271 if (!operandType.isIntish()) |
|
4272 return f.failf(operand, "%s is not a subtype of double?, float? or intish", operandType.toChars()); |
|
4273 |
|
4274 *def = operandDef; |
|
4275 *type = Type::Signed; |
|
4276 return true; |
|
4277 } |
|
4278 |
|
4279 static bool |
|
4280 CheckBitNot(FunctionCompiler &f, ParseNode *neg, MDefinition **def, Type *type) |
|
4281 { |
|
4282 JS_ASSERT(neg->isKind(PNK_BITNOT)); |
|
4283 ParseNode *operand = UnaryKid(neg); |
|
4284 |
|
4285 if (operand->isKind(PNK_BITNOT)) |
|
4286 return CheckCoerceToInt(f, operand, def, type); |
|
4287 |
|
4288 MDefinition *operandDef; |
|
4289 Type operandType; |
|
4290 if (!CheckExpr(f, operand, &operandDef, &operandType)) |
|
4291 return false; |
|
4292 |
|
4293 if (!operandType.isIntish()) |
|
4294 return f.failf(operand, "%s is not a subtype of intish", operandType.toChars()); |
|
4295 |
|
4296 *def = f.bitwise<MBitNot>(operandDef); |
|
4297 *type = Type::Signed; |
|
4298 return true; |
|
4299 } |
|
4300 |
|
4301 static bool |
|
4302 CheckComma(FunctionCompiler &f, ParseNode *comma, MDefinition **def, Type *type) |
|
4303 { |
|
4304 JS_ASSERT(comma->isKind(PNK_COMMA)); |
|
4305 ParseNode *operands = ListHead(comma); |
|
4306 |
|
4307 ParseNode *pn = operands; |
|
4308 for (; NextNode(pn); pn = NextNode(pn)) { |
|
4309 MDefinition *_1; |
|
4310 Type _2; |
|
4311 if (pn->isKind(PNK_CALL)) { |
|
4312 if (!CheckCall(f, pn, RetType::Void, &_1, &_2)) |
|
4313 return false; |
|
4314 } else { |
|
4315 if (!CheckExpr(f, pn, &_1, &_2)) |
|
4316 return false; |
|
4317 } |
|
4318 } |
|
4319 |
|
4320 if (!CheckExpr(f, pn, def, type)) |
|
4321 return false; |
|
4322 |
|
4323 return true; |
|
4324 } |
|
4325 |
|
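// A conditional expression needs an int condition and then/else arms of matching
// type (both int, both double or both float); the two values are joined with a
// phi at the merge point.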
4326 static bool |
|
4327 CheckConditional(FunctionCompiler &f, ParseNode *ternary, MDefinition **def, Type *type) |
|
4328 { |
|
4329 JS_ASSERT(ternary->isKind(PNK_CONDITIONAL)); |
|
4330 ParseNode *cond = TernaryKid1(ternary); |
|
4331 ParseNode *thenExpr = TernaryKid2(ternary); |
|
4332 ParseNode *elseExpr = TernaryKid3(ternary); |
|
4333 |
|
4334 MDefinition *condDef; |
|
4335 Type condType; |
|
4336 if (!CheckExpr(f, cond, &condDef, &condType)) |
|
4337 return false; |
|
4338 |
|
4339 if (!condType.isInt()) |
|
4340 return f.failf(cond, "%s is not a subtype of int", condType.toChars()); |
|
4341 |
|
4342 MBasicBlock *thenBlock = nullptr, *elseBlock = nullptr; |
|
4343 if (!f.branchAndStartThen(condDef, &thenBlock, &elseBlock, thenExpr, elseExpr)) |
|
4344 return false; |
|
4345 |
|
4346 MDefinition *thenDef; |
|
4347 Type thenType; |
|
4348 if (!CheckExpr(f, thenExpr, &thenDef, &thenType)) |
|
4349 return false; |
|
4350 |
|
4351 BlockVector thenBlocks(f.cx()); |
|
4352 if (!f.appendThenBlock(&thenBlocks)) |
|
4353 return false; |
|
4354 |
|
4355 f.pushPhiInput(thenDef); |
|
4356 f.switchToElse(elseBlock); |
|
4357 |
|
4358 MDefinition *elseDef; |
|
4359 Type elseType; |
|
4360 if (!CheckExpr(f, elseExpr, &elseDef, &elseType)) |
|
4361 return false; |
|
4362 |
|
4363 f.pushPhiInput(elseDef); |
|
4364 |
|
4365 if (thenType.isInt() && elseType.isInt()) { |
|
4366 *type = Type::Int; |
|
4367 } else if (thenType.isDouble() && elseType.isDouble()) { |
|
4368 *type = Type::Double; |
|
4369 } else if (thenType.isFloat() && elseType.isFloat()) { |
|
4370 *type = Type::Float; |
|
4371 } else { |
|
4372 return f.failf(ternary, "then/else branches of conditional must both produce int, float or double, " |
|
4373 "current types are %s and %s", thenType.toChars(), elseType.toChars()); |
|
4374 } |
|
4375 |
|
4376 if (!f.joinIfElse(thenBlocks, elseExpr)) |
|
4377 return false; |
|
4378 |
|
4379 *def = f.popPhiOutput(); |
|
4380 return true; |
|
4381 } |
|
4382 |
|
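// Integer multiplication is only accepted when one operand is a literal with
// magnitude below 2^20, which keeps the intermediate double-precision product
// exactly representable; general int*int must be written with Math.imul.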
4383 static bool |
|
4384 IsValidIntMultiplyConstant(ModuleCompiler &m, ParseNode *expr) |
|
4385 { |
|
4386 if (!IsNumericLiteral(m, expr)) |
|
4387 return false; |
|
4388 |
|
4389 NumLit literal = ExtractNumericLiteral(m, expr); |
|
4390 switch (literal.which()) { |
|
4391 case NumLit::Fixnum: |
|
4392 case NumLit::NegativeInt: |
|
4393 if (abs(literal.toInt32()) < (1<<20)) |
|
4394 return true; |
|
4395 return false; |
|
4396 case NumLit::BigUnsigned: |
|
4397 case NumLit::Double: |
|
4398 case NumLit::Float: |
|
4399 case NumLit::OutOfRangeInt: |
|
4400 return false; |
|
4401 } |
|
4402 |
|
4403 MOZ_ASSUME_UNREACHABLE("Bad literal"); |
|
4404 } |
|
4405 |
|
4406 static bool |
|
4407 CheckMultiply(FunctionCompiler &f, ParseNode *star, MDefinition **def, Type *type) |
|
4408 { |
|
4409 JS_ASSERT(star->isKind(PNK_STAR)); |
|
4410 ParseNode *lhs = BinaryLeft(star); |
|
4411 ParseNode *rhs = BinaryRight(star); |
|
4412 |
|
4413 MDefinition *lhsDef; |
|
4414 Type lhsType; |
|
4415 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
4416 return false; |
|
4417 |
|
4418 MDefinition *rhsDef; |
|
4419 Type rhsType; |
|
4420 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
4421 return false; |
|
4422 |
|
4423 if (lhsType.isInt() && rhsType.isInt()) { |
|
4424 if (!IsValidIntMultiplyConstant(f.m(), lhs) && !IsValidIntMultiplyConstant(f.m(), rhs)) |
|
4425 return f.fail(star, "one arg to int multiply must be a small (-2^20, 2^20) int literal"); |
|
4426 *def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer); |
|
4427 *type = Type::Intish; |
|
4428 return true; |
|
4429 } |
|
4430 |
|
4431 if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { |
|
4432 *def = f.mul(lhsDef, rhsDef, MIRType_Double, MMul::Normal); |
|
4433 *type = Type::Double; |
|
4434 return true; |
|
4435 } |
|
4436 |
|
4437 if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { |
|
4438 *def = f.mul(lhsDef, rhsDef, MIRType_Float32, MMul::Normal); |
|
4439 *type = Type::Floatish; |
|
4440 return true; |
|
4441 } |
|
4442 |
|
4443 return f.fail(star, "multiply operands must be both int, both double? or both float?"); |
|
4444 } |
|
4445 |
|
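// Additions and subtractions over ints may be chained without intermediate
// coercions, but only up to 2^20 of them, so the uncoerced intermediate results
// remain exactly representable as doubles.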
4446 static bool |
|
4447 CheckAddOrSub(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type, |
|
4448 unsigned *numAddOrSubOut = nullptr) |
|
4449 { |
|
4450 JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); |
|
4451 |
|
4452 JS_ASSERT(expr->isKind(PNK_ADD) || expr->isKind(PNK_SUB)); |
|
4453 ParseNode *lhs = BinaryLeft(expr); |
|
4454 ParseNode *rhs = BinaryRight(expr); |
|
4455 |
|
4456 MDefinition *lhsDef, *rhsDef; |
|
4457 Type lhsType, rhsType; |
|
4458 unsigned lhsNumAddOrSub, rhsNumAddOrSub; |
|
4459 |
|
4460 if (lhs->isKind(PNK_ADD) || lhs->isKind(PNK_SUB)) { |
|
4461 if (!CheckAddOrSub(f, lhs, &lhsDef, &lhsType, &lhsNumAddOrSub)) |
|
4462 return false; |
|
4463 if (lhsType == Type::Intish) |
|
4464 lhsType = Type::Int; |
|
4465 } else { |
|
4466 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
4467 return false; |
|
4468 lhsNumAddOrSub = 0; |
|
4469 } |
|
4470 |
|
4471 if (rhs->isKind(PNK_ADD) || rhs->isKind(PNK_SUB)) { |
|
4472 if (!CheckAddOrSub(f, rhs, &rhsDef, &rhsType, &rhsNumAddOrSub)) |
|
4473 return false; |
|
4474 if (rhsType == Type::Intish) |
|
4475 rhsType = Type::Int; |
|
4476 } else { |
|
4477 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
4478 return false; |
|
4479 rhsNumAddOrSub = 0; |
|
4480 } |
|
4481 |
|
4482 unsigned numAddOrSub = lhsNumAddOrSub + rhsNumAddOrSub + 1; |
|
4483 if (numAddOrSub > (1<<20)) |
|
4484 return f.fail(expr, "too many + or - without intervening coercion"); |
|
4485 |
|
4486 if (lhsType.isInt() && rhsType.isInt()) { |
|
4487 *def = expr->isKind(PNK_ADD) |
|
4488 ? f.binary<MAdd>(lhsDef, rhsDef, MIRType_Int32) |
|
4489 : f.binary<MSub>(lhsDef, rhsDef, MIRType_Int32); |
|
4490 *type = Type::Intish; |
|
4491 } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { |
|
4492 *def = expr->isKind(PNK_ADD) |
|
4493 ? f.binary<MAdd>(lhsDef, rhsDef, MIRType_Double) |
|
4494 : f.binary<MSub>(lhsDef, rhsDef, MIRType_Double); |
|
4495 *type = Type::Double; |
|
4496 } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { |
|
4497 *def = expr->isKind(PNK_ADD) |
|
4498 ? f.binary<MAdd>(lhsDef, rhsDef, MIRType_Float32) |
|
4499 : f.binary<MSub>(lhsDef, rhsDef, MIRType_Float32); |
|
4500 *type = Type::Floatish; |
|
4501 } else { |
|
4502 return f.failf(expr, "operands to + or - must both be int, float? or double?, got %s and %s", |
|
4503 lhsType.toChars(), rhsType.toChars()); |
|
4504 } |
|
4505 |
|
4506 if (numAddOrSubOut) |
|
4507 *numAddOrSubOut = numAddOrSub; |
|
4508 return true; |
|
4509 } |
|
4510 |
|
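// Division and modulo require both operands to be double?, both float? (division
// only), both signed or both unsigned; the signedness selects the signed or
// unsigned integer operation.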
4511 static bool |
|
4512 CheckDivOrMod(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4513 { |
|
4514 JS_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD)); |
|
4515 ParseNode *lhs = BinaryLeft(expr); |
|
4516 ParseNode *rhs = BinaryRight(expr); |
|
4517 |
|
4518 MDefinition *lhsDef, *rhsDef; |
|
4519 Type lhsType, rhsType; |
|
4520 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
4521 return false; |
|
4522 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
4523 return false; |
|
4524 |
|
4525 if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { |
|
4526 *def = expr->isKind(PNK_DIV) |
|
4527 ? f.div(lhsDef, rhsDef, MIRType_Double, /* unsignd = */ false) |
|
4528 : f.mod(lhsDef, rhsDef, MIRType_Double, /* unsignd = */ false); |
|
4529 *type = Type::Double; |
|
4530 return true; |
|
4531 } |
|
4532 |
|
4533 if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { |
|
4534 if (expr->isKind(PNK_DIV)) |
|
4535 *def = f.div(lhsDef, rhsDef, MIRType_Float32, /* unsignd = */ false); |
|
4536 else |
|
4537 return f.fail(expr, "modulo cannot receive float arguments"); |
|
4538 *type = Type::Floatish; |
|
4539 return true; |
|
4540 } |
|
4541 |
|
4542 if (lhsType.isSigned() && rhsType.isSigned()) { |
|
4543 if (expr->isKind(PNK_DIV)) |
|
4544 *def = f.div(lhsDef, rhsDef, MIRType_Int32, /* unsignd = */ false); |
|
4545 else |
|
4546 *def = f.mod(lhsDef, rhsDef, MIRType_Int32, /* unsignd = */ false); |
|
4547 *type = Type::Intish; |
|
4548 return true; |
|
4549 } |
|
4550 |
|
4551 if (lhsType.isUnsigned() && rhsType.isUnsigned()) { |
|
4552 if (expr->isKind(PNK_DIV)) |
|
4553 *def = f.div(lhsDef, rhsDef, MIRType_Int32, /* unsignd = */ true); |
|
4554 else |
|
4555 *def = f.mod(lhsDef, rhsDef, MIRType_Int32, /* unsignd = */ true); |
|
4556 *type = Type::Intish; |
|
4557 return true; |
|
4558 } |
|
4559 |
|
4560 return f.failf(expr, "arguments to / or %% must both be double?, float?, signed, or unsigned; " |
|
4561 "%s and %s are given", lhsType.toChars(), rhsType.toChars()); |
|
4562 } |
|
4563 |
|
4564 static bool |
|
4565 CheckComparison(FunctionCompiler &f, ParseNode *comp, MDefinition **def, Type *type) |
|
4566 { |
|
4567 JS_ASSERT(comp->isKind(PNK_LT) || comp->isKind(PNK_LE) || comp->isKind(PNK_GT) || |
|
4568 comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE)); |
|
4569 ParseNode *lhs = BinaryLeft(comp); |
|
4570 ParseNode *rhs = BinaryRight(comp); |
|
4571 |
|
4572 MDefinition *lhsDef, *rhsDef; |
|
4573 Type lhsType, rhsType; |
|
4574 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
4575 return false; |
|
4576 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
4577 return false; |
|
4578 |
|
4579 if ((lhsType.isSigned() && rhsType.isSigned()) || (lhsType.isUnsigned() && rhsType.isUnsigned())) { |
|
4580 MCompare::CompareType compareType = (lhsType.isUnsigned() && rhsType.isUnsigned()) |
|
4581 ? MCompare::Compare_UInt32 |
|
4582 : MCompare::Compare_Int32; |
|
4583 *def = f.compare(lhsDef, rhsDef, comp->getOp(), compareType); |
|
4584 *type = Type::Int; |
|
4585 return true; |
|
4586 } |
|
4587 |
|
4588 if (lhsType.isDouble() && rhsType.isDouble()) { |
|
4589 *def = f.compare(lhsDef, rhsDef, comp->getOp(), MCompare::Compare_Double); |
|
4590 *type = Type::Int; |
|
4591 return true; |
|
4592 } |
|
4593 |
|
4594 if (lhsType.isFloat() && rhsType.isFloat()) { |
|
4595 *def = f.compare(lhsDef, rhsDef, comp->getOp(), MCompare::Compare_Float32); |
|
4596 *type = Type::Int; |
|
4597 return true; |
|
4598 } |
|
4599 |
|
4600 return f.failf(comp, "arguments to a comparison must both be signed, unsigned, floats or doubles; " |
|
4601 "%s and %s are given", lhsType.toChars(), rhsType.toChars()); |
|
4602 } |
|
4603 |
|
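// Bitwise operators double as coercions: applying the operator's identity element
// ('x|0', 'x&-1', 'x^0', a shift by 0) just validates the operand as intish and
// reuses it directly, and 'f()|0' is the form that coerces a call result to
// signed.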
4604 static bool |
|
4605 CheckBitwise(FunctionCompiler &f, ParseNode *bitwise, MDefinition **def, Type *type) |
|
4606 { |
|
4607 ParseNode *lhs = BinaryLeft(bitwise); |
|
4608 ParseNode *rhs = BinaryRight(bitwise); |
|
4609 |
|
4610 int32_t identityElement; |
|
4611 bool onlyOnRight; |
|
4612 switch (bitwise->getKind()) { |
|
4613 case PNK_BITOR: identityElement = 0; onlyOnRight = false; *type = Type::Signed; break; |
|
4614 case PNK_BITAND: identityElement = -1; onlyOnRight = false; *type = Type::Signed; break; |
|
4615 case PNK_BITXOR: identityElement = 0; onlyOnRight = false; *type = Type::Signed; break; |
|
4616 case PNK_LSH: identityElement = 0; onlyOnRight = true; *type = Type::Signed; break; |
|
4617 case PNK_RSH: identityElement = 0; onlyOnRight = true; *type = Type::Signed; break; |
|
4618 case PNK_URSH: identityElement = 0; onlyOnRight = true; *type = Type::Unsigned; break; |
|
4619 default: MOZ_ASSUME_UNREACHABLE("not a bitwise op"); |
|
4620 } |
|
4621 |
|
4622 uint32_t i; |
|
4623 if (!onlyOnRight && IsLiteralInt(f.m(), lhs, &i) && i == uint32_t(identityElement)) { |
|
4624 Type rhsType; |
|
4625 if (!CheckExpr(f, rhs, def, &rhsType)) |
|
4626 return false; |
|
4627 if (!rhsType.isIntish()) |
|
4628 return f.failf(bitwise, "%s is not a subtype of intish", rhsType.toChars()); |
|
4629 return true; |
|
4630 } |
|
4631 |
|
4632 if (IsLiteralInt(f.m(), rhs, &i) && i == uint32_t(identityElement)) { |
|
4633 if (bitwise->isKind(PNK_BITOR) && lhs->isKind(PNK_CALL)) |
|
4634 return CheckCall(f, lhs, RetType::Signed, def, type); |
|
4635 |
|
4636 Type lhsType; |
|
4637 if (!CheckExpr(f, lhs, def, &lhsType)) |
|
4638 return false; |
|
4639 if (!lhsType.isIntish()) |
|
4640 return f.failf(bitwise, "%s is not a subtype of intish", lhsType.toChars()); |
|
4641 return true; |
|
4642 } |
|
4643 |
|
4644 MDefinition *lhsDef; |
|
4645 Type lhsType; |
|
4646 if (!CheckExpr(f, lhs, &lhsDef, &lhsType)) |
|
4647 return false; |
|
4648 |
|
4649 MDefinition *rhsDef; |
|
4650 Type rhsType; |
|
4651 if (!CheckExpr(f, rhs, &rhsDef, &rhsType)) |
|
4652 return false; |
|
4653 |
|
4654 if (!lhsType.isIntish()) |
|
4655 return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars()); |
|
4656 if (!rhsType.isIntish()) |
|
4657 return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars()); |
|
4658 |
|
4659 switch (bitwise->getKind()) { |
|
4660 case PNK_BITOR: *def = f.bitwise<MBitOr>(lhsDef, rhsDef); break; |
|
4661 case PNK_BITAND: *def = f.bitwise<MBitAnd>(lhsDef, rhsDef); break; |
|
4662 case PNK_BITXOR: *def = f.bitwise<MBitXor>(lhsDef, rhsDef); break; |
|
4663 case PNK_LSH: *def = f.bitwise<MLsh>(lhsDef, rhsDef); break; |
|
4664 case PNK_RSH: *def = f.bitwise<MRsh>(lhsDef, rhsDef); break; |
|
4665 case PNK_URSH: *def = f.bitwise<MUrsh>(lhsDef, rhsDef); break; |
|
4666 default: MOZ_ASSUME_UNREACHABLE("not a bitwise op"); |
|
4667 } |
|
4668 |
|
4669 return true; |
|
4670 } |
|
4671 |
|
4672 static bool |
|
4673 CheckUncoercedCall(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4674 { |
|
4675 JS_ASSERT(expr->isKind(PNK_CALL)); |
|
4676 |
|
4677 ParseNode *arg; |
|
4678 if (!IsFloatCoercion(f.m(), expr, &arg)) { |
|
4679 return f.fail(expr, "all function calls must either be ignored (via f(); or " |
|
4680 "comma-expression), coerced to signed (via f()|0), coerced to float " |
|
4681 "(via fround(f())) or coerced to double (via +f())"); |
|
4682 } |
|
4683 |
|
4684 return CheckFRoundArg(f, arg, def, type); |
|
4685 } |
|
4686 |
|
4687 static bool |
|
4688 CheckExpr(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type) |
|
4689 { |
|
4690 JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); |
|
4691 |
|
4692 if (!f.mirGen().ensureBallast()) |
|
4693 return false; |
|
4694 |
|
4695 if (IsNumericLiteral(f.m(), expr)) |
|
4696 return CheckNumericLiteral(f, expr, def, type); |
|
4697 |
|
4698 switch (expr->getKind()) { |
|
4699 case PNK_NAME: return CheckVarRef(f, expr, def, type); |
|
4700 case PNK_ELEM: return CheckLoadArray(f, expr, def, type); |
|
4701 case PNK_ASSIGN: return CheckAssign(f, expr, def, type); |
|
4702 case PNK_POS: return CheckPos(f, expr, def, type); |
|
4703 case PNK_NOT: return CheckNot(f, expr, def, type); |
|
4704 case PNK_NEG: return CheckNeg(f, expr, def, type); |
|
4705 case PNK_BITNOT: return CheckBitNot(f, expr, def, type); |
|
4706 case PNK_COMMA: return CheckComma(f, expr, def, type); |
|
4707 case PNK_CONDITIONAL: return CheckConditional(f, expr, def, type); |
|
4708 case PNK_STAR: return CheckMultiply(f, expr, def, type); |
|
4709 case PNK_CALL: return CheckUncoercedCall(f, expr, def, type); |
|
4710 |
|
4711 case PNK_ADD: |
|
4712 case PNK_SUB: return CheckAddOrSub(f, expr, def, type); |
|
4713 |
|
4714 case PNK_DIV: |
|
4715 case PNK_MOD: return CheckDivOrMod(f, expr, def, type); |
|
4716 |
|
4717 case PNK_LT: |
|
4718 case PNK_LE: |
|
4719 case PNK_GT: |
|
4720 case PNK_GE: |
|
4721 case PNK_EQ: |
|
4722 case PNK_NE: return CheckComparison(f, expr, def, type); |
|
4723 |
|
4724 case PNK_BITOR: |
|
4725 case PNK_BITAND: |
|
4726 case PNK_BITXOR: |
|
4727 case PNK_LSH: |
|
4728 case PNK_RSH: |
|
4729 case PNK_URSH: return CheckBitwise(f, expr, def, type); |
|
4730 |
|
4731 default:; |
|
4732 } |
|
4733 |
|
4734 return f.fail(expr, "unsupported expression"); |
|
4735 } |
|
4736 |
|
4737 static bool |
|
4738 CheckStatement(FunctionCompiler &f, ParseNode *stmt, LabelVector *maybeLabels = nullptr); |
|
4739 |
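// For example, a bare call statement such as "f(x|0);" is checked as a call with a void
// return type, while any other expression statement is type-checked and its result
// simply discarded.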
|
4740 static bool |
|
4741 CheckExprStatement(FunctionCompiler &f, ParseNode *exprStmt) |
|
4742 { |
|
4743 JS_ASSERT(exprStmt->isKind(PNK_SEMI)); |
|
4744 ParseNode *expr = UnaryKid(exprStmt); |
|
4745 |
|
4746 if (!expr) |
|
4747 return true; |
|
4748 |
|
4749 MDefinition *_1; |
|
4750 Type _2; |
|
4751 |
|
4752 if (expr->isKind(PNK_CALL)) |
|
4753 return CheckCall(f, expr, RetType::Void, &_1, &_2); |
|
4754 |
|
4755 return CheckExpr(f, UnaryKid(exprStmt), &_1, &_2); |
|
4756 } |
|
4757 |
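// e.g. "while ((i|0) < (n|0)) { ... }": the condition must be int; the loop becomes a
// pending loop header, a conditional branch to the body or the join point, the body
// itself, and finally a backedge closed by closeLoop().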
|
4758 static bool |
|
4759 CheckWhile(FunctionCompiler &f, ParseNode *whileStmt, const LabelVector *maybeLabels) |
|
4760 { |
|
4761 JS_ASSERT(whileStmt->isKind(PNK_WHILE)); |
|
4762 ParseNode *cond = BinaryLeft(whileStmt); |
|
4763 ParseNode *body = BinaryRight(whileStmt); |
|
4764 |
|
4765 MBasicBlock *loopEntry; |
|
4766 if (!f.startPendingLoop(whileStmt, &loopEntry, body)) |
|
4767 return false; |
|
4768 |
|
4769 MDefinition *condDef; |
|
4770 Type condType; |
|
4771 if (!CheckExpr(f, cond, &condDef, &condType)) |
|
4772 return false; |
|
4773 |
|
4774 if (!condType.isInt()) |
|
4775 return f.failf(cond, "%s is not a subtype of int", condType.toChars()); |
|
4776 |
|
4777 MBasicBlock *afterLoop; |
|
4778 if (!f.branchAndStartLoopBody(condDef, &afterLoop, body, NextNode(whileStmt))) |
|
4779 return false; |
|
4780 |
|
4781 if (!CheckStatement(f, body)) |
|
4782 return false; |
|
4783 |
|
4784 if (!f.bindContinues(whileStmt, maybeLabels)) |
|
4785 return false; |
|
4786 |
|
4787 return f.closeLoop(loopEntry, afterLoop); |
|
4788 } |
|
4789 |
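// e.g. "for (i = 0; (i|0) < (n|0); i = (i+1)|0) { ... }": only the classic three-part
// head is accepted, and a missing condition is treated as the constant int 1 (always true).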
|
4790 static bool |
|
4791 CheckFor(FunctionCompiler &f, ParseNode *forStmt, const LabelVector *maybeLabels) |
|
4792 { |
|
4793 JS_ASSERT(forStmt->isKind(PNK_FOR)); |
|
4794 ParseNode *forHead = BinaryLeft(forStmt); |
|
4795 ParseNode *body = BinaryRight(forStmt); |
|
4796 |
|
4797 if (!forHead->isKind(PNK_FORHEAD)) |
|
4798 return f.fail(forHead, "unsupported for-loop statement"); |
|
4799 |
|
4800 ParseNode *maybeInit = TernaryKid1(forHead); |
|
4801 ParseNode *maybeCond = TernaryKid2(forHead); |
|
4802 ParseNode *maybeInc = TernaryKid3(forHead); |
|
4803 |
|
4804 if (maybeInit) { |
|
4805 MDefinition *_1; |
|
4806 Type _2; |
|
4807 if (!CheckExpr(f, maybeInit, &_1, &_2)) |
|
4808 return false; |
|
4809 } |
|
4810 |
|
4811 MBasicBlock *loopEntry; |
|
4812 if (!f.startPendingLoop(forStmt, &loopEntry, body)) |
|
4813 return false; |
|
4814 |
|
4815 MDefinition *condDef; |
|
4816 if (maybeCond) { |
|
4817 Type condType; |
|
4818 if (!CheckExpr(f, maybeCond, &condDef, &condType)) |
|
4819 return false; |
|
4820 |
|
4821 if (!condType.isInt()) |
|
4822 return f.failf(maybeCond, "%s is not a subtype of int", condType.toChars()); |
|
4823 } else { |
|
4824 condDef = f.constant(Int32Value(1), Type::Int); |
|
4825 } |
|
4826 |
|
4827 MBasicBlock *afterLoop; |
|
4828 if (!f.branchAndStartLoopBody(condDef, &afterLoop, body, NextNode(forStmt))) |
|
4829 return false; |
|
4830 |
|
4831 if (!CheckStatement(f, body)) |
|
4832 return false; |
|
4833 |
|
4834 if (!f.bindContinues(forStmt, maybeLabels)) |
|
4835 return false; |
|
4836 |
|
4837 if (maybeInc) { |
|
4838 MDefinition *_1; |
|
4839 Type _2; |
|
4840 if (!CheckExpr(f, maybeInc, &_1, &_2)) |
|
4841 return false; |
|
4842 } |
|
4843 |
|
4844 return f.closeLoop(loopEntry, afterLoop); |
|
4845 } |
|
4846 |
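// e.g. "do { ... } while ((i|0) < (n|0));": the body is compiled first and the int
// condition is checked at the bottom, where the backedge to the loop header is taken.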
|
4847 static bool |
|
4848 CheckDoWhile(FunctionCompiler &f, ParseNode *whileStmt, const LabelVector *maybeLabels) |
|
4849 { |
|
4850 JS_ASSERT(whileStmt->isKind(PNK_DOWHILE)); |
|
4851 ParseNode *body = BinaryLeft(whileStmt); |
|
4852 ParseNode *cond = BinaryRight(whileStmt); |
|
4853 |
|
4854 MBasicBlock *loopEntry; |
|
4855 if (!f.startPendingLoop(whileStmt, &loopEntry, body)) |
|
4856 return false; |
|
4857 |
|
4858 if (!CheckStatement(f, body)) |
|
4859 return false; |
|
4860 |
|
4861 if (!f.bindContinues(whileStmt, maybeLabels)) |
|
4862 return false; |
|
4863 |
|
4864 MDefinition *condDef; |
|
4865 Type condType; |
|
4866 if (!CheckExpr(f, cond, &condDef, &condType)) |
|
4867 return false; |
|
4868 |
|
4869 if (!condType.isInt()) |
|
4870 return f.failf(cond, "%s is not a subtype of int", condType.toChars()); |
|
4871 |
|
4872 return f.branchAndCloseDoWhileLoop(condDef, loopEntry, NextNode(whileStmt)); |
|
4873 } |
|
4874 |
|
4875 static bool |
|
4876 CheckLabel(FunctionCompiler &f, ParseNode *labeledStmt, LabelVector *maybeLabels) |
|
4877 { |
|
4878 JS_ASSERT(labeledStmt->isKind(PNK_LABEL)); |
|
4879 PropertyName *label = LabeledStatementLabel(labeledStmt); |
|
4880 ParseNode *stmt = LabeledStatementStatement(labeledStmt); |
|
4881 |
|
4882 if (maybeLabels) { |
|
4883 if (!maybeLabels->append(label)) |
|
4884 return false; |
|
4885 if (!CheckStatement(f, stmt, maybeLabels)) |
|
4886 return false; |
|
4887 return true; |
|
4888 } |
|
4889 |
|
4890 LabelVector labels(f.cx()); |
|
4891 if (!labels.append(label)) |
|
4892 return false; |
|
4893 |
|
4894 if (!CheckStatement(f, stmt, &labels)) |
|
4895 return false; |
|
4896 |
|
4897 return f.bindLabeledBreaks(&labels, labeledStmt); |
|
4898 } |
|
4899 |
|
4900 static bool |
|
4901 CheckLeafCondition(FunctionCompiler &f, ParseNode *cond, ParseNode *thenStmt, ParseNode *elseOrJoinStmt, |
|
4902 MBasicBlock **thenBlock, MBasicBlock **elseOrJoinBlock) |
|
4903 { |
|
4904 MDefinition *condDef; |
|
4905 Type condType; |
|
4906 if (!CheckExpr(f, cond, &condDef, &condType)) |
|
4907 return false; |
|
4908 if (!condType.isInt()) |
|
4909 return f.failf(cond, "%s is not a subtype of int", condType.toChars()); |
|
4910 |
|
4911 if (!f.branchAndStartThen(condDef, thenBlock, elseOrJoinBlock, thenStmt, elseOrJoinStmt)) |
|
4912 return false; |
|
4913 return true; |
|
4914 } |
|
4915 |
|
4916 static bool |
|
4917 CheckIfCondition(FunctionCompiler &f, ParseNode *cond, ParseNode *thenStmt, ParseNode *elseOrJoinStmt, |
|
4918 MBasicBlock **thenBlock, MBasicBlock **elseOrJoinBlock); |
|
4919 |
|
4920 static bool |
|
4921 CheckIfConditional(FunctionCompiler &f, ParseNode *conditional, ParseNode *thenStmt, ParseNode *elseOrJoinStmt, |
|
4922 MBasicBlock **thenBlock, MBasicBlock **elseOrJoinBlock) |
|
4923 { |
|
4924 JS_ASSERT(conditional->isKind(PNK_CONDITIONAL)); |
|
4925 |
|
4926 // a ? b : c  <=>  (a && b) || (!a && c)

4927 // b is referred to as the AND condition, since both a and b must be true to reach it;

4928 // c is referred to as the OR condition, since it is reached whenever a is false.
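// For instance, "if (x ? y : 0)" behaves like "if (x && y)" and "if (x ? 1 : y)" like
// "if (x || y)"; the literal arms are detected below so the corresponding extra test and
// basic block can be skipped.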
|
4929 ParseNode *cond = TernaryKid1(conditional); |
|
4930 ParseNode *lhs = TernaryKid2(conditional); |
|
4931 ParseNode *rhs = TernaryKid3(conditional); |
|
4932 |
|
4933 MBasicBlock *maybeAndTest = nullptr, *maybeOrTest = nullptr; |
|
4934 MBasicBlock **ifTrueBlock = &maybeAndTest, **ifFalseBlock = &maybeOrTest; |
|
4935 ParseNode *ifTrueBlockNode = lhs, *ifFalseBlockNode = rhs; |
|
4936 |
|
4937 // Try to spot opportunities for short-circuiting in the AND subpart |
|
4938 uint32_t andTestLiteral = 0; |
|
4939 bool skipAndTest = false; |
|
4940 |
|
4941 if (IsLiteralInt(f.m(), lhs, &andTestLiteral)) { |
|
4942 skipAndTest = true; |
|
4943 if (andTestLiteral == 0) { |
|
4944 // (a ? 0 : b) is equivalent to !a && b |
|
4945 // If a is true, jump to the elseBlock directly |
|
4946 ifTrueBlock = elseOrJoinBlock; |
|
4947 ifTrueBlockNode = elseOrJoinStmt; |
|
4948 } else { |
|
4949 // (a ? 1 : b) is equivalent to a || b |
|
4950 // If a is true, jump to the thenBlock directly |
|
4951 ifTrueBlock = thenBlock; |
|
4952 ifTrueBlockNode = thenStmt; |
|
4953 } |
|
4954 } |
|
4955 |
|
4956 // Try to spot opportunities for short-circuiting in the OR subpart |
|
4957 uint32_t orTestLiteral = 0; |
|
4958 bool skipOrTest = false; |
|
4959 |
|
4960 if (IsLiteralInt(f.m(), rhs, &orTestLiteral)) { |
|
4961 skipOrTest = true; |
|
4962 if (orTestLiteral == 0) { |
|
4963 // (a ? b : 0) is equivalent to a && b |
|
4964 // If a is false, jump to the elseBlock directly |
|
4965 ifFalseBlock = elseOrJoinBlock; |
|
4966 ifFalseBlockNode = elseOrJoinStmt; |
|
4967 } else { |
|
4968 // (a ? b : 1) is equivalent to !a || b |
|
4969 // If a is false, jump to the thenBlock directly |
|
4970 ifFalseBlock = thenBlock; |
|
4971 ifFalseBlockNode = thenStmt; |
|
4972 } |
|
4973 } |
|
4974 |
|
4975 // Pathological cases: a ? 0 : 0 (i.e. false) or a ? 1 : 1 (i.e. true) |
|
4976 // These cases can't be optimized properly at this point: one of the blocks would be

4977 // created but never executed. Furthermore, that would introduce inconsistencies in the
|
4978 // MIR graph (even if we try to create a block by hand, it will have no predecessor, which |
|
4979 // breaks graph assumptions). The only way we could optimize it is to do it directly in |
|
4980 // CheckIf by removing the control flow entirely. |
|
4981 if (skipOrTest && skipAndTest && (!!orTestLiteral == !!andTestLiteral)) |
|
4982 return CheckLeafCondition(f, conditional, thenStmt, elseOrJoinStmt, thenBlock, elseOrJoinBlock); |
|
4983 |
|
4984 if (!CheckIfCondition(f, cond, ifTrueBlockNode, ifFalseBlockNode, ifTrueBlock, ifFalseBlock)) |
|
4985 return false; |
|
4986 f.assertCurrentBlockIs(*ifTrueBlock); |
|
4987 |
|
4988 // Add supplementary tests, if needed |
|
4989 if (!skipAndTest) { |
|
4990 if (!CheckIfCondition(f, lhs, thenStmt, elseOrJoinStmt, thenBlock, elseOrJoinBlock)) |
|
4991 return false; |
|
4992 f.assertCurrentBlockIs(*thenBlock); |
|
4993 } |
|
4994 |
|
4995 if (!skipOrTest) { |
|
4996 f.switchToElse(*ifFalseBlock); |
|
4997 if (!CheckIfCondition(f, rhs, thenStmt, elseOrJoinStmt, thenBlock, elseOrJoinBlock)) |
|
4998 return false; |
|
4999 f.assertCurrentBlockIs(*thenBlock); |
|
5000 } |
|
5001 |
|
5002 // We might not be on the thenBlock in one case |
|
5003 if (ifTrueBlock == elseOrJoinBlock) { |
|
5004 JS_ASSERT(skipAndTest && andTestLiteral == 0); |
|
5005 f.switchToElse(*thenBlock); |
|
5006 } |
|
5007 |
|
5008 // Check post-conditions |
|
5009 f.assertCurrentBlockIs(*thenBlock); |
|
5010 JS_ASSERT_IF(!f.inDeadCode(), *thenBlock && *elseOrJoinBlock); |
|
5011 return true; |
|
5012 } |
|
5013 |
|
5014 /* |
|
5015 * Recursive function that checks for a complex condition (formed with ternary |
|
5016 * conditionals) and creates the associated short-circuiting control flow graph. |
|
5017 * |
|
5018 * After a call to CheckIfCondition, the following are true:
|
5019 * - if *thenBlock and *elseOrJoinBlock were non-null on entry, their value is |
|
5020 * not changed by this function. |
|
5021 * - *thenBlock and *elseOrJoinBlock are non-null on exit. |
|
5022 * - the current block on exit is the *thenBlock. |
|
5023 */ |
|
5024 static bool |
|
5025 CheckIfCondition(FunctionCompiler &f, ParseNode *cond, ParseNode *thenStmt, |
|
5026 ParseNode *elseOrJoinStmt, MBasicBlock **thenBlock, MBasicBlock **elseOrJoinBlock) |
|
5027 { |
|
5028 JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); |
|
5029 |
|
5030 if (cond->isKind(PNK_CONDITIONAL)) |
|
5031 return CheckIfConditional(f, cond, thenStmt, elseOrJoinStmt, thenBlock, elseOrJoinBlock); |
|
5032 |
|
5033 // We've reached a leaf, i.e. an atomic condition |
|
5034 JS_ASSERT(!cond->isKind(PNK_CONDITIONAL)); |
|
5035 if (!CheckLeafCondition(f, cond, thenStmt, elseOrJoinStmt, thenBlock, elseOrJoinBlock)) |
|
5036 return false; |
|
5037 |
|
5038 // Check post-conditions |
|
5039 f.assertCurrentBlockIs(*thenBlock); |
|
5040 JS_ASSERT_IF(!f.inDeadCode(), *thenBlock && *elseOrJoinBlock); |
|
5041 return true; |
|
5042 } |
|
5043 |
|
5044 static bool |
|
5045 CheckIf(FunctionCompiler &f, ParseNode *ifStmt) |
|
5046 { |
|
5047 // Handle if/else-if chains using iteration instead of recursion. This |
|
5048 // avoids blowing the C stack quota for long if/else-if chains and also |
|
5049 // creates fewer MBasicBlocks at join points (by creating one join block |
|
5050 // for the entire if/else-if chain). |
|
5051 BlockVector thenBlocks(f.cx()); |
|
5052 |
|
5053 ParseNode *nextStmt = NextNode(ifStmt); |
|
5054 recurse: |
|
5055 JS_ASSERT(ifStmt->isKind(PNK_IF)); |
|
5056 ParseNode *cond = TernaryKid1(ifStmt); |
|
5057 ParseNode *thenStmt = TernaryKid2(ifStmt); |
|
5058 ParseNode *elseStmt = TernaryKid3(ifStmt); |
|
5059 |
|
5060 MBasicBlock *thenBlock = nullptr, *elseBlock = nullptr; |
|
5061 ParseNode *elseOrJoinStmt = elseStmt ? elseStmt : nextStmt; |
|
5062 |
|
5063 if (!CheckIfCondition(f, cond, thenStmt, elseOrJoinStmt, &thenBlock, &elseBlock)) |
|
5064 return false; |
|
5065 |
|
5066 if (!CheckStatement(f, thenStmt)) |
|
5067 return false; |
|
5068 |
|
5069 if (!f.appendThenBlock(&thenBlocks)) |
|
5070 return false; |
|
5071 |
|
5072 if (!elseStmt) { |
|
5073 if (!f.joinIf(thenBlocks, elseBlock)) |
|
5074 return false; |
|
5075 } else { |
|
5076 f.switchToElse(elseBlock); |
|
5077 |
|
5078 if (elseStmt->isKind(PNK_IF)) { |
|
5079 ifStmt = elseStmt; |
|
5080 goto recurse; |
|
5081 } |
|
5082 |
|
5083 if (!CheckStatement(f, elseStmt)) |
|
5084 return false; |
|
5085 |
|
5086 if (!f.joinIfElse(thenBlocks, nextStmt)) |
|
5087 return false; |
|
5088 } |
|
5089 |
|
5090 return true; |
|
5091 } |
|
5092 |
|
5093 static bool |
|
5094 CheckCaseExpr(FunctionCompiler &f, ParseNode *caseExpr, int32_t *value) |
|
5095 { |
|
5096 if (!IsNumericLiteral(f.m(), caseExpr)) |
|
5097 return f.fail(caseExpr, "switch case expression must be an integer literal"); |
|
5098 |
|
5099 NumLit literal = ExtractNumericLiteral(f.m(), caseExpr); |
|
5100 switch (literal.which()) { |
|
5101 case NumLit::Fixnum: |
|
5102 case NumLit::NegativeInt: |
|
5103 *value = literal.toInt32(); |
|
5104 break; |
|
5105 case NumLit::OutOfRangeInt: |
|
5106 case NumLit::BigUnsigned: |
|
5107 return f.fail(caseExpr, "switch case expression out of integer range"); |
|
5108 case NumLit::Double: |
|
5109 case NumLit::Float: |
|
5110 return f.fail(caseExpr, "switch case expression must be an integer literal"); |
|
5111 } |
|
5112 |
|
5113 return true; |
|
5114 } |
|
5115 |
|
5116 static bool |
|
5117 CheckDefaultAtEnd(FunctionCompiler &f, ParseNode *stmt) |
|
5118 { |
|
5119 for (; stmt; stmt = NextNode(stmt)) { |
|
5120 JS_ASSERT(stmt->isKind(PNK_CASE) || stmt->isKind(PNK_DEFAULT)); |
|
5121 if (stmt->isKind(PNK_DEFAULT) && NextNode(stmt) != nullptr) |
|
5122 return f.fail(stmt, "default label must be at the end"); |
|
5123 } |
|
5124 |
|
5125 return true; |
|
5126 } |
|
5127 |
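// asm.js switches compile to a dense jump table over the case range [low, high], so every
// case expression must be an integer literal; e.g. cases 3, 5 and 9 give a 7-entry table,
// with entries that have no matching case dispatching to the default block.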
|
5128 static bool |
|
5129 CheckSwitchRange(FunctionCompiler &f, ParseNode *stmt, int32_t *low, int32_t *high, |
|
5130 int32_t *tableLength) |
|
5131 { |
|
5132 if (stmt->isKind(PNK_DEFAULT)) { |
|
5133 *low = 0; |
|
5134 *high = -1; |
|
5135 *tableLength = 0; |
|
5136 return true; |
|
5137 } |
|
5138 |
|
5139 int32_t i = 0; |
|
5140 if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) |
|
5141 return false; |
|
5142 |
|
5143 *low = *high = i; |
|
5144 |
|
5145 ParseNode *initialStmt = stmt; |
|
5146 for (stmt = NextNode(stmt); stmt && stmt->isKind(PNK_CASE); stmt = NextNode(stmt)) { |
|
5147 int32_t i = 0; |
|
5148 if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) |
|
5149 return false; |
|
5150 |
|
5151 *low = Min(*low, i); |
|
5152 *high = Max(*high, i); |
|
5153 } |
|
5154 |
|
5155 int64_t i64 = (int64_t(*high) - int64_t(*low)) + 1; |
|
5156 if (i64 > 4*1024*1024) |
|
5157 return f.fail(initialStmt, "all switch statements generate tables; this table would be too big"); |
|
5158 |
|
5159 *tableLength = int32_t(i64); |
|
5160 return true; |
|
5161 } |
|
5162 |
|
5163 static bool |
|
5164 CheckSwitch(FunctionCompiler &f, ParseNode *switchStmt) |
|
5165 { |
|
5166 JS_ASSERT(switchStmt->isKind(PNK_SWITCH)); |
|
5167 ParseNode *switchExpr = BinaryLeft(switchStmt); |
|
5168 ParseNode *switchBody = BinaryRight(switchStmt); |
|
5169 |
|
5170 if (!switchBody->isKind(PNK_STATEMENTLIST)) |
|
5171 return f.fail(switchBody, "switch body may not contain 'let' declarations"); |
|
5172 |
|
5173 MDefinition *exprDef; |
|
5174 Type exprType; |
|
5175 if (!CheckExpr(f, switchExpr, &exprDef, &exprType)) |
|
5176 return false; |
|
5177 |
|
5178 if (!exprType.isSigned()) |
|
5179 return f.failf(switchExpr, "%s is not a subtype of signed", exprType.toChars()); |
|
5180 |
|
5181 ParseNode *stmt = ListHead(switchBody); |
|
5182 |
|
5183 if (!CheckDefaultAtEnd(f, stmt)) |
|
5184 return false; |
|
5185 |
|
5186 if (!stmt) |
|
5187 return true; |
|
5188 |
|
5189 int32_t low = 0, high = 0, tableLength = 0; |
|
5190 if (!CheckSwitchRange(f, stmt, &low, &high, &tableLength)) |
|
5191 return false; |
|
5192 |
|
5193 BlockVector cases(f.cx()); |
|
5194 if (!cases.resize(tableLength)) |
|
5195 return false; |
|
5196 |
|
5197 MBasicBlock *switchBlock; |
|
5198 if (!f.startSwitch(switchStmt, exprDef, low, high, &switchBlock)) |
|
5199 return false; |
|
5200 |
|
5201 for (; stmt && stmt->isKind(PNK_CASE); stmt = NextNode(stmt)) { |
|
5202 int32_t caseValue = ExtractNumericLiteral(f.m(), CaseExpr(stmt)).toInt32(); |
|
5203 unsigned caseIndex = caseValue - low; |
|
5204 |
|
5205 if (cases[caseIndex]) |
|
5206 return f.fail(stmt, "no duplicate case labels"); |
|
5207 |
|
5208 if (!f.startSwitchCase(switchBlock, &cases[caseIndex], stmt)) |
|
5209 return false; |
|
5210 |
|
5211 if (!CheckStatement(f, CaseBody(stmt))) |
|
5212 return false; |
|
5213 } |
|
5214 |
|
5215 MBasicBlock *defaultBlock; |
|
5216 if (!f.startSwitchDefault(switchBlock, &cases, &defaultBlock, stmt)) |
|
5217 return false; |
|
5218 |
|
5219 if (stmt && stmt->isKind(PNK_DEFAULT)) { |
|
5220 if (!CheckStatement(f, CaseBody(stmt))) |
|
5221 return false; |
|
5222 } |
|
5223 |
|
5224 return f.joinSwitch(switchBlock, cases, defaultBlock); |
|
5225 } |
|
5226 |
|
5227 static bool |
|
5228 CheckReturnType(FunctionCompiler &f, ParseNode *usepn, RetType retType) |
|
5229 { |
|
5230 if (!f.hasAlreadyReturned()) { |
|
5231 f.setReturnedType(retType); |
|
5232 return true; |
|
5233 } |
|
5234 |
|
5235 if (f.returnedType() != retType) { |
|
5236 return f.failf(usepn, "%s incompatible with previous return of type %s", |
|
5237 retType.toType().toChars(), f.returnedType().toType().toChars()); |
|
5238 } |
|
5239 |
|
5240 return true; |
|
5241 } |
|
5242 |
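// The return type is inferred from the returned expression: "return (x|0)" yields Signed,
// "return +x" Double, "return fround(x)" Float, and a bare "return" Void; CheckReturnType
// then insists that every return in the function agrees.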
|
5243 static bool |
|
5244 CheckReturn(FunctionCompiler &f, ParseNode *returnStmt) |
|
5245 { |
|
5246 ParseNode *expr = ReturnExpr(returnStmt); |
|
5247 |
|
5248 if (!expr) { |
|
5249 if (!CheckReturnType(f, returnStmt, RetType::Void)) |
|
5250 return false; |
|
5251 |
|
5252 f.returnVoid(); |
|
5253 return true; |
|
5254 } |
|
5255 |
|
5256 MDefinition *def; |
|
5257 Type type; |
|
5258 if (!CheckExpr(f, expr, &def, &type)) |
|
5259 return false; |
|
5260 |
|
5261 RetType retType; |
|
5262 if (type.isSigned()) |
|
5263 retType = RetType::Signed; |
|
5264 else if (type.isDouble()) |
|
5265 retType = RetType::Double; |
|
5266 else if (type.isFloat()) |
|
5267 retType = RetType::Float; |
|
5268 else if (type.isVoid()) |
|
5269 retType = RetType::Void; |
|
5270 else |
|
5271 return f.failf(expr, "%s is not a valid return type", type.toChars()); |
|
5272 |
|
5273 if (!CheckReturnType(f, expr, retType)) |
|
5274 return false; |
|
5275 |
|
5276 if (retType == RetType::Void) |
|
5277 f.returnVoid(); |
|
5278 else |
|
5279 f.returnExpr(def); |
|
5280 return true; |
|
5281 } |
|
5282 |
|
5283 static bool |
|
5284 CheckStatementList(FunctionCompiler &f, ParseNode *stmtList) |
|
5285 { |
|
5286 JS_ASSERT(stmtList->isKind(PNK_STATEMENTLIST)); |
|
5287 |
|
5288 for (ParseNode *stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) { |
|
5289 if (!CheckStatement(f, stmt)) |
|
5290 return false; |
|
5291 } |
|
5292 |
|
5293 return true; |
|
5294 } |
|
5295 |
|
5296 static bool |
|
5297 CheckStatement(FunctionCompiler &f, ParseNode *stmt, LabelVector *maybeLabels) |
|
5298 { |
|
5299 JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed()); |
|
5300 |
|
5301 if (!f.mirGen().ensureBallast()) |
|
5302 return false; |
|
5303 |
|
5304 switch (stmt->getKind()) { |
|
5305 case PNK_SEMI: return CheckExprStatement(f, stmt); |
|
5306 case PNK_WHILE: return CheckWhile(f, stmt, maybeLabels); |
|
5307 case PNK_FOR: return CheckFor(f, stmt, maybeLabels); |
|
5308 case PNK_DOWHILE: return CheckDoWhile(f, stmt, maybeLabels); |
|
5309 case PNK_LABEL: return CheckLabel(f, stmt, maybeLabels); |
|
5310 case PNK_IF: return CheckIf(f, stmt); |
|
5311 case PNK_SWITCH: return CheckSwitch(f, stmt); |
|
5312 case PNK_RETURN: return CheckReturn(f, stmt); |
|
5313 case PNK_STATEMENTLIST: return CheckStatementList(f, stmt); |
|
5314 case PNK_BREAK: return f.addBreak(LoopControlMaybeLabel(stmt)); |
|
5315 case PNK_CONTINUE: return f.addContinue(LoopControlMaybeLabel(stmt)); |
|
5316 default:; |
|
5317 } |
|
5318 |
|
5319 return f.fail(stmt, "unexpected statement kind"); |
|
5320 } |
|
5321 |
|
5322 static bool |
|
5323 ParseFunction(ModuleCompiler &m, ParseNode **fnOut) |
|
5324 { |
|
5325 TokenStream &tokenStream = m.tokenStream(); |
|
5326 |
|
5327 DebugOnly<TokenKind> tk = tokenStream.getToken(); |
|
5328 JS_ASSERT(tk == TOK_FUNCTION); |
|
5329 |
|
5330 RootedPropertyName name(m.cx()); |
|
5331 |
|
5332 TokenKind tt = tokenStream.getToken(); |
|
5333 if (tt == TOK_NAME) { |
|
5334 name = tokenStream.currentName(); |
|
5335 } else if (tt == TOK_YIELD) { |
|
5336 if (!m.parser().checkYieldNameValidity()) |
|
5337 return false; |
|
5338 name = m.cx()->names().yield; |
|
5339 } else { |
|
5340 return false; // The regular parser will throw a SyntaxError, no need to m.fail. |
|
5341 } |
|
5342 |
|
5343 ParseNode *fn = m.parser().handler.newFunctionDefinition(); |
|
5344 if (!fn) |
|
5345 return false; |
|
5346 |
|
5347 // This flows into FunctionBox, so must be tenured. |
|
5348 RootedFunction fun(m.cx(), NewFunction(m.cx(), NullPtr(), nullptr, 0, JSFunction::INTERPRETED, |
|
5349 m.cx()->global(), name, JSFunction::FinalizeKind, |
|
5350 TenuredObject)); |
|
5351 if (!fun) |
|
5352 return false; |
|
5353 |
|
5354 AsmJSParseContext *outerpc = m.parser().pc; |
|
5355 |
|
5356 Directives directives(outerpc); |
|
5357 FunctionBox *funbox = m.parser().newFunctionBox(fn, fun, outerpc, directives, NotGenerator); |
|
5358 if (!funbox) |
|
5359 return false; |
|
5360 |
|
5361 Directives newDirectives = directives; |
|
5362 AsmJSParseContext funpc(&m.parser(), outerpc, fn, funbox, &newDirectives, |
|
5363 outerpc->staticLevel + 1, outerpc->blockidGen, |
|
5364 /* blockScopeDepth = */ 0); |
|
5365 if (!funpc.init(tokenStream)) |
|
5366 return false; |
|
5367 |
|
5368 if (!m.parser().functionArgsAndBodyGeneric(fn, fun, Normal, Statement, &newDirectives)) |
|
5369 return false; |
|
5370 |
|
5371 if (tokenStream.hadError() || directives != newDirectives) |
|
5372 return false; |
|
5373 |
|
5374 outerpc->blockidGen = funpc.blockidGen; |
|
5375 fn->pn_blockid = outerpc->blockid(); |
|
5376 |
|
5377 *fnOut = fn; |
|
5378 return true; |
|
5379 } |
|
5380 |
|
5381 static bool |
|
5382 CheckFunction(ModuleCompiler &m, LifoAlloc &lifo, MIRGenerator **mir, ModuleCompiler::Func **funcOut) |
|
5383 { |
|
5384 int64_t before = PRMJ_Now(); |
|
5385 |
|
5386 // asm.js modules can be quite large when represented as parse trees so pop |
|
5387 // the backing LifoAlloc after parsing/compiling each function. |
|
5388 AsmJSParser::Mark mark = m.parser().mark(); |
|
5389 |
|
5390 ParseNode *fn; |
|
5391 if (!ParseFunction(m, &fn)) |
|
5392 return false; |
|
5393 |
|
5394 if (!CheckFunctionHead(m, fn)) |
|
5395 return false; |
|
5396 |
|
5397 FunctionCompiler f(m, fn, lifo); |
|
5398 if (!f.init()) |
|
5399 return false; |
|
5400 |
|
5401 ParseNode *stmtIter = ListHead(FunctionStatementList(fn)); |
|
5402 |
|
5403 VarTypeVector argTypes(m.lifo()); |
|
5404 if (!CheckArguments(f, &stmtIter, &argTypes)) |
|
5405 return false; |
|
5406 |
|
5407 if (!CheckVariables(f, &stmtIter)) |
|
5408 return false; |
|
5409 |
|
5410 if (!f.prepareToEmitMIR(argTypes)) |
|
5411 return false; |
|
5412 |
|
5413 ParseNode *lastNonEmptyStmt = nullptr; |
|
5414 for (; stmtIter; stmtIter = NextNode(stmtIter)) { |
|
5415 if (!CheckStatement(f, stmtIter)) |
|
5416 return false; |
|
5417 if (!IsEmptyStatement(stmtIter)) |
|
5418 lastNonEmptyStmt = stmtIter; |
|
5419 } |
|
5420 |
|
5421 RetType retType; |
|
5422 if (!CheckFinalReturn(f, lastNonEmptyStmt, &retType)) |
|
5423 return false; |
|
5424 |
|
5425 if (!CheckReturnType(f, lastNonEmptyStmt, retType)) |
|
5426 return false; |
|
5427 |
|
5428 Signature sig(Move(argTypes), retType); |
|
5429 ModuleCompiler::Func *func = nullptr; |
|
5430 if (!CheckFunctionSignature(m, fn, Move(sig), FunctionName(fn), &func)) |
|
5431 return false; |
|
5432 |
|
5433 if (func->defined()) |
|
5434 return m.failName(fn, "function '%s' already defined", FunctionName(fn)); |
|
5435 |
|
5436 uint32_t funcBegin = fn->pn_pos.begin; |
|
5437 uint32_t funcEnd = fn->pn_pos.end; |
|
5438 // The begin/end char range is relative to the beginning of the module, |
|
5439 // hence the assertions. |
|
5440 JS_ASSERT(funcBegin > m.moduleStart()); |
|
5441 JS_ASSERT(funcEnd > m.moduleStart()); |
|
5442 funcBegin -= m.moduleStart(); |
|
5443 funcEnd -= m.moduleStart(); |
|
5444 func->finish(funcBegin, funcEnd); |
|
5445 |
|
5446 func->accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC); |
|
5447 |
|
5448 m.parser().release(mark); |
|
5449 |
|
5450 // Copy the cumulative minimum heap size constraint to the MIR for use in analysis. The heap

5451 // length is also constrained to a set of valid lengths, so round up first - a larger required

5452 // heap length can help range analysis prove that bounds checks are not needed.
|
5453 uint32_t len = js::RoundUpToNextValidAsmJSHeapLength(m.minHeapLength()); |
|
5454 m.requireHeapLengthToBeAtLeast(len); |
|
5455 |
|
5456 *mir = f.extractMIR(); |
|
5457 (*mir)->noteMinAsmJSHeapLength(len); |
|
5458 *funcOut = func; |
|
5459 return true; |
|
5460 } |
|
5461 |
|
5462 static bool |
|
5463 GenerateCode(ModuleCompiler &m, ModuleCompiler::Func &func, MIRGenerator &mir, LIRGraph &lir) |
|
5464 { |
|
5465 int64_t before = PRMJ_Now(); |
|
5466 |
|
5467 // A single MacroAssembler is reused for all function compilations so |
|
5468 // that there is a single linear code segment for each module. To avoid |
|
5469 // spiking memory, a LifoAllocScope in the caller frees all MIR/LIR |
|
5470 // after each function is compiled. This method is responsible for cleaning |
|
5471 // out any dangling pointers that the MacroAssembler may have kept. |
|
5472 m.masm().resetForNewCodeGenerator(mir.alloc()); |
|
5473 |
|
5474 m.masm().bind(func.code()); |
|
5475 |
|
5476 ScopedJSDeletePtr<CodeGenerator> codegen(js_new<CodeGenerator>(&mir, &lir, &m.masm())); |
|
5477 if (!codegen || !codegen->generateAsmJS(&m.stackOverflowLabel())) |
|
5478 return m.fail(nullptr, "internal codegen failure (probably out of memory)"); |
|
5479 |
|
5480 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF) |
|
5481 // Profiling might not be active now, but it may be activated later (perhaps |
|
5482 // after the module has been cached and reloaded from the cache). Function |
|
5483 // profiling info isn't huge, so store it always (this applies only to --enable-profiling

5484 // builds, which currently means Nightly builds, where it is enabled by default).
|
5485 if (!m.trackProfiledFunction(func, m.masm().currentOffset())) |
|
5486 return false; |
|
5487 #endif |
|
5488 |
|
5489 #ifdef JS_ION_PERF |
|
5490 // Per-block profiling info uses significantly more memory so only store |
|
5491 // this information if it is actively requested. |
|
5492 if (PerfBlockEnabled()) { |
|
5493 if (!m.trackPerfProfiledBlocks(mir.perfSpewer(), func, m.masm().currentOffset())) |
|
5494 return false; |
|
5495 } |
|
5496 #endif |
|
5497 |
|
5498 // Align internal function headers. |
|
5499 m.masm().align(CodeAlignment); |
|
5500 |
|
5501 func.accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC); |
|
5502 if (!m.maybeReportCompileTime(func)) |
|
5503 return false; |
|
5504 |
|
5505 // Unlike regular IonMonkey which links and generates a new JitCode for |
|
5506 // every function, we accumulate all the functions in the module in a |
|
5507 // single MacroAssembler and link at end. Linking asm.js doesn't require a |
|
5508 // CodeGenerator so we can destroy it now. |
|
5509 return true; |
|
5510 } |
|
5511 |
|
5512 static bool |
|
5513 CheckAllFunctionsDefined(ModuleCompiler &m) |
|
5514 { |
|
5515 for (unsigned i = 0; i < m.numFunctions(); i++) { |
|
5516 if (!m.function(i).code()->bound()) |
|
5517 return m.failName(nullptr, "missing definition of function %s", m.function(i).name()); |
|
5518 } |
|
5519 |
|
5520 return true; |
|
5521 } |
|
5522 |
|
5523 static bool |
|
5524 CheckFunctionsSequential(ModuleCompiler &m) |
|
5525 { |
|
5526 // Use a single LifoAlloc to allocate all the temporary compiler IR. |
|
5527 // All allocated LifoAlloc'd memory is released after compiling each |
|
5528 // function by the LifoAllocScope inside the loop. |
|
5529 LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE); |
|
5530 |
|
5531 while (PeekToken(m.parser()) == TOK_FUNCTION) { |
|
5532 LifoAllocScope scope(&lifo); |
|
5533 |
|
5534 MIRGenerator *mir; |
|
5535 ModuleCompiler::Func *func; |
|
5536 if (!CheckFunction(m, lifo, &mir, &func)) |
|
5537 return false; |
|
5538 |
|
5539 int64_t before = PRMJ_Now(); |
|
5540 |
|
5541 IonContext icx(m.cx(), &mir->alloc()); |
|
5542 |
|
5543 IonSpewNewFunction(&mir->graph(), NullPtr()); |
|
5544 |
|
5545 if (!OptimizeMIR(mir)) |
|
5546 return m.failOffset(func->srcOffset(), "internal compiler failure (probably out of memory)"); |
|
5547 |
|
5548 LIRGraph *lir = GenerateLIR(mir); |
|
5549 if (!lir) |
|
5550 return m.failOffset(func->srcOffset(), "internal compiler failure (probably out of memory)"); |
|
5551 |
|
5552 func->accumulateCompileTime((PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC); |
|
5553 |
|
5554 if (!GenerateCode(m, *func, *mir, *lir)) |
|
5555 return false; |
|
5556 |
|
5557 IonSpewEndFunction(); |
|
5558 } |
|
5559 |
|
5560 if (!CheckAllFunctionsDefined(m)) |
|
5561 return false; |
|
5562 |
|
5563 return true; |
|
5564 } |
|
5565 |
|
5566 #ifdef JS_THREADSAFE |
|
5567 |
|
5568 // Currently, only one asm.js parallel compilation is allowed at a time. |
|
5569 // This RAII class attempts to claim this parallel compilation using atomic ops |
|
5570 // on rt->workerThreadState->asmJSCompilationInProgress. |
|
5571 class ParallelCompilationGuard |
|
5572 { |
|
5573 bool parallelState_; |
|
5574 public: |
|
5575 ParallelCompilationGuard() : parallelState_(false) {} |
|
5576 ~ParallelCompilationGuard() { |
|
5577 if (parallelState_) { |
|
5578 JS_ASSERT(WorkerThreadState().asmJSCompilationInProgress == true); |
|
5579 WorkerThreadState().asmJSCompilationInProgress = false; |
|
5580 } |
|
5581 } |
|
5582 bool claim() { |
|
5583 JS_ASSERT(!parallelState_); |
|
5584 if (!WorkerThreadState().asmJSCompilationInProgress.compareExchange(false, true)) |
|
5585 return false; |
|
5586 parallelState_ = true; |
|
5587 return true; |
|
5588 } |
|
5589 }; |
|
5590 |
|
5591 static bool |
|
5592 ParallelCompilationEnabled(ExclusiveContext *cx) |
|
5593 { |
|
5594 // If 'cx' isn't a JSContext, then we are already off the main thread so |
|
5595 // off-thread compilation must be enabled. However, since there are a fixed |
|
5596 // number of worker threads and one is already being consumed by this |
|
5597 // parsing task, ensure that there is another free thread to avoid deadlock.
|
5598 // (Note: there is at most one thread used for parsing so we don't have to |
|
5599 // worry about general dining philosophers.) |
|
5600 if (WorkerThreadState().threadCount <= 1) |
|
5601 return false; |
|
5602 |
|
5603 if (!cx->isJSContext()) |
|
5604 return true; |
|
5605 return cx->asJSContext()->runtime()->canUseParallelIonCompilation(); |
|
5606 } |
|
5607 |
|
5608 // State of compilation as tracked and updated by the main thread. |
|
5609 struct ParallelGroupState |
|
5610 { |
|
5611 js::Vector<AsmJSParallelTask> &tasks; |
|
5612 int32_t outstandingJobs; // Good work, jobs! |
|
5613 uint32_t compiledJobs; |
|
5614 |
|
5615 ParallelGroupState(js::Vector<AsmJSParallelTask> &tasks) |
|
5616 : tasks(tasks), outstandingJobs(0), compiledJobs(0) |
|
5617 { } |
|
5618 }; |
|
5619 |
|
5620 // Block until a worker-assigned LifoAlloc becomes finished. |
|
5621 static AsmJSParallelTask * |
|
5622 GetFinishedCompilation(ModuleCompiler &m, ParallelGroupState &group) |
|
5623 { |
|
5624 AutoLockWorkerThreadState lock; |
|
5625 |
|
5626 while (!WorkerThreadState().asmJSWorkerFailed()) { |
|
5627 if (!WorkerThreadState().asmJSFinishedList().empty()) { |
|
5628 group.outstandingJobs--; |
|
5629 return WorkerThreadState().asmJSFinishedList().popCopy(); |
|
5630 } |
|
5631 WorkerThreadState().wait(GlobalWorkerThreadState::CONSUMER); |
|
5632 } |
|
5633 |
|
5634 return nullptr; |
|
5635 } |
|
5636 |
|
5637 static bool |
|
5638 GenerateCodeForFinishedJob(ModuleCompiler &m, ParallelGroupState &group, AsmJSParallelTask **outTask) |
|
5639 { |
|
5640 // Block until a used LifoAlloc becomes available. |
|
5641 AsmJSParallelTask *task = GetFinishedCompilation(m, group); |
|
5642 if (!task) |
|
5643 return false; |
|
5644 |
|
5645 ModuleCompiler::Func &func = *reinterpret_cast<ModuleCompiler::Func *>(task->func); |
|
5646 func.accumulateCompileTime(task->compileTime); |
|
5647 |
|
5648 { |
|
5649 // Perform code generation on the main thread. |
|
5650 IonContext ionContext(m.cx(), &task->mir->alloc()); |
|
5651 if (!GenerateCode(m, func, *task->mir, *task->lir)) |
|
5652 return false; |
|
5653 } |
|
5654 |
|
5655 group.compiledJobs++; |
|
5656 |
|
5657 // Clear the LifoAlloc for use by another worker. |
|
5658 TempAllocator &tempAlloc = task->mir->alloc(); |
|
5659 tempAlloc.TempAllocator::~TempAllocator(); |
|
5660 task->lifo.releaseAll(); |
|
5661 |
|
5662 *outTask = task; |
|
5663 return true; |
|
5664 } |
|
5665 |
|
5666 static inline bool |
|
5667 GetUnusedTask(ParallelGroupState &group, uint32_t i, AsmJSParallelTask **outTask) |
|
5668 { |
|
5669 // Since functions are dispatched in order, if fewer than |numLifos| functions |
|
5670 // have been generated, then the |i'th| LifoAlloc must never have been |
|
5671 // assigned to a worker thread. |
|
5672 if (i >= group.tasks.length()) |
|
5673 return false; |
|
5674 *outTask = &group.tasks[i]; |
|
5675 return true; |
|
5676 } |
|
5677 |
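// The parallel pipeline: the main thread parses each function and builds its MIR, a worker
// thread runs the optimizations and produces LIR, and the main thread performs the final
// code generation into the shared MacroAssembler.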
|
5678 static bool |
|
5679 CheckFunctionsParallelImpl(ModuleCompiler &m, ParallelGroupState &group) |
|
5680 { |
|
5681 #ifdef DEBUG |
|
5682 { |
|
5683 AutoLockWorkerThreadState lock; |
|
5684 JS_ASSERT(WorkerThreadState().asmJSWorklist().empty()); |
|
5685 JS_ASSERT(WorkerThreadState().asmJSFinishedList().empty()); |
|
5686 } |
|
5687 #endif |
|
5688 WorkerThreadState().resetAsmJSFailureState(); |
|
5689 |
|
5690 for (unsigned i = 0; PeekToken(m.parser()) == TOK_FUNCTION; i++) { |
|
5691 // Get exclusive access to an empty LifoAlloc from the thread group's pool. |
|
5692 AsmJSParallelTask *task = nullptr; |
|
5693 if (!GetUnusedTask(group, i, &task) && !GenerateCodeForFinishedJob(m, group, &task)) |
|
5694 return false; |
|
5695 |
|
5696 // Generate MIR into the LifoAlloc on the main thread. |
|
5697 MIRGenerator *mir; |
|
5698 ModuleCompiler::Func *func; |
|
5699 if (!CheckFunction(m, task->lifo, &mir, &func)) |
|
5700 return false; |
|
5701 |
|
5702 // Perform optimizations and LIR generation on a worker thread. |
|
5703 task->init(m.cx()->compartment()->runtimeFromAnyThread(), func, mir); |
|
5704 if (!StartOffThreadAsmJSCompile(m.cx(), task)) |
|
5705 return false; |
|
5706 |
|
5707 group.outstandingJobs++; |
|
5708 } |
|
5709 |
|
5710 // Block for all outstanding workers to complete. |
|
5711 while (group.outstandingJobs > 0) { |
|
5712 AsmJSParallelTask *ignored = nullptr; |
|
5713 if (!GenerateCodeForFinishedJob(m, group, &ignored)) |
|
5714 return false; |
|
5715 } |
|
5716 |
|
5717 if (!CheckAllFunctionsDefined(m)) |
|
5718 return false; |
|
5719 |
|
5720 JS_ASSERT(group.outstandingJobs == 0); |
|
5721 JS_ASSERT(group.compiledJobs == m.numFunctions()); |
|
5722 #ifdef DEBUG |
|
5723 { |
|
5724 AutoLockWorkerThreadState lock; |
|
5725 JS_ASSERT(WorkerThreadState().asmJSWorklist().empty()); |
|
5726 JS_ASSERT(WorkerThreadState().asmJSFinishedList().empty()); |
|
5727 } |
|
5728 #endif |
|
5729 JS_ASSERT(!WorkerThreadState().asmJSWorkerFailed()); |
|
5730 return true; |
|
5731 } |
|
5732 |
|
5733 static void |
|
5734 CancelOutstandingJobs(ModuleCompiler &m, ParallelGroupState &group) |
|
5735 { |
|
5736 // This is failure-handling code, so it's not allowed to fail. |
|
5737 // The problem is that all memory for compilation is stored in LifoAllocs |
|
5738 // maintained in the scope of CheckFunctionsParallel() -- so in order |
|
5739 // for that function to safely return, and thereby remove the LifoAllocs, |
|
5740 // none of that memory can be in use or reachable by workers. |
|
5741 |
|
5742 JS_ASSERT(group.outstandingJobs >= 0); |
|
5743 if (!group.outstandingJobs) |
|
5744 return; |
|
5745 |
|
5746 AutoLockWorkerThreadState lock; |
|
5747 |
|
5748 // From the compiling tasks, eliminate those still waiting for worker assignment.
|
5749 group.outstandingJobs -= WorkerThreadState().asmJSWorklist().length(); |
|
5750 WorkerThreadState().asmJSWorklist().clear(); |
|
5751 |
|
5752 // From the compiling tasks, eliminate those waiting for codegen. |
|
5753 group.outstandingJobs -= WorkerThreadState().asmJSFinishedList().length(); |
|
5754 WorkerThreadState().asmJSFinishedList().clear(); |
|
5755 |
|
5756 // Eliminate tasks that failed without adding to the finished list. |
|
5757 group.outstandingJobs -= WorkerThreadState().harvestFailedAsmJSJobs(); |
|
5758 |
|
5759 // Any remaining tasks are therefore undergoing active compilation. |
|
5760 JS_ASSERT(group.outstandingJobs >= 0); |
|
5761 while (group.outstandingJobs > 0) { |
|
5762 WorkerThreadState().wait(GlobalWorkerThreadState::CONSUMER); |
|
5763 |
|
5764 group.outstandingJobs -= WorkerThreadState().harvestFailedAsmJSJobs(); |
|
5765 group.outstandingJobs -= WorkerThreadState().asmJSFinishedList().length(); |
|
5766 WorkerThreadState().asmJSFinishedList().clear(); |
|
5767 } |
|
5768 |
|
5769 JS_ASSERT(group.outstandingJobs == 0); |
|
5770 JS_ASSERT(WorkerThreadState().asmJSWorklist().empty()); |
|
5771 JS_ASSERT(WorkerThreadState().asmJSFinishedList().empty()); |
|
5772 } |
|
5773 |
|
5774 static const size_t LIFO_ALLOC_PARALLEL_CHUNK_SIZE = 1 << 12; |
|
5775 |
|
5776 static bool |
|
5777 CheckFunctionsParallel(ModuleCompiler &m) |
|
5778 { |
|
5779 // If parallel compilation isn't enabled (not enough cores, disabled by |
|
5780 // pref, etc) or another thread is currently compiling asm.js in parallel, |
|
5781 // fall back to sequential compilation. (We could lift the latter |
|
5782 // constraint by hoisting asmJS* state out of WorkerThreadState so multiple |
|
5783 // concurrent asm.js parallel compilations don't race.) |
|
5784 ParallelCompilationGuard g; |
|
5785 if (!ParallelCompilationEnabled(m.cx()) || !g.claim()) |
|
5786 return CheckFunctionsSequential(m); |
|
5787 |
|
5788 IonSpew(IonSpew_Logs, "Can't log asm.js script. (Compiled on background thread.)"); |
|
5789 |
|
5790 // Saturate all worker threads plus the main thread. |
|
5791 size_t numParallelJobs = WorkerThreadState().threadCount + 1; |
|
5792 |
|
5793 // Allocate scoped AsmJSParallelTask objects. Each contains a unique |
|
5794 // LifoAlloc that provides all necessary memory for compilation. |
|
5795 js::Vector<AsmJSParallelTask, 0> tasks(m.cx()); |
|
5796 if (!tasks.initCapacity(numParallelJobs)) |
|
5797 return false; |
|
5798 |
|
5799 for (size_t i = 0; i < numParallelJobs; i++) |
|
5800 tasks.infallibleAppend(LIFO_ALLOC_PARALLEL_CHUNK_SIZE); |
|
5801 |
|
5802 // With compilation memory in-scope, dispatch worker threads. |
|
5803 ParallelGroupState group(tasks); |
|
5804 if (!CheckFunctionsParallelImpl(m, group)) { |
|
5805 CancelOutstandingJobs(m, group); |
|
5806 |
|
5807 // If failure was triggered by a worker thread, report error. |
|
5808 if (void *maybeFunc = WorkerThreadState().maybeAsmJSFailedFunction()) { |
|
5809 ModuleCompiler::Func *func = reinterpret_cast<ModuleCompiler::Func *>(maybeFunc); |
|
5810 return m.failOffset(func->srcOffset(), "allocation failure during compilation"); |
|
5811 } |
|
5812 |
|
5813 // Otherwise, the error occurred on the main thread and was already reported. |
|
5814 return false; |
|
5815 } |
|
5816 return true; |
|
5817 } |
|
5818 #endif // JS_THREADSAFE |
|
5819 |
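// e.g. "var tbl = [f, g, h, k];" declares a function-pointer table. The power-of-two
// length requirement lets call sites mask the index (as in "tbl[i & 3](x|0)|0"), and every
// function in the table must share a single signature.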
|
5820 static bool |
|
5821 CheckFuncPtrTable(ModuleCompiler &m, ParseNode *var) |
|
5822 { |
|
5823 if (!IsDefinition(var)) |
|
5824 return m.fail(var, "function-pointer table name must be unique"); |
|
5825 |
|
5826 ParseNode *arrayLiteral = MaybeDefinitionInitializer(var); |
|
5827 if (!arrayLiteral || !arrayLiteral->isKind(PNK_ARRAY)) |
|
5828 return m.fail(var, "function-pointer table's initializer must be an array literal"); |
|
5829 |
|
5830 unsigned length = ListLength(arrayLiteral); |
|
5831 |
|
5832 if (!IsPowerOfTwo(length)) |
|
5833 return m.failf(arrayLiteral, "function-pointer table length must be a power of 2 (is %u)", length); |
|
5834 |
|
5835 unsigned mask = length - 1; |
|
5836 |
|
5837 ModuleCompiler::FuncPtrVector elems(m.cx()); |
|
5838 const Signature *firstSig = nullptr; |
|
5839 |
|
5840 for (ParseNode *elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) { |
|
5841 if (!elem->isKind(PNK_NAME)) |
|
5842 return m.fail(elem, "function-pointer table's elements must be names of functions"); |
|
5843 |
|
5844 PropertyName *funcName = elem->name(); |
|
5845 const ModuleCompiler::Func *func = m.lookupFunction(funcName); |
|
5846 if (!func) |
|
5847 return m.fail(elem, "function-pointer table's elements must be names of functions"); |
|
5848 |
|
5849 if (firstSig) { |
|
5850 if (*firstSig != func->sig()) |
|
5851 return m.fail(elem, "all functions in table must have same signature"); |
|
5852 } else { |
|
5853 firstSig = &func->sig(); |
|
5854 } |
|
5855 |
|
5856 if (!elems.append(func)) |
|
5857 return false; |
|
5858 } |
|
5859 |
|
5860 Signature sig(m.lifo()); |
|
5861 if (!sig.copy(*firstSig)) |
|
5862 return false; |
|
5863 |
|
5864 ModuleCompiler::FuncPtrTable *table; |
|
5865 if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), Move(sig), mask, &table)) |
|
5866 return false; |
|
5867 |
|
5868 table->initElems(Move(elems)); |
|
5869 return true; |
|
5870 } |
|
5871 |
|
5872 static bool |
|
5873 CheckFuncPtrTables(ModuleCompiler &m) |
|
5874 { |
|
5875 while (true) { |
|
5876 ParseNode *varStmt; |
|
5877 if (!ParseVarOrConstStatement(m.parser(), &varStmt)) |
|
5878 return false; |
|
5879 if (!varStmt) |
|
5880 break; |
|
5881 for (ParseNode *var = VarListHead(varStmt); var; var = NextNode(var)) { |
|
5882 if (!CheckFuncPtrTable(m, var)) |
|
5883 return false; |
|
5884 } |
|
5885 } |
|
5886 |
|
5887 for (unsigned i = 0; i < m.numFuncPtrTables(); i++) { |
|
5888 if (!m.funcPtrTable(i).initialized()) |
|
5889 return m.fail(nullptr, "expecting function-pointer table"); |
|
5890 } |
|
5891 |
|
5892 return true; |
|
5893 } |
|
5894 |
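// The module's return statement either exports a single function, "return f;", or an
// object literal of exports such as "return { add: add, mul: mul };"; both forms are
// handled below.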
|
5895 static bool |
|
5896 CheckModuleExportFunction(ModuleCompiler &m, ParseNode *returnExpr) |
|
5897 { |
|
5898 if (!returnExpr->isKind(PNK_NAME)) |
|
5899 return m.fail(returnExpr, "export statement must be of the form 'return name'"); |
|
5900 |
|
5901 PropertyName *funcName = returnExpr->name(); |
|
5902 |
|
5903 const ModuleCompiler::Func *func = m.lookupFunction(funcName); |
|
5904 if (!func) |
|
5905 return m.failName(returnExpr, "exported function name '%s' not found", funcName); |
|
5906 |
|
5907 return m.addExportedFunction(func, /* maybeFieldName = */ nullptr); |
|
5908 } |
|
5909 |
|
5910 static bool |
|
5911 CheckModuleExportObject(ModuleCompiler &m, ParseNode *object) |
|
5912 { |
|
5913 JS_ASSERT(object->isKind(PNK_OBJECT)); |
|
5914 |
|
5915 for (ParseNode *pn = ListHead(object); pn; pn = NextNode(pn)) { |
|
5916 if (!IsNormalObjectField(m.cx(), pn)) |
|
5917 return m.fail(pn, "only normal object properties may be used in the export object literal"); |
|
5918 |
|
5919 PropertyName *fieldName = ObjectNormalFieldName(m.cx(), pn); |
|
5920 |
|
5921 ParseNode *initNode = ObjectFieldInitializer(pn); |
|
5922 if (!initNode->isKind(PNK_NAME)) |
|
5923 return m.fail(initNode, "initializer of exported object literal must be name of function"); |
|
5924 |
|
5925 PropertyName *funcName = initNode->name(); |
|
5926 |
|
5927 const ModuleCompiler::Func *func = m.lookupFunction(funcName); |
|
5928 if (!func) |
|
5929 return m.failName(initNode, "exported function name '%s' not found", funcName); |
|
5930 |
|
5931 if (!m.addExportedFunction(func, fieldName)) |
|
5932 return false; |
|
5933 } |
|
5934 |
|
5935 return true; |
|
5936 } |
|
5937 |
|
5938 static bool |
|
5939 CheckModuleReturn(ModuleCompiler &m) |
|
5940 { |
|
5941 if (PeekToken(m.parser()) != TOK_RETURN) { |
|
5942 TokenKind tk = PeekToken(m.parser()); |
|
5943 if (tk == TOK_RC || tk == TOK_EOF) |
|
5944 return m.fail(nullptr, "expecting return statement"); |
|
5945 return m.fail(nullptr, "invalid asm.js statement"); |
|
5946 } |
|
5947 |
|
5948 ParseNode *returnStmt = m.parser().statement(); |
|
5949 if (!returnStmt) |
|
5950 return false; |
|
5951 |
|
5952 ParseNode *returnExpr = ReturnExpr(returnStmt); |
|
5953 if (!returnExpr) |
|
5954 return m.fail(returnStmt, "export statement must return something"); |
|
5955 |
|
5956 if (returnExpr->isKind(PNK_OBJECT)) { |
|
5957 if (!CheckModuleExportObject(m, returnExpr)) |
|
5958 return false; |
|
5959 } else { |
|
5960 if (!CheckModuleExportFunction(m, returnExpr)) |
|
5961 return false; |
|
5962 } |
|
5963 |
|
5964 // Function statements are not added to the lexical scope in ParseContext |
|
5965 // (since cx->tempLifoAlloc is marked/released after each function |
|
5966 // statement) and thus all the identifiers in the return statement will be |
|
5967 // mistaken as free variables and added to lexdeps. Clear these now. |
|
5968 m.parser().pc->lexdeps->clear(); |
|
5969 return true; |
|
5970 } |
|
5971 |
|
5972 // All registers except the stack pointer. |
|
5973 static const RegisterSet AllRegsExceptSP = |
|
5974 RegisterSet(GeneralRegisterSet(Registers::AllMask & |
|
5975 ~(uint32_t(1) << Registers::StackPointer)), |
|
5976 FloatRegisterSet(FloatRegisters::AllMask)); |
|
5977 #if defined(JS_CODEGEN_ARM) |
|
5978 // The ARM system ABI also includes d15 in the non-volatile float registers.
|
5979 static const RegisterSet NonVolatileRegs = |
|
5980 RegisterSet(GeneralRegisterSet(Registers::NonVolatileMask), |
|
5981 FloatRegisterSet(FloatRegisters::NonVolatileMask | (1 << FloatRegisters::d15))); |
|
5982 #else |
|
5983 static const RegisterSet NonVolatileRegs = |
|
5984 RegisterSet(GeneralRegisterSet(Registers::NonVolatileMask), |
|
5985 FloatRegisterSet(FloatRegisters::NonVolatileMask)); |
|
5986 #endif |
|
5987 |
|
5988 static void |
|
5989 LoadAsmJSActivationIntoRegister(MacroAssembler &masm, Register reg) |
|
5990 { |
|
5991 masm.movePtr(AsmJSImm_Runtime, reg); |
|
5992 size_t offset = offsetof(JSRuntime, mainThread) + |
|
5993 PerThreadData::offsetOfAsmJSActivationStackReadOnly(); |
|
5994 masm.loadPtr(Address(reg, offset), reg); |
|
5995 } |
|
5996 |
|
5997 static void |
|
5998 LoadJSContextFromActivation(MacroAssembler &masm, Register activation, Register dest) |
|
5999 { |
|
6000 masm.loadPtr(Address(activation, AsmJSActivation::offsetOfContext()), dest); |
|
6001 } |
|
6002 |
|
6003 static void |
|
6004 AssertStackAlignment(MacroAssembler &masm) |
|
6005 { |
|
6006 JS_ASSERT((AlignmentAtPrologue + masm.framePushed()) % StackAlignment == 0); |
|
6007 #ifdef DEBUG |
|
6008 Label ok; |
|
6009 JS_ASSERT(IsPowerOfTwo(StackAlignment)); |
|
6010 masm.branchTestPtr(Assembler::Zero, StackPointer, Imm32(StackAlignment - 1), &ok); |
|
6011 masm.assumeUnreachable("Stack should be aligned."); |
|
6012 masm.bind(&ok); |
|
6013 #endif |
|
6014 } |
|
6015 |
|
6016 template <class VectorT> |
|
6017 static unsigned |
|
6018 StackArgBytes(const VectorT &argTypes) |
|
6019 { |
|
6020 ABIArgIter<VectorT> iter(argTypes); |
|
6021 while (!iter.done()) |
|
6022 iter++; |
|
6023 return iter.stackBytesConsumedSoFar(); |
|
6024 } |
|
6025 |
|
6026 static unsigned |
|
6027 StackDecrementForCall(MacroAssembler &masm, unsigned bytesToPush) |
|
6028 { |
|
6029 // Include extra padding so that, after pushing the bytesToPush, |
|
6030 // the stack is aligned for a call instruction. |
|
6031 unsigned alreadyPushed = AlignmentAtPrologue + masm.framePushed(); |
|
6032 return AlignBytes(alreadyPushed + bytesToPush, StackAlignment) - alreadyPushed; |
|
6033 } |
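// For instance, with a 16-byte StackAlignment, 8 bytes already pushed, and 20 bytes of
// arguments to push, the decrement is AlignBytes(28, 16) - 8 = 24, which leaves the stack
// 16-byte aligned at the call with room for all the argument bytes.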
|
6034 |
|
6035 template <class VectorT> |
|
6036 static unsigned |
|
6037 StackDecrementForCall(MacroAssembler &masm, const VectorT &argTypes, unsigned extraBytes = 0) |
|
6038 { |
|
6039 return StackDecrementForCall(masm, StackArgBytes(argTypes) + extraBytes); |
|
6040 } |
|
6041 |
|
6042 static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) + |
|
6043 NonVolatileRegs.fpus().size() * sizeof(double); |
|
6044 |
|
6045 // On arm, we need to include an extra word of space at the top of the stack so |
|
6046 // we can explicitly store the return address before making the call to C++ or |
|
6047 // Ion. On x86/x64, this isn't necessary since the call instruction pushes the |
|
6048 // return address. |
|
6049 #ifdef JS_CODEGEN_ARM |
|
6050 static const unsigned MaybeRetAddr = sizeof(void*); |
|
6051 #else |
|
6052 static const unsigned MaybeRetAddr = 0; |
|
6053 #endif |
|
6054 |
|
6055 static bool |
|
6056 GenerateEntry(ModuleCompiler &m, const AsmJSModule::ExportedFunction &exportedFunc) |
|
6057 { |
|
6058 MacroAssembler &masm = m.masm(); |
|
6059 |
|
6060 // In contrast to the system ABI, the Ion convention is that all registers
|
6061 // are clobbered by calls. Thus, we must save the caller's non-volatile |
|
6062 // registers. |
|
6063 // |
|
6064 // NB: GenerateExits assumes that masm.framePushed() == 0 before |
|
6065 // PushRegsInMask(NonVolatileRegs). |
|
6066 masm.setFramePushed(0); |
|
6067 masm.PushRegsInMask(NonVolatileRegs); |
|
6068 JS_ASSERT(masm.framePushed() == FramePushedAfterSave); |
|
6069 |
|
6070 // Remember the stack pointer in the current AsmJSActivation. This will be |
|
6071 // used by error exit paths to set the stack pointer back to what it was |
|
6072 // right after the (C++) caller's non-volatile registers were saved so that |
|
6073 // they can be restored. |
|
6074 Register activation = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6075 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6076 masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfErrorRejoinSP())); |
|
6077 |
|
6078 // ARM has a globally-pinned GlobalReg (x64 uses RIP-relative addressing, |
|
6079 // x86 uses immediates in effective addresses) and NaN register (used as |
|
6080 // part of the out-of-bounds handling in heap loads/stores). |
|
6081 #if defined(JS_CODEGEN_ARM) |
|
6082 masm.movePtr(IntArgReg1, GlobalReg); |
|
6083 masm.ma_vimm(GenericNaN(), NANReg); |
|
6084 #endif |
|
6085 |
|
6086 // ARM and x64 have a globally-pinned HeapReg (x86 uses immediates in |
|
6087 // effective addresses). |
|
6088 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) |
|
6089 masm.loadPtr(Address(IntArgReg1, m.module().heapOffset()), HeapReg); |
|
6090 #endif |
|
6091 |
|
6092 // Get 'argv' into a non-arg register and save it on the stack. |
|
6093 Register argv = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6094 Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1; |
|
6095 #if defined(JS_CODEGEN_X86) |
|
6096 masm.loadPtr(Address(StackPointer, NativeFrameSize + masm.framePushed()), argv); |
|
6097 #else |
|
6098 masm.movePtr(IntArgReg0, argv); |
|
6099 #endif |
|
6100 masm.Push(argv); |
|
6101 |
|
6102 // Bump the stack for the call. |
|
6103 const ModuleCompiler::Func &func = *m.lookupFunction(exportedFunc.name()); |
|
6104 unsigned stackDec = StackDecrementForCall(masm, func.sig().args()); |
|
6105 masm.reserveStack(stackDec); |
|
6106 |
|
6107 // Copy parameters out of argv and into the registers/stack-slots specified by |
|
6108 // the system ABI. |
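// Note that every incoming argument occupies a full 64-bit slot in argv, so
// iter.index() * sizeof(uint64_t) addresses the i'th value whether it is an int32 or a double.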
|
6109 for (ABIArgTypeIter iter(func.sig().args()); !iter.done(); iter++) { |
|
6110 unsigned argOffset = iter.index() * sizeof(uint64_t); |
|
6111 Address src(argv, argOffset); |
|
6112 switch (iter->kind()) { |
|
6113 case ABIArg::GPR: |
|
6114 masm.load32(src, iter->gpr()); |
|
6115 break; |
|
6116 case ABIArg::FPU: |
|
6117 masm.loadDouble(src, iter->fpu()); |
|
6118 break; |
|
6119 case ABIArg::Stack: |
|
6120 if (iter.mirType() == MIRType_Int32) { |
|
6121 masm.load32(src, scratch); |
|
6122 masm.storePtr(scratch, Address(StackPointer, iter->offsetFromArgBase())); |
|
6123 } else { |
|
6124 JS_ASSERT(iter.mirType() == MIRType_Double || iter.mirType() == MIRType_Float32); |
|
6125 masm.loadDouble(src, ScratchFloatReg); |
|
6126 masm.storeDouble(ScratchFloatReg, Address(StackPointer, iter->offsetFromArgBase())); |
|
6127 } |
|
6128 break; |
|
6129 } |
|
6130 } |
|
6131 |
|
6132 // Call into the real function. |
|
6133 AssertStackAlignment(masm); |
|
6134 masm.call(CallSiteDesc::Entry(), func.code()); |
|
6135 |
|
6136 // Pop the stack and recover the original 'argv' argument passed to the |
|
6137 // trampoline (which was pushed on the stack). |
|
6138 masm.freeStack(stackDec); |
|
6139 masm.Pop(argv); |
|
6140 |
|
6141 // Store the return value in argv[0] |
|
6142 switch (func.sig().retType().which()) { |
|
6143 case RetType::Void: |
|
6144 break; |
|
6145 case RetType::Signed: |
|
6146 masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0)); |
|
6147 break; |
|
6148 case RetType::Float: |
|
6149 masm.convertFloat32ToDouble(ReturnFloatReg, ReturnFloatReg); |
|
6150 // Fall through as ReturnFloatReg now contains a Double |
|
6151 case RetType::Double: |
|
6152 masm.canonicalizeDouble(ReturnFloatReg); |
|
6153 masm.storeDouble(ReturnFloatReg, Address(argv, 0)); |
|
6154 break; |
|
6155 } |
|
6156 |
|
6157 // Restore clobbered non-volatile registers of the caller. |
|
6158 masm.PopRegsInMask(NonVolatileRegs); |
|
6159 |
|
6160 JS_ASSERT(masm.framePushed() == 0); |
|
6161 |
|
6162 masm.move32(Imm32(true), ReturnReg); |
|
6163 masm.abiret(); |
|
6164 return true; |
|
6165 } |
|
6166 |
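// Once the callee has an Ion-compiled script whose 'this' and argument type sets already
// cover the values asm.js passes, the slow Invoke-based exit can be redirected through the
// Ion exit trampoline; TryEnablingIon below performs those checks and patches the exit.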
|
6167 static inline bool |
|
6168 TryEnablingIon(JSContext *cx, AsmJSModule &module, HandleFunction fun, uint32_t exitIndex, |
|
6169 int32_t argc, Value *argv) |
|
6170 { |
|
6171 if (!fun->hasScript()) |
|
6172 return true; |
|
6173 |
|
6174 // Test if the function is Ion compiled |
|
6175 JSScript *script = fun->nonLazyScript(); |
|
6176 if (!script->hasIonScript()) |
|
6177 return true; |
|
6178 |
|
6179 // Currently we can't rectify arguments, so don't enable the fast path if argc is too low.
|
6180 if (fun->nargs() > size_t(argc)) |
|
6181 return true; |
|
6182 |
|
6183     // Normally the types should correspond, since we just ran with those types,

6184     // but there are reports of this asserting, so we check at runtime instead of only in DEBUG builds.
|
6185 if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::UndefinedType())) |
|
6186 return true; |
|
6187     for (uint32_t i = 0; i < fun->nargs(); i++) {
|
6188 types::StackTypeSet *typeset = types::TypeScript::ArgTypes(script, i); |
|
6189 types::Type type = types::Type::DoubleType(); |
|
6190 if (!argv[i].isDouble()) |
|
6191 type = types::Type::PrimitiveType(argv[i].extractNonDoubleType()); |
|
6192 if (!typeset->hasType(type)) |
|
6193 return true; |
|
6194 } |
|
6195 |
|
6196     // Enable the fast path: route future calls for this exit through the Ion exit trampoline.
|
6197 IonScript *ionScript = script->ionScript(); |
|
6198 if (!ionScript->addDependentAsmJSModule(cx, DependentAsmJSModuleExit(&module, exitIndex))) |
|
6199 return false; |
|
6200 |
|
6201 module.exitIndexToGlobalDatum(exitIndex).exit = module.ionExitTrampoline(module.exit(exitIndex)); |
|
6202 return true; |
|
6203 } |
|
6204 |
|
6205 namespace js { |
|
6206 |
|
6207 int32_t |
|
6208 InvokeFromAsmJS_Ignore(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv) |
|
6209 { |
|
6210 AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module(); |
|
6211 |
|
6212 RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun); |
|
6213 RootedValue fval(cx, ObjectValue(*fun)); |
|
6214 RootedValue rval(cx); |
|
6215 if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval)) |
|
6216 return false; |
|
6217 |
|
6218 if (!TryEnablingIon(cx, module, fun, exitIndex, argc, argv)) |
|
6219 return false; |
|
6220 |
|
6221 return true; |
|
6222 } |
|
6223 |
|
6224 int32_t |
|
6225 InvokeFromAsmJS_ToInt32(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv) |
|
6226 { |
|
6227 AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module(); |
|
6228 |
|
6229 RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun); |
|
6230 RootedValue fval(cx, ObjectValue(*fun)); |
|
6231 RootedValue rval(cx); |
|
6232 if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval)) |
|
6233 return false; |
|
6234 |
|
6235 if (!TryEnablingIon(cx, module, fun, exitIndex, argc, argv)) |
|
6236 return false; |
|
6237 |
|
6238 int32_t i32; |
|
6239 if (!ToInt32(cx, rval, &i32)) |
|
6240 return false; |
|
6241 argv[0] = Int32Value(i32); |
|
6242 |
|
6243 return true; |
|
6244 } |
|
6245 |
|
6246 int32_t |
|
6247 InvokeFromAsmJS_ToNumber(JSContext *cx, int32_t exitIndex, int32_t argc, Value *argv) |
|
6248 { |
|
6249 AsmJSModule &module = cx->mainThread().asmJSActivationStackFromOwnerThread()->module(); |
|
6250 |
|
6251 RootedFunction fun(cx, module.exitIndexToGlobalDatum(exitIndex).fun); |
|
6252 RootedValue fval(cx, ObjectValue(*fun)); |
|
6253 RootedValue rval(cx); |
|
6254 if (!Invoke(cx, UndefinedValue(), fval, argc, argv, &rval)) |
|
6255 return false; |
|
6256 |
|
6257 if (!TryEnablingIon(cx, module, fun, exitIndex, argc, argv)) |
|
6258 return false; |
|
6259 |
|
6260 double dbl; |
|
6261 if (!ToNumber(cx, rval, &dbl)) |
|
6262 return false; |
|
6263 argv[0] = DoubleValue(dbl); |
|
6264 |
|
6265 return true; |
|
6266 } |
|
6267 |
|
6268 } // namespace js |
|
6269 |
|
6270 static void |
|
6271 FillArgumentArray(ModuleCompiler &m, const VarTypeVector &argTypes, |
|
6272 unsigned offsetToArgs, unsigned offsetToCallerStackArgs, |
|
6273 Register scratch) |
|
6274 { |
|
6275 MacroAssembler &masm = m.masm(); |
|
6276 |
|
6277 for (ABIArgTypeIter i(argTypes); !i.done(); i++) { |
|
6278 Address dstAddr = Address(StackPointer, offsetToArgs + i.index() * sizeof(Value)); |
|
6279 switch (i->kind()) { |
|
6280 case ABIArg::GPR: |
|
6281 masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dstAddr); |
|
6282 break; |
|
6283 case ABIArg::FPU: { |
|
6284 masm.canonicalizeDouble(i->fpu()); |
|
6285 masm.storeDouble(i->fpu(), dstAddr); |
|
6286 break; |
|
6287 } |
|
6288 case ABIArg::Stack: |
|
6289 if (i.mirType() == MIRType_Int32) { |
|
6290 Address src(StackPointer, offsetToCallerStackArgs + i->offsetFromArgBase()); |
|
6291 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) |
|
6292 masm.load32(src, scratch); |
|
6293 masm.storeValue(JSVAL_TYPE_INT32, scratch, dstAddr); |
|
6294 #else |
|
6295 masm.memIntToValue(src, dstAddr); |
|
6296 #endif |
|
6297 } else { |
|
6298 JS_ASSERT(i.mirType() == MIRType_Double); |
|
6299 Address src(StackPointer, offsetToCallerStackArgs + i->offsetFromArgBase()); |
|
6300 masm.loadDouble(src, ScratchFloatReg); |
|
6301 masm.canonicalizeDouble(ScratchFloatReg); |
|
6302 masm.storeDouble(ScratchFloatReg, dstAddr); |
|
6303 } |
|
6304 break; |
|
6305 } |
|
6306 } |
|
6307 } |
|
6308 |
|
6309 static void |
|
6310 GenerateFFIInterpreterExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, |
|
6311 unsigned exitIndex, Label *throwLabel) |
|
6312 { |
|
6313 MacroAssembler &masm = m.masm(); |
|
6314 masm.align(CodeAlignment); |
|
6315 m.setInterpExitOffset(exitIndex); |
|
6316 masm.setFramePushed(0); |
|
6317 #if defined(JS_CODEGEN_ARM) |
|
6318 masm.Push(lr); |
|
6319 #endif |
|
6320 |
|
6321 MIRType typeArray[] = { MIRType_Pointer, // cx |
|
6322 MIRType_Pointer, // exitDatum |
|
6323 MIRType_Int32, // argc |
|
6324 MIRType_Pointer }; // argv |
|
6325 MIRTypeVector invokeArgTypes(m.cx()); |
|
6326 invokeArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray)); |
|
6327 |
|
6328 // The stack layout looks like: |
|
6329 // | return address | stack arguments | array of values | |
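// Comment-only note: with that layout the Value array sits above the outgoing stack
// arguments, which is why the reservation below adds arraySize + MaybeRetAddr on top
// of the argument area and why offsetToArgv is StackArgBytes(invokeArgTypes) + MaybeRetAddr.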
|
6330 unsigned arraySize = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value); |
|
6331 unsigned stackDec = StackDecrementForCall(masm, invokeArgTypes, arraySize + MaybeRetAddr); |
|
6332 masm.reserveStack(stackDec); |
|
6333 |
|
6334 // Fill the argument array. |
|
6335 unsigned offsetToCallerStackArgs = AlignmentAtPrologue + masm.framePushed(); |
|
6336 unsigned offsetToArgv = StackArgBytes(invokeArgTypes) + MaybeRetAddr; |
|
6337 Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6338 FillArgumentArray(m, exit.sig().args(), offsetToArgv, offsetToCallerStackArgs, scratch); |
|
6339 |
|
6340 // Prepare the arguments for the call to InvokeFromAsmJS_*. |
|
6341 ABIArgMIRTypeIter i(invokeArgTypes); |
|
6342 Register activation = ABIArgGenerator::NonArgReturnVolatileReg1; |
|
6343 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6344 |
|
6345 // Record sp in the AsmJSActivation for stack-walking. |
|
6346 masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP())); |
|
6347 |
|
6348 // argument 0: cx |
|
6349 if (i->kind() == ABIArg::GPR) { |
|
6350 LoadJSContextFromActivation(masm, activation, i->gpr()); |
|
6351 } else { |
|
6352 LoadJSContextFromActivation(masm, activation, scratch); |
|
6353 masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase())); |
|
6354 } |
|
6355 i++; |
|
6356 |
|
6357 // argument 1: exitIndex |
|
6358 if (i->kind() == ABIArg::GPR) |
|
6359 masm.mov(ImmWord(exitIndex), i->gpr()); |
|
6360 else |
|
6361 masm.store32(Imm32(exitIndex), Address(StackPointer, i->offsetFromArgBase())); |
|
6362 i++; |
|
6363 |
|
6364 // argument 2: argc |
|
6365 unsigned argc = exit.sig().args().length(); |
|
6366 if (i->kind() == ABIArg::GPR) |
|
6367 masm.mov(ImmWord(argc), i->gpr()); |
|
6368 else |
|
6369 masm.store32(Imm32(argc), Address(StackPointer, i->offsetFromArgBase())); |
|
6370 i++; |
|
6371 |
|
6372 // argument 3: argv |
|
6373 Address argv(StackPointer, offsetToArgv); |
|
6374 if (i->kind() == ABIArg::GPR) { |
|
6375 masm.computeEffectiveAddress(argv, i->gpr()); |
|
6376 } else { |
|
6377 masm.computeEffectiveAddress(argv, scratch); |
|
6378 masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase())); |
|
6379 } |
|
6380 i++; |
|
6381 JS_ASSERT(i.done()); |
|
6382 |
|
6383 // Make the call, test whether it succeeded, and extract the return value. |
|
6384 AssertStackAlignment(masm); |
|
6385 switch (exit.sig().retType().which()) { |
|
6386 case RetType::Void: |
|
6387 masm.callExit(AsmJSImm_InvokeFromAsmJS_Ignore, i.stackBytesConsumedSoFar()); |
|
6388 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); |
|
6389 break; |
|
6390 case RetType::Signed: |
|
6391 masm.callExit(AsmJSImm_InvokeFromAsmJS_ToInt32, i.stackBytesConsumedSoFar()); |
|
6392 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); |
|
6393 masm.unboxInt32(argv, ReturnReg); |
|
6394 break; |
|
6395 case RetType::Double: |
|
6396 masm.callExit(AsmJSImm_InvokeFromAsmJS_ToNumber, i.stackBytesConsumedSoFar()); |
|
6397 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); |
|
6398 masm.loadDouble(argv, ReturnFloatReg); |
|
6399 break; |
|
6400 case RetType::Float: |
|
6401 MOZ_ASSUME_UNREACHABLE("Float32 shouldn't be returned from a FFI"); |
|
6402 break; |
|
6403 } |
|
6404 |
|
6405 // Note: the caller is IonMonkey code which means there are no non-volatile |
|
6406 // registers to restore. |
|
6407 masm.freeStack(stackDec); |
|
6408 masm.ret(); |
|
6409 } |
|
6410 |
|
6411 static void |
|
6412 GenerateOOLConvert(ModuleCompiler &m, RetType retType, Label *throwLabel) |
|
6413 { |
|
6414 MacroAssembler &masm = m.masm(); |
|
6415 |
|
6416 MIRType typeArray[] = { MIRType_Pointer, // cx |
|
6417 MIRType_Pointer }; // argv |
|
6418 MIRTypeVector callArgTypes(m.cx()); |
|
6419 callArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray)); |
|
6420 |
|
6421 // The stack is assumed to be aligned. The frame is allocated by GenerateFFIIonExit and |
|
6422     // the stack usage here needs to be kept in sync with GenerateFFIIonExit.
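// Rough comment-only sketch of that shared frame, from the stack pointer upward:
//
//     | outgoing C ABI args for (cx, argv) | MaybeRetAddr | Value being coerced |
//
// so offsetToArgv below (StackArgBytes(callArgTypes) + MaybeRetAddr) must match the
// stackDecForOOLCall reservation (callArgTypes plus sizeof(Value) + MaybeRetAddr)
// made in GenerateFFIIonExit.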
|
6423 |
|
6424     // Store the value to be coerced into the argv slot.
|
6425 unsigned offsetToArgv = StackArgBytes(callArgTypes) + MaybeRetAddr; |
|
6426 masm.storeValue(JSReturnOperand, Address(StackPointer, offsetToArgv)); |
|
6427 |
|
6428 Register scratch = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6429 Register activation = ABIArgGenerator::NonArgReturnVolatileReg1; |
|
6430 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6431 |
|
6432 // Record sp in the AsmJSActivation for stack-walking. |
|
6433 masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP())); |
|
6434 |
|
6435 // Store real arguments |
|
6436 ABIArgMIRTypeIter i(callArgTypes); |
|
6437 |
|
6438 // argument 0: cx |
|
6439 if (i->kind() == ABIArg::GPR) { |
|
6440 LoadJSContextFromActivation(masm, activation, i->gpr()); |
|
6441 } else { |
|
6442 LoadJSContextFromActivation(masm, activation, scratch); |
|
6443 masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase())); |
|
6444 } |
|
6445 i++; |
|
6446 |
|
6447 // argument 1: argv |
|
6448 Address argv(StackPointer, offsetToArgv); |
|
6449 if (i->kind() == ABIArg::GPR) { |
|
6450 masm.computeEffectiveAddress(argv, i->gpr()); |
|
6451 } else { |
|
6452 masm.computeEffectiveAddress(argv, scratch); |
|
6453 masm.storePtr(scratch, Address(StackPointer, i->offsetFromArgBase())); |
|
6454 } |
|
6455 i++; |
|
6456 JS_ASSERT(i.done()); |
|
6457 |
|
6458 // Call |
|
6459 AssertStackAlignment(masm); |
|
6460 switch (retType.which()) { |
|
6461 case RetType::Signed: |
|
6462 masm.callExit(AsmJSImm_CoerceInPlace_ToInt32, i.stackBytesConsumedSoFar()); |
|
6463 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); |
|
6464 masm.unboxInt32(Address(StackPointer, offsetToArgv), ReturnReg); |
|
6465 break; |
|
6466 case RetType::Double: |
|
6467 masm.callExit(AsmJSImm_CoerceInPlace_ToNumber, i.stackBytesConsumedSoFar()); |
|
6468 masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); |
|
6469 masm.loadDouble(Address(StackPointer, offsetToArgv), ReturnFloatReg); |
|
6470 break; |
|
6471 default: |
|
6472 MOZ_ASSUME_UNREACHABLE("Unsupported convert type"); |
|
6473 } |
|
6474 } |
|
6475 |
|
6476 static void |
|
6477 GenerateFFIIonExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, |
|
6478 unsigned exitIndex, Label *throwLabel) |
|
6479 { |
|
6480 MacroAssembler &masm = m.masm(); |
|
6481 masm.align(CodeAlignment); |
|
6482 m.setIonExitOffset(exitIndex); |
|
6483 masm.setFramePushed(0); |
|
6484 |
|
6485 #if defined(JS_CODEGEN_X64) |
|
6486 masm.Push(HeapReg); |
|
6487 #elif defined(JS_CODEGEN_ARM) |
|
6488     // The lr register holds the return address and needs to be saved. GlobalReg (r10)

6489     // and HeapReg (r11) also need to be preserved here so they can be restored before

6490     // returning to asm.js code. NANReg also needs to be restored, but it is a constant

6491     // and is simply reloaded before returning to asm.js code.
|
6492 masm.PushRegsInMask(RegisterSet(GeneralRegisterSet((1<<GlobalReg.code()) | |
|
6493 (1<<HeapReg.code()) | |
|
6494 (1<<lr.code())), |
|
6495 FloatRegisterSet(uint32_t(0)))); |
|
6496 #endif |
|
6497 |
|
6498 // The stack frame is used for the call into Ion and also for calls into C for OOL |
|
6499 // conversion of the result. A frame large enough for both is allocated. |
|
6500 // |
|
6501 // Arguments to the Ion function are in the following order on the stack: |
|
6502 // | return address | descriptor | callee | argc | this | arg1 | arg2 | ... |
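// Comment-only sketch of that layout (hypothetical struct, for illustration only):
//
//     struct IonEntryArgsSketch {
//         size_t descriptor;   // MakeFrameDescriptor(framePushed, JitFrame_Entry)
//         size_t callee;       // the callee JSFunction*
//         size_t argc;         // exit.sig().args().length()
//         Value  thisv;        // UndefinedValue()
//         Value  args[1];      // argc boxed arguments follow
//     };
//
// which is why argBytes below is 3 * sizeof(size_t) + (1 + argc) * sizeof(Value).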
|
6503 unsigned argBytes = 3 * sizeof(size_t) + (1 + exit.sig().args().length()) * sizeof(Value); |
|
6504 unsigned offsetToArgs = MaybeRetAddr; |
|
6505 unsigned stackDecForIonCall = StackDecrementForCall(masm, argBytes + offsetToArgs); |
|
6506 |
|
6507 // Reserve space for a call to AsmJSImm_CoerceInPlace_* and an array of values used by |
|
6508 // OOLConvert which reuses the same frame. This code needs to be kept in sync with the |
|
6509 // stack usage in GenerateOOLConvert. |
|
6510 MIRType typeArray[] = { MIRType_Pointer, MIRType_Pointer }; // cx, argv |
|
6511 MIRTypeVector callArgTypes(m.cx()); |
|
6512 callArgTypes.infallibleAppend(typeArray, ArrayLength(typeArray)); |
|
6513 unsigned oolExtraBytes = sizeof(Value) + MaybeRetAddr; |
|
6514 unsigned stackDecForOOLCall = StackDecrementForCall(masm, callArgTypes, oolExtraBytes); |
|
6515 |
|
6516 // Allocate a frame large enough for both of the above calls. |
|
6517 unsigned stackDec = Max(stackDecForIonCall, stackDecForOOLCall); |
|
6518 |
|
6519 masm.reserveStack(stackDec); |
|
6520 AssertStackAlignment(masm); |
|
6521 |
|
6522 // 1. Descriptor |
|
6523 size_t argOffset = offsetToArgs; |
|
6524 uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_Entry); |
|
6525 masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(StackPointer, argOffset)); |
|
6526 argOffset += sizeof(size_t); |
|
6527 |
|
6528 // 2. Callee |
|
6529 Register callee = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6530 Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1; |
|
6531 |
|
6532 // 2.1. Get ExitDatum |
|
6533 unsigned globalDataOffset = m.module().exitIndexToGlobalDataOffset(exitIndex); |
|
6534 #if defined(JS_CODEGEN_X64) |
|
6535 CodeOffsetLabel label2 = masm.leaRipRelative(callee); |
|
6536 m.masm().append(AsmJSGlobalAccess(label2.offset(), globalDataOffset)); |
|
6537 #elif defined(JS_CODEGEN_X86) |
|
6538 CodeOffsetLabel label2 = masm.movlWithPatch(Imm32(0), callee); |
|
6539 m.masm().append(AsmJSGlobalAccess(label2.offset(), globalDataOffset)); |
|
6540 #else |
|
6541 masm.lea(Operand(GlobalReg, globalDataOffset), callee); |
|
6542 #endif |
|
6543 |
|
6544 // 2.2. Get callee |
|
6545 masm.loadPtr(Address(callee, offsetof(AsmJSModule::ExitDatum, fun)), callee); |
|
6546 |
|
6547 // 2.3. Save callee |
|
6548 masm.storePtr(callee, Address(StackPointer, argOffset)); |
|
6549 argOffset += sizeof(size_t); |
|
6550 |
|
6551 // 3. Argc |
|
6552 unsigned argc = exit.sig().args().length(); |
|
6553 masm.storePtr(ImmWord(uintptr_t(argc)), Address(StackPointer, argOffset)); |
|
6554 argOffset += sizeof(size_t); |
|
6555 |
|
6556 // 4. |this| value |
|
6557 masm.storeValue(UndefinedValue(), Address(StackPointer, argOffset)); |
|
6558 argOffset += sizeof(Value); |
|
6559 |
|
6560 // 5. Fill the arguments |
|
6561 unsigned offsetToCallerStackArgs = masm.framePushed(); |
|
6562 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) |
|
6563 offsetToCallerStackArgs += NativeFrameSize; |
|
6564 #endif |
|
6565 FillArgumentArray(m, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch); |
|
6566 argOffset += exit.sig().args().length() * sizeof(Value); |
|
6567 JS_ASSERT(argOffset == offsetToArgs + argBytes); |
|
6568 |
|
6569     // Get the pointer to the Ion code.
|
6570 Label done, oolConvert; |
|
6571 Label *maybeDebugBreakpoint = nullptr; |
|
6572 |
|
6573 #ifdef DEBUG |
|
6574 Label ionFailed; |
|
6575 maybeDebugBreakpoint = &ionFailed; |
|
6576 masm.branchIfFunctionHasNoScript(callee, &ionFailed); |
|
6577 #endif |
|
6578 |
|
6579 masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee); |
|
6580 masm.loadBaselineOrIonNoArgCheck(callee, callee, SequentialExecution, maybeDebugBreakpoint); |
|
6581 |
|
6582 AssertStackAlignment(masm); |
|
6583 |
|
6584 { |
|
6585 // Enable Activation. |
|
6586 // |
|
6587 // This sequence requires four registers, and needs to preserve the 'callee' |
|
6588 // register, so there are five live registers. |
|
6589 JS_ASSERT(callee == AsmJSIonExitRegCallee); |
|
6590 Register reg0 = AsmJSIonExitRegE0; |
|
6591 Register reg1 = AsmJSIonExitRegE1; |
|
6592 Register reg2 = AsmJSIonExitRegE2; |
|
6593 Register reg3 = AsmJSIonExitRegE3; |
|
6594 |
|
6595 LoadAsmJSActivationIntoRegister(masm, reg0); |
|
6596 |
|
6597 // Record sp in the AsmJSActivation for stack-walking. |
|
6598 masm.storePtr(StackPointer, Address(reg0, AsmJSActivation::offsetOfExitSP())); |
|
6599 |
|
6600 // The following is inlined: |
|
6601 // JSContext *cx = activation->cx(); |
|
6602 // Activation *act = cx->mainThread().activation(); |
|
6603 // act.active_ = true; |
|
6604 // act.prevIonTop_ = cx->mainThread().ionTop; |
|
6605 // act.prevJitJSContext_ = cx->mainThread().jitJSContext; |
|
6606 // cx->mainThread().jitJSContext = cx; |
|
6607         // On ARM, store8() uses the secondScratchReg (lr) as a temp.
|
6608 size_t offsetOfActivation = offsetof(JSRuntime, mainThread) + |
|
6609 PerThreadData::offsetOfActivation(); |
|
6610 size_t offsetOfIonTop = offsetof(JSRuntime, mainThread) + offsetof(PerThreadData, ionTop); |
|
6611 size_t offsetOfJitJSContext = offsetof(JSRuntime, mainThread) + |
|
6612 offsetof(PerThreadData, jitJSContext); |
|
6613 masm.loadPtr(Address(reg0, AsmJSActivation::offsetOfContext()), reg3); |
|
6614 masm.loadPtr(Address(reg3, JSContext::offsetOfRuntime()), reg0); |
|
6615 masm.loadPtr(Address(reg0, offsetOfActivation), reg1); |
|
6616 masm.store8(Imm32(1), Address(reg1, JitActivation::offsetOfActiveUint8())); |
|
6617 masm.loadPtr(Address(reg0, offsetOfIonTop), reg2); |
|
6618 masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevIonTop())); |
|
6619 masm.loadPtr(Address(reg0, offsetOfJitJSContext), reg2); |
|
6620 masm.storePtr(reg2, Address(reg1, JitActivation::offsetOfPrevJitJSContext())); |
|
6621 masm.storePtr(reg3, Address(reg0, offsetOfJitJSContext)); |
|
6622 } |
|
6623 |
|
6624 // 2. Call |
|
6625 AssertStackAlignment(masm); |
|
6626 masm.callIonFromAsmJS(callee); |
|
6627 AssertStackAlignment(masm); |
|
6628 |
|
6629 { |
|
6630 // Disable Activation. |
|
6631 // |
|
6632 // This sequence needs three registers, and must preserve the JSReturnReg_Data and |
|
6633 // JSReturnReg_Type, so there are five live registers. |
|
6634 JS_ASSERT(JSReturnReg_Data == AsmJSIonExitRegReturnData); |
|
6635 JS_ASSERT(JSReturnReg_Type == AsmJSIonExitRegReturnType); |
|
6636 Register reg0 = AsmJSIonExitRegD0; |
|
6637 Register reg1 = AsmJSIonExitRegD1; |
|
6638 Register reg2 = AsmJSIonExitRegD2; |
|
6639 |
|
6640 LoadAsmJSActivationIntoRegister(masm, reg0); |
|
6641 |
|
6642 // The following is inlined: |
|
6643 // JSContext *cx = activation->cx(); |
|
6644 // Activation *act = cx->mainThread().activation(); |
|
6645 // act.active_ = false; |
|
6646 // cx->mainThread().ionTop = prevIonTop_; |
|
6647 // cx->mainThread().jitJSContext = prevJitJSContext_; |
|
6648         // On ARM, store8() uses the secondScratchReg (lr) as a temp.
|
6649 size_t offsetOfActivation = offsetof(JSRuntime, mainThread) + |
|
6650 PerThreadData::offsetOfActivation(); |
|
6651 size_t offsetOfIonTop = offsetof(JSRuntime, mainThread) + offsetof(PerThreadData, ionTop); |
|
6652 size_t offsetOfJitJSContext = offsetof(JSRuntime, mainThread) + |
|
6653 offsetof(PerThreadData, jitJSContext); |
|
6654 masm.loadPtr(Address(reg0, AsmJSActivation::offsetOfContext()), reg0); |
|
6655 masm.loadPtr(Address(reg0, JSContext::offsetOfRuntime()), reg0); |
|
6656 masm.loadPtr(Address(reg0, offsetOfActivation), reg1); |
|
6657 masm.store8(Imm32(0), Address(reg1, JitActivation::offsetOfActiveUint8())); |
|
6658 masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevIonTop()), reg2); |
|
6659 masm.storePtr(reg2, Address(reg0, offsetOfIonTop)); |
|
6660 masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitJSContext()), reg2); |
|
6661 masm.storePtr(reg2, Address(reg0, offsetOfJitJSContext)); |
|
6662 } |
|
6663 |
|
6664 #ifdef DEBUG |
|
6665 masm.branchTestMagicValue(Assembler::Equal, JSReturnOperand, JS_ION_ERROR, throwLabel); |
|
6666 masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &ionFailed); |
|
6667 #else |
|
6668 masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel); |
|
6669 #endif |
|
6670 |
|
6671 uint32_t oolConvertFramePushed = masm.framePushed(); |
|
6672 switch (exit.sig().retType().which()) { |
|
6673 case RetType::Void: |
|
6674 break; |
|
6675 case RetType::Signed: |
|
6676 masm.convertValueToInt32(JSReturnOperand, ReturnFloatReg, ReturnReg, &oolConvert, |
|
6677 /* -0 check */ false); |
|
6678 break; |
|
6679 case RetType::Double: |
|
6680 masm.convertValueToDouble(JSReturnOperand, ReturnFloatReg, &oolConvert); |
|
6681 break; |
|
6682 case RetType::Float: |
|
6683 MOZ_ASSUME_UNREACHABLE("Float shouldn't be returned from a FFI"); |
|
6684 break; |
|
6685 } |
|
6686 |
|
6687 masm.bind(&done); |
|
6688 masm.freeStack(stackDec); |
|
6689 #if defined(JS_CODEGEN_ARM) |
|
6690 masm.ma_vimm(GenericNaN(), NANReg); |
|
6691 masm.PopRegsInMask(RegisterSet(GeneralRegisterSet((1<<GlobalReg.code()) | |
|
6692 (1<<HeapReg.code()) | |
|
6693 (1<<pc.code())), |
|
6694 FloatRegisterSet(uint32_t(0)))); |
|
6695 #else |
|
6696 # if defined(JS_CODEGEN_X64) |
|
6697 masm.Pop(HeapReg); |
|
6698 # endif |
|
6699 masm.ret(); |
|
6700 #endif |
|
6701 JS_ASSERT(masm.framePushed() == 0); |
|
6702 |
|
6703 // oolConvert |
|
6704 if (oolConvert.used()) { |
|
6705 masm.bind(&oolConvert); |
|
6706 masm.setFramePushed(oolConvertFramePushed); |
|
6707 GenerateOOLConvert(m, exit.sig().retType(), throwLabel); |
|
6708 masm.setFramePushed(0); |
|
6709 masm.jump(&done); |
|
6710 } |
|
6711 |
|
6712 #ifdef DEBUG |
|
6713 masm.bind(&ionFailed); |
|
6714 masm.assumeUnreachable("AsmJS to IonMonkey call failed."); |
|
6715 #endif |
|
6716 } |
|
6717 |
|
6718 // See "asm.js FFI calls" comment above. |
|
6719 static void |
|
6720 GenerateFFIExit(ModuleCompiler &m, const ModuleCompiler::ExitDescriptor &exit, unsigned exitIndex, |
|
6721 Label *throwLabel) |
|
6722 { |
|
6723 // Generate the slow path through the interpreter |
|
6724 GenerateFFIInterpreterExit(m, exit, exitIndex, throwLabel); |
|
6725 |
|
6726 // Generate the fast path |
|
6727 GenerateFFIIonExit(m, exit, exitIndex, throwLabel); |
|
6728 } |
|
6729 |
|
6730 // The stack-overflow exit is called when the stack limit has definitely been |
|
6731 // exceeded. In this case, we can clobber everything since we are about to pop |
|
6732 // all the frames. |
|
6733 static bool |
|
6734 GenerateStackOverflowExit(ModuleCompiler &m, Label *throwLabel) |
|
6735 { |
|
6736 MacroAssembler &masm = m.masm(); |
|
6737 masm.align(CodeAlignment); |
|
6738 masm.bind(&m.stackOverflowLabel()); |
|
6739 |
|
6740 // The overflow check always occurs before the initial function-specific |
|
6741 // stack-size adjustment. See CodeGenerator::generateAsmJSPrologue. |
|
6742 masm.setFramePushed(AlignmentMidPrologue - AlignmentAtPrologue); |
|
6743 |
|
6744 MIRTypeVector argTypes(m.cx()); |
|
6745 argTypes.infallibleAppend(MIRType_Pointer); // cx |
|
6746 |
|
6747 unsigned stackDec = StackDecrementForCall(masm, argTypes, MaybeRetAddr); |
|
6748 masm.reserveStack(stackDec); |
|
6749 |
|
6750 Register activation = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6751 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6752 |
|
6753 // Record sp in the AsmJSActivation for stack-walking. |
|
6754 masm.storePtr(StackPointer, Address(activation, AsmJSActivation::offsetOfExitSP())); |
|
6755 |
|
6756 ABIArgMIRTypeIter i(argTypes); |
|
6757 |
|
6758 // argument 0: cx |
|
6759 if (i->kind() == ABIArg::GPR) { |
|
6760 LoadJSContextFromActivation(masm, activation, i->gpr()); |
|
6761 } else { |
|
6762 LoadJSContextFromActivation(masm, activation, activation); |
|
6763 masm.storePtr(activation, Address(StackPointer, i->offsetFromArgBase())); |
|
6764 } |
|
6765 i++; |
|
6766 |
|
6767 JS_ASSERT(i.done()); |
|
6768 |
|
6769 AssertStackAlignment(masm); |
|
6770 masm.callExit(AsmJSImm_ReportOverRecursed, i.stackBytesConsumedSoFar()); |
|
6771 |
|
6772 // Don't worry about restoring the stack; throwLabel will pop everything. |
|
6773 masm.jump(throwLabel); |
|
6774 return !masm.oom(); |
|
6775 } |
|
6776 |
|
6777 // The operation-callback exit is called from arbitrarily-interrupted asm.js |
|
6778 // code. That means we must first save *all* registers and restore *all* |
|
6779 // registers (except the stack pointer) when we resume. The address to resume to |
|
6780 // (assuming that js::HandleExecutionInterrupt doesn't indicate that the |
|
6781 // execution should be aborted) is stored in AsmJSActivation::resumePC_. |
|
6782 // Unfortunately, loading this requires a scratch register which we don't have |
|
6783 // after restoring all registers. To hack around this, push the resumePC on the |
|
6784 // stack so that it can be popped directly into PC. |
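// Comment-only outline of the non-ARM path below (the ARM path is analogous but
// keeps sp in r6 and the APSR/FPSCR in r4/r5):
//
//     push(0);                          // placeholder slot for resumePC
//     pushFlags();                      // preserve condition codes before any arithmetic
//     PushRegsInMask(AllRegsExceptSP);  // save the full machine state
//     store activation->resumePC into the placeholder slot
//     align sp, call HandleExecutionInterrupt, restore sp
//     PopRegsInMask(AllRegsExceptSP);
//     popFlags();
//     ret();                            // pops the placeholder, i.e. resumes at resumePC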
|
6785 static bool |
|
6786 GenerateInterruptExit(ModuleCompiler &m, Label *throwLabel) |
|
6787 { |
|
6788 MacroAssembler &masm = m.masm(); |
|
6789 masm.align(CodeAlignment); |
|
6790 masm.bind(&m.interruptLabel()); |
|
6791 |
|
6792 #ifndef JS_CODEGEN_ARM |
|
6793 // Be very careful here not to perturb the machine state before saving it |
|
6794 // to the stack. In particular, add/sub instructions may set conditions in |
|
6795 // the flags register. |
|
6796 masm.push(Imm32(0)); // space for resumePC |
|
6797 masm.pushFlags(); // after this we are safe to use sub |
|
6798 masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below |
|
6799 masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP) |
|
6800 |
|
6801 Register activation = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6802 Register scratch = ABIArgGenerator::NonArgReturnVolatileReg1; |
|
6803 |
|
6804 // Store resumePC into the reserved space. |
|
6805 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6806 masm.loadPtr(Address(activation, AsmJSActivation::offsetOfResumePC()), scratch); |
|
6807 masm.storePtr(scratch, Address(StackPointer, masm.framePushed() + sizeof(void*))); |
|
6808 |
|
6809 // We know that StackPointer is word-aligned, but not necessarily |
|
6810 // stack-aligned, so we need to align it dynamically. |
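// Comment-only sketch: saving sp into NonVolatileReg and then doing
//
//     sp &= ~(StackAlignment - 1);
//
// rounds sp down to the nearest StackAlignment boundary; the saved copy restores
// the original (word-aligned) sp after the call returns.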
|
6811 masm.mov(StackPointer, ABIArgGenerator::NonVolatileReg); |
|
6812 #if defined(JS_CODEGEN_X86) |
|
6813 // Ensure that at least one slot is pushed for passing 'cx' below. |
|
6814 masm.push(Imm32(0)); |
|
6815 #endif |
|
6816 masm.andPtr(Imm32(~(StackAlignment - 1)), StackPointer); |
|
6817 if (ShadowStackSpace) |
|
6818 masm.subPtr(Imm32(ShadowStackSpace), StackPointer); |
|
6819 |
|
6820 // argument 0: cx |
|
6821 #if defined(JS_CODEGEN_X86) |
|
6822 LoadJSContextFromActivation(masm, activation, scratch); |
|
6823 masm.storePtr(scratch, Address(StackPointer, 0)); |
|
6824 #elif defined(JS_CODEGEN_X64) |
|
6825 LoadJSContextFromActivation(masm, activation, IntArgReg0); |
|
6826 #endif |
|
6827 |
|
6828 masm.call(AsmJSImm_HandleExecutionInterrupt); |
|
6829 masm.branchIfFalseBool(ReturnReg, throwLabel); |
|
6830 |
|
6831     // Restore the StackPointer to its position before the call.
|
6832 masm.mov(ABIArgGenerator::NonVolatileReg, StackPointer); |
|
6833 |
|
6834 // Restore the machine state to before the interrupt. |
|
6835 masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP) |
|
6836 masm.popFlags(); // after this, nothing that sets conditions |
|
6837 masm.ret(); // pop resumePC into PC |
|
6838 #else |
|
6839 masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below |
|
6840     masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)), FloatRegisterSet(uint32_t(0))));   // save all GP registers, except sp
|
6841 |
|
6842 // Save both the APSR and FPSCR in non-volatile registers. |
|
6843 masm.as_mrs(r4); |
|
6844 masm.as_vmrs(r5); |
|
6845 // Save the stack pointer in a non-volatile register. |
|
6846 masm.mov(sp,r6); |
|
6847 // Align the stack. |
|
6848 masm.ma_and(Imm32(~7), sp, sp); |
|
6849 |
|
6850 // Store resumePC into the return PC stack slot. |
|
6851 LoadAsmJSActivationIntoRegister(masm, IntArgReg0); |
|
6852 masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1); |
|
6853 masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*))); |
|
6854 |
|
6855 // argument 0: cx |
|
6856 masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfContext()), IntArgReg0); |
|
6857 |
|
6858 masm.PushRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask))); // save all FP registers |
|
6859 masm.call(AsmJSImm_HandleExecutionInterrupt); |
|
6860 masm.branchIfFalseBool(ReturnReg, throwLabel); |
|
6861 |
|
6862     // Restore the machine state to before the interrupt. This will set the pc!
|
6863 masm.PopRegsInMask(RegisterSet(GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllMask))); // restore all FP registers |
|
6864 masm.mov(r6,sp); |
|
6865 masm.as_vmsr(r5); |
|
6866 masm.as_msr(r4); |
|
6867 // Restore all GP registers |
|
6868 masm.startDataTransferM(IsLoad, sp, IA, WriteBack); |
|
6869 masm.transferReg(r0); |
|
6870 masm.transferReg(r1); |
|
6871 masm.transferReg(r2); |
|
6872 masm.transferReg(r3); |
|
6873 masm.transferReg(r4); |
|
6874 masm.transferReg(r5); |
|
6875 masm.transferReg(r6); |
|
6876 masm.transferReg(r7); |
|
6877 masm.transferReg(r8); |
|
6878 masm.transferReg(r9); |
|
6879 masm.transferReg(r10); |
|
6880 masm.transferReg(r11); |
|
6881 masm.transferReg(r12); |
|
6882 masm.transferReg(lr); |
|
6883 masm.finishDataTransfer(); |
|
6884 masm.ret(); |
|
6885 |
|
6886 #endif |
|
6887 |
|
6888 return !masm.oom(); |
|
6889 } |
|
6890 |
|
6891 // If an exception is thrown, simply pop all frames (since asm.js does not |
|
6892 // contain try/catch). To do this: |
|
6893 //  1. Restore 'sp' to its value right after the PushRegsInMask in GenerateEntry.
|
6894 // 2. PopRegsInMask to restore the caller's non-volatile registers. |
|
6895 // 3. Return (to CallAsmJS). |
|
6896 static bool |
|
6897 GenerateThrowExit(ModuleCompiler &m, Label *throwLabel) |
|
6898 { |
|
6899 MacroAssembler &masm = m.masm(); |
|
6900 masm.align(CodeAlignment); |
|
6901 masm.bind(throwLabel); |
|
6902 |
|
6903 Register activation = ABIArgGenerator::NonArgReturnVolatileReg0; |
|
6904 LoadAsmJSActivationIntoRegister(masm, activation); |
|
6905 |
|
6906 masm.setFramePushed(FramePushedAfterSave); |
|
6907 masm.loadPtr(Address(activation, AsmJSActivation::offsetOfErrorRejoinSP()), StackPointer); |
|
6908 |
|
6909 masm.PopRegsInMask(NonVolatileRegs); |
|
6910 JS_ASSERT(masm.framePushed() == 0); |
|
6911 |
|
6912 masm.mov(ImmWord(0), ReturnReg); |
|
6913 masm.abiret(); |
|
6914 |
|
6915 return !masm.oom(); |
|
6916 } |
|
6917 |
|
6918 static bool |
|
6919 GenerateStubs(ModuleCompiler &m) |
|
6920 { |
|
6921 for (unsigned i = 0; i < m.module().numExportedFunctions(); i++) { |
|
6922 m.setEntryOffset(i); |
|
6923 if (!GenerateEntry(m, m.module().exportedFunction(i))) |
|
6924 return false; |
|
6925 if (m.masm().oom()) |
|
6926 return false; |
|
6927 } |
|
6928 |
|
6929 Label throwLabel; |
|
6930 |
|
6931 // The order of the iterations here is non-deterministic, since |
|
6932 // m.allExits() is a hash keyed by pointer values! |
|
6933 for (ModuleCompiler::ExitMap::Range r = m.allExits(); !r.empty(); r.popFront()) { |
|
6934 GenerateFFIExit(m, r.front().key(), r.front().value(), &throwLabel); |
|
6935 if (m.masm().oom()) |
|
6936 return false; |
|
6937 } |
|
6938 |
|
6939 if (m.stackOverflowLabel().used()) { |
|
6940 if (!GenerateStackOverflowExit(m, &throwLabel)) |
|
6941 return false; |
|
6942 } |
|
6943 |
|
6944 if (!GenerateInterruptExit(m, &throwLabel)) |
|
6945 return false; |
|
6946 |
|
6947 if (!GenerateThrowExit(m, &throwLabel)) |
|
6948 return false; |
|
6949 |
|
6950 return true; |
|
6951 } |
|
6952 |
|
6953 static bool |
|
6954 FinishModule(ModuleCompiler &m, |
|
6955 ScopedJSDeletePtr<AsmJSModule> *module) |
|
6956 { |
|
6957 LifoAlloc lifo(LIFO_ALLOC_PRIMARY_CHUNK_SIZE); |
|
6958 TempAllocator alloc(&lifo); |
|
6959 IonContext ionContext(m.cx(), &alloc); |
|
6960 |
|
6961 m.masm().resetForNewCodeGenerator(alloc); |
|
6962 |
|
6963 if (!GenerateStubs(m)) |
|
6964 return false; |
|
6965 |
|
6966 return m.finish(module); |
|
6967 } |
|
6968 |
|
6969 static bool |
|
6970 CheckModule(ExclusiveContext *cx, AsmJSParser &parser, ParseNode *stmtList, |
|
6971 ScopedJSDeletePtr<AsmJSModule> *moduleOut, |
|
6972 ScopedJSFreePtr<char> *compilationTimeReport) |
|
6973 { |
|
6974 if (!LookupAsmJSModuleInCache(cx, parser, moduleOut, compilationTimeReport)) |
|
6975 return false; |
|
6976 if (*moduleOut) |
|
6977 return true; |
|
6978 |
|
6979 ModuleCompiler m(cx, parser); |
|
6980 if (!m.init()) |
|
6981 return false; |
|
6982 |
|
6983 if (PropertyName *moduleFunctionName = FunctionName(m.moduleFunctionNode())) { |
|
6984 if (!CheckModuleLevelName(m, m.moduleFunctionNode(), moduleFunctionName)) |
|
6985 return false; |
|
6986 m.initModuleFunctionName(moduleFunctionName); |
|
6987 } |
|
6988 |
|
6989 if (!CheckFunctionHead(m, m.moduleFunctionNode())) |
|
6990 return false; |
|
6991 |
|
6992 if (!CheckModuleArguments(m, m.moduleFunctionNode())) |
|
6993 return false; |
|
6994 |
|
6995 if (!CheckPrecedingStatements(m, stmtList)) |
|
6996 return false; |
|
6997 |
|
6998 if (!CheckModuleGlobals(m)) |
|
6999 return false; |
|
7000 |
|
7001 #ifdef JS_THREADSAFE |
|
7002 if (!CheckFunctionsParallel(m)) |
|
7003 return false; |
|
7004 #else |
|
7005 if (!CheckFunctionsSequential(m)) |
|
7006 return false; |
|
7007 #endif |
|
7008 |
|
7009 m.finishFunctionBodies(); |
|
7010 |
|
7011 if (!CheckFuncPtrTables(m)) |
|
7012 return false; |
|
7013 |
|
7014 if (!CheckModuleReturn(m)) |
|
7015 return false; |
|
7016 |
|
7017 TokenKind tk = PeekToken(m.parser()); |
|
7018 if (tk != TOK_EOF && tk != TOK_RC) |
|
7019 return m.fail(nullptr, "top-level export (return) must be the last statement"); |
|
7020 |
|
7021     // The instruction cache is flushed when dynamically linking, so we can inhibit flushing now.
|
7022 AutoFlushICache afc("CheckModule", /* inhibit= */ true); |
|
7023 |
|
7024 ScopedJSDeletePtr<AsmJSModule> module; |
|
7025 if (!FinishModule(m, &module)) |
|
7026 return false; |
|
7027 |
|
7028 bool storedInCache = StoreAsmJSModuleInCache(parser, *module, cx); |
|
7029 module->staticallyLink(cx); |
|
7030 |
|
7031 m.buildCompilationTimeReport(storedInCache, compilationTimeReport); |
|
7032 *moduleOut = module.forget(); |
|
7033 return true; |
|
7034 } |
|
7035 |
|
7036 static bool |
|
7037 Warn(AsmJSParser &parser, int errorNumber, const char *str) |
|
7038 { |
|
7039 parser.reportNoOffset(ParseWarning, /* strict = */ false, errorNumber, str ? str : ""); |
|
7040 return false; |
|
7041 } |
|
7042 |
|
7043 static bool |
|
7044 EstablishPreconditions(ExclusiveContext *cx, AsmJSParser &parser) |
|
7045 { |
|
7046 if (!cx->jitSupportsFloatingPoint()) |
|
7047 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by lack of floating point support"); |
|
7048 |
|
7049 if (!cx->signalHandlersInstalled()) |
|
7050 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Platform missing signal handler support"); |
|
7051 |
|
7052 if (cx->gcSystemPageSize() != AsmJSPageSize) |
|
7053 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by non 4KiB system page size"); |
|
7054 |
|
7055 if (!parser.options().asmJSOption) |
|
7056 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by javascript.options.asmjs in about:config"); |
|
7057 |
|
7058 if (!parser.options().compileAndGo) |
|
7059 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Temporarily disabled for event-handler and other cloneable scripts"); |
|
7060 |
|
7061 if (cx->compartment()->debugMode()) |
|
7062 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by debugger"); |
|
7063 |
|
7064 if (parser.pc->isGenerator()) |
|
7065 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by generator context"); |
|
7066 |
|
7067 if (parser.pc->isArrowFunction()) |
|
7068 return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by arrow function context"); |
|
7069 |
|
7070 #ifdef JS_THREADSAFE |
|
7071 if (ParallelCompilationEnabled(cx)) |
|
7072 EnsureWorkerThreadsInitialized(cx); |
|
7073 #endif |
|
7074 |
|
7075 return true; |
|
7076 } |
|
7077 |
|
7078 static bool |
|
7079 NoExceptionPending(ExclusiveContext *cx) |
|
7080 { |
|
7081 return !cx->isJSContext() || !cx->asJSContext()->isExceptionPending(); |
|
7082 } |
|
7083 |
|
7084 bool |
|
7085 js::CompileAsmJS(ExclusiveContext *cx, AsmJSParser &parser, ParseNode *stmtList, bool *validated) |
|
7086 { |
|
7087 *validated = false; |
|
7088 |
|
7089 if (!EstablishPreconditions(cx, parser)) |
|
7090 return NoExceptionPending(cx); |
|
7091 |
|
7092 ScopedJSFreePtr<char> compilationTimeReport; |
|
7093 ScopedJSDeletePtr<AsmJSModule> module; |
|
7094 if (!CheckModule(cx, parser, stmtList, &module, &compilationTimeReport)) |
|
7095 return NoExceptionPending(cx); |
|
7096 |
|
7097 RootedObject moduleObj(cx, AsmJSModuleObject::create(cx, &module)); |
|
7098 if (!moduleObj) |
|
7099 return false; |
|
7100 |
|
7101 FunctionBox *funbox = parser.pc->maybeFunction->pn_funbox; |
|
7102 RootedFunction moduleFun(cx, NewAsmJSModuleFunction(cx, funbox->function(), moduleObj)); |
|
7103 if (!moduleFun) |
|
7104 return false; |
|
7105 |
|
7106 JS_ASSERT(funbox->function()->isInterpreted()); |
|
7107 funbox->object = moduleFun; |
|
7108 |
|
7109 *validated = true; |
|
7110 Warn(parser, JSMSG_USE_ASM_TYPE_OK, compilationTimeReport.get()); |
|
7111 return NoExceptionPending(cx); |
|
7112 } |
|
7113 |
|
7114 bool |
|
7115 js::IsAsmJSCompilationAvailable(JSContext *cx, unsigned argc, Value *vp) |
|
7116 { |
|
7117 CallArgs args = CallArgsFromVp(argc, vp); |
|
7118 |
|
7119 // See EstablishPreconditions. |
|
7120 bool available = cx->jitSupportsFloatingPoint() && |
|
7121 cx->signalHandlersInstalled() && |
|
7122 cx->gcSystemPageSize() == AsmJSPageSize && |
|
7123 !cx->compartment()->debugMode() && |
|
7124 cx->runtime()->options().asmJS(); |
|
7125 |
|
7126 args.rval().set(BooleanValue(available)); |
|
7127 return true; |
|
7128 } |