|
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
|
2 * vim: set ts=8 sts=4 et sw=4 tw=99: |
|
3 * This Source Code Form is subject to the terms of the Mozilla Public |
|
4 * License, v. 2.0. If a copy of the MPL was not distributed with this |
|
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
|
6 |
|
7 #ifndef jit_LIR_h |
|
8 #define jit_LIR_h |
|
9 |
|
10 // This file declares the core data structures for LIR: storage allocations for |
|
11 // inputs and outputs, as well as the interface that instructions must conform to. |
|
12 |
|
13 #include "mozilla/Array.h" |
|
14 |
|
15 #include "jit/Bailouts.h" |
|
16 #include "jit/InlineList.h" |
|
17 #include "jit/IonAllocPolicy.h" |
|
18 #include "jit/LOpcodes.h" |
|
19 #include "jit/MIR.h" |
|
20 #include "jit/MIRGraph.h" |
|
21 #include "jit/Registers.h" |
|
22 #include "jit/Safepoints.h" |
|
23 |
|
24 namespace js { |
|
25 namespace jit { |
|
26 |
|
27 class LUse; |
|
28 class LGeneralReg; |
|
29 class LFloatReg; |
|
30 class LStackSlot; |
|
31 class LArgument; |
|
32 class LConstantIndex; |
|
33 class MBasicBlock; |
|
34 class MTableSwitch; |
|
35 class MIRGenerator; |
|
36 class MSnapshot; |
|
37 |
|
38 static const uint32_t VREG_INCREMENT = 1; |
|
39 |
|
40 static const uint32_t THIS_FRAME_ARGSLOT = 0; |
|
41 |
|
42 #if defined(JS_NUNBOX32) |
|
43 # define BOX_PIECES 2 |
|
44 static const uint32_t VREG_TYPE_OFFSET = 0; |
|
45 static const uint32_t VREG_DATA_OFFSET = 1; |
|
46 static const uint32_t TYPE_INDEX = 0; |
|
47 static const uint32_t PAYLOAD_INDEX = 1; |
|
48 #elif defined(JS_PUNBOX64) |
|
49 # define BOX_PIECES 1 |
|
50 #else |
|
51 # error "Unknown value box format: define JS_NUNBOX32 or JS_PUNBOX64." |
|
52 #endif |
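
// Orientation note (a summary of the definitions above, not new behavior):
// BOX_PIECES is the number of LAllocation entries a boxed js::Value occupies
// in a snapshot. On JS_NUNBOX32 platforms a Value is split into a type word
// and a payload word (two pieces, type first); on JS_PUNBOX64 platforms the
// whole Value fits in a single piece, so LSnapshot::numSlots() below is
// simply numEntries() / BOX_PIECES.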
|
53 |
|
54 // Represents storage for an operand. For constants, the pointer is tagged |
|
55 // with a single bit, and the untagged pointer is a pointer to a Value. |
|
56 class LAllocation : public TempObject |
|
57 { |
|
58 uintptr_t bits_; |
|
59 |
|
60 static const uintptr_t TAG_BIT = 1; |
|
61 static const uintptr_t TAG_SHIFT = 0; |
|
62 static const uintptr_t TAG_MASK = 1 << TAG_SHIFT; |
|
63 static const uintptr_t KIND_BITS = 3; |
|
64 static const uintptr_t KIND_SHIFT = TAG_SHIFT + TAG_BIT; |
|
65 static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1; |
|
66 |
|
67 protected: |
|
68 static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS - TAG_BIT; |
|
69 static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS; |
|
70 static const uintptr_t DATA_MASK = (1 << DATA_BITS) - 1; |
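
    // Illustrative layout of bits_ for non-Value allocations, derived from
    // the constants above (bit 0 on the right):
    //
    //   [ data : 28 bits ][ kind : 3 bits ][ tag : 1 bit ]
    //        bits 31..4       bits 3..1        bit 0
    //
    // Constant js::Values are stored differently: bits_ holds the Value
    // pointer itself with the low (tag) bit set.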
|
71 |
|
72 public: |
|
73 enum Kind { |
|
74 USE, // Use of a virtual register, with physical allocation policy. |
|
75 CONSTANT_VALUE, // Constant js::Value. |
|
76 CONSTANT_INDEX, // Constant arbitrary index. |
|
77 GPR, // General purpose register. |
|
78 FPU, // Floating-point register. |
|
79 STACK_SLOT, // Stack slot. |
|
80 ARGUMENT_SLOT // Argument slot. |
|
81 }; |
|
82 |
|
83 protected: |
|
84 bool isTagged() const { |
|
85 return !!(bits_ & TAG_MASK); |
|
86 } |
|
87 |
|
88 int32_t data() const { |
|
89 return int32_t(bits_) >> DATA_SHIFT; |
|
90 } |
|
91 void setData(int32_t data) { |
|
92 JS_ASSERT(int32_t(data) <= int32_t(DATA_MASK)); |
|
93 bits_ &= ~(DATA_MASK << DATA_SHIFT); |
|
94 bits_ |= (data << DATA_SHIFT); |
|
95 } |
|
96 void setKindAndData(Kind kind, uint32_t data) { |
|
97 JS_ASSERT(int32_t(data) <= int32_t(DATA_MASK)); |
|
98 bits_ = (uint32_t(kind) << KIND_SHIFT) | data << DATA_SHIFT; |
|
99 } |
|
100 |
|
101 LAllocation(Kind kind, uint32_t data) { |
|
102 setKindAndData(kind, data); |
|
103 } |
|
104 explicit LAllocation(Kind kind) { |
|
105 setKindAndData(kind, 0); |
|
106 } |
|
107 |
|
108 public: |
|
109 LAllocation() : bits_(0) |
|
110 { } |
|
111 |
|
112 static LAllocation *New(TempAllocator &alloc) { |
|
113 return new(alloc) LAllocation(); |
|
114 } |
|
115 template <typename T> |
|
116 static LAllocation *New(TempAllocator &alloc, const T &other) { |
|
117 return new(alloc) LAllocation(other); |
|
118 } |
|
119 |
|
120 // The value pointer must be rooted in MIR and have its low bit cleared. |
|
121 explicit LAllocation(const Value *vp) { |
|
122 bits_ = uintptr_t(vp); |
|
123 JS_ASSERT(!isTagged()); |
|
124 bits_ |= TAG_MASK; |
|
125 } |
|
126 inline explicit LAllocation(const AnyRegister ®); |
|
127 |
|
128 Kind kind() const { |
|
129 if (isTagged()) |
|
130 return CONSTANT_VALUE; |
|
131 return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); |
|
132 } |
|
133 |
|
134 bool isUse() const { |
|
135 return kind() == USE; |
|
136 } |
|
137 bool isConstant() const { |
|
138 return isConstantValue() || isConstantIndex(); |
|
139 } |
|
140 bool isConstantValue() const { |
|
141 return kind() == CONSTANT_VALUE; |
|
142 } |
|
143 bool isConstantIndex() const { |
|
144 return kind() == CONSTANT_INDEX; |
|
145 } |
|
146 bool isValue() const { |
|
147 return kind() == CONSTANT_VALUE; |
|
148 } |
|
149 bool isGeneralReg() const { |
|
150 return kind() == GPR; |
|
151 } |
|
152 bool isFloatReg() const { |
|
153 return kind() == FPU; |
|
154 } |
|
155 bool isStackSlot() const { |
|
156 return kind() == STACK_SLOT; |
|
157 } |
|
158 bool isArgument() const { |
|
159 return kind() == ARGUMENT_SLOT; |
|
160 } |
|
161 bool isRegister() const { |
|
162 return isGeneralReg() || isFloatReg(); |
|
163 } |
|
164 bool isRegister(bool needFloat) const { |
|
165 return needFloat ? isFloatReg() : isGeneralReg(); |
|
166 } |
|
167 bool isMemory() const { |
|
168 return isStackSlot() || isArgument(); |
|
169 } |
|
170 inline LUse *toUse(); |
|
171 inline const LUse *toUse() const; |
|
172 inline const LGeneralReg *toGeneralReg() const; |
|
173 inline const LFloatReg *toFloatReg() const; |
|
174 inline const LStackSlot *toStackSlot() const; |
|
175 inline const LArgument *toArgument() const; |
|
176 inline const LConstantIndex *toConstantIndex() const; |
|
177 inline AnyRegister toRegister() const; |
|
178 |
|
179 const Value *toConstant() const { |
|
180 JS_ASSERT(isConstantValue()); |
|
181 return reinterpret_cast<const Value *>(bits_ & ~TAG_MASK); |
|
182 } |
|
183 |
|
184 bool operator ==(const LAllocation &other) const { |
|
185 return bits_ == other.bits_; |
|
186 } |
|
187 |
|
188 bool operator !=(const LAllocation &other) const { |
|
189 return bits_ != other.bits_; |
|
190 } |
|
191 |
|
192 HashNumber hash() const { |
|
193 return bits_; |
|
194 } |
|
195 |
|
196 #ifdef DEBUG |
|
197 const char *toString() const; |
|
198 #else |
|
199 const char *toString() const { return "???"; } |
|
200 #endif |
|
201 |
|
202 void dump() const; |
|
203 }; |
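
// A minimal usage sketch (illustrative only; WhereIsIt is not part of this
// interface): code holding an LAllocation typically branches on the kind
// predicates and then downcasts with the matching to*() accessor declared
// above.
//
//   const char *WhereIsIt(const LAllocation &a) {
//       if (a.isRegister())
//           return "in a physical register (LGeneralReg or LFloatReg)";
//       if (a.isMemory())
//           return "in the frame (LStackSlot or LArgument)";
//       if (a.isConstant())
//           return "a constant (Value pointer or arbitrary index)";
//       return "an unallocated use of a virtual register (LUse)";
//   }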
|
204 |
|
205 class LUse : public LAllocation |
|
206 { |
|
207 static const uint32_t POLICY_BITS = 3; |
|
208 static const uint32_t POLICY_SHIFT = 0; |
|
209 static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1; |
|
210 static const uint32_t REG_BITS = 5; |
|
211 static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS; |
|
212 static const uint32_t REG_MASK = (1 << REG_BITS) - 1; |
|
213 |
|
214 // Whether the physical register for this operand may be reused for a def. |
|
215 static const uint32_t USED_AT_START_BITS = 1; |
|
216 static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS; |
|
217 static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1; |
|
218 |
|
219 public: |
|
220 // Virtual registers get the remaining 19 bits. |
|
221 static const uint32_t VREG_BITS = DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS); |
|
222 static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS; |
|
223 static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1; |
|
224 |
|
225 enum Policy { |
|
226 // Input should be in a read-only register or stack slot. |
|
227 ANY, |
|
228 |
|
229 // Input must be in a read-only register. |
|
230 REGISTER, |
|
231 |
|
232 // Input must be in a specific, read-only register. |
|
233 FIXED, |
|
234 |
|
235 // Keep the used virtual register alive, and use whatever allocation is |
|
236 // available. This is similar to ANY but hints to the register allocator |
|
237 // that it is never useful to optimize this site. |
|
238 KEEPALIVE, |
|
239 |
|
240 // For snapshot inputs, indicates that the associated instruction will |
|
241 // write this input to its output register before bailing out. |
|
242 // The register allocator may thus allocate that output register, and |
|
243 // does not need to keep the virtual register alive (alternatively, |
|
244 // this may be treated as KEEPALIVE). |
|
245 RECOVERED_INPUT |
|
246 }; |
|
247 |
|
248 void set(Policy policy, uint32_t reg, bool usedAtStart) { |
|
249 setKindAndData(USE, (policy << POLICY_SHIFT) | |
|
250 (reg << REG_SHIFT) | |
|
251 ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT)); |
|
252 } |
|
253 |
|
254 public: |
|
255 LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) { |
|
256 set(policy, 0, usedAtStart); |
|
257 setVirtualRegister(vreg); |
|
258 } |
|
259 LUse(Policy policy, bool usedAtStart = false) { |
|
260 set(policy, 0, usedAtStart); |
|
261 } |
|
262 LUse(Register reg, bool usedAtStart = false) { |
|
263 set(FIXED, reg.code(), usedAtStart); |
|
264 } |
|
265 LUse(FloatRegister reg, bool usedAtStart = false) { |
|
266 set(FIXED, reg.code(), usedAtStart); |
|
267 } |
|
268 LUse(Register reg, uint32_t virtualRegister) { |
|
269 set(FIXED, reg.code(), false); |
|
270 setVirtualRegister(virtualRegister); |
|
271 } |
|
272 LUse(FloatRegister reg, uint32_t virtualRegister) { |
|
273 set(FIXED, reg.code(), false); |
|
274 setVirtualRegister(virtualRegister); |
|
275 } |
|
276 |
|
277 void setVirtualRegister(uint32_t index) { |
|
278 JS_ASSERT(index < VREG_MASK); |
|
279 |
|
280 uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT); |
|
281 setData(old | (index << VREG_SHIFT)); |
|
282 } |
|
283 |
|
284 Policy policy() const { |
|
285 Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK); |
|
286 return policy; |
|
287 } |
|
288 uint32_t virtualRegister() const { |
|
289 uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK; |
|
290 return index; |
|
291 } |
|
292 uint32_t registerCode() const { |
|
293 JS_ASSERT(policy() == FIXED); |
|
294 return (data() >> REG_SHIFT) & REG_MASK; |
|
295 } |
|
296 bool isFixedRegister() const { |
|
297 return policy() == FIXED; |
|
298 } |
|
299 bool usedAtStart() const { |
|
300 return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK); |
|
301 } |
|
302 }; |
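
// Hedged lowering sketch (the names vreg and scratchReg are assumptions made
// for the example, not declarations from this header): the constructors above
// are typically used like this when building LIR operands.
//
//   LUse any(vreg, LUse::ANY);               // any register or stack slot
//   LUse reg(vreg, LUse::REGISTER, true);    // some register, used at start
//   LUse fixed(scratchReg, vreg);            // pinned to a specific Register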
|
303 |
|
304 static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK; |
|
305 |
|
306 class LGeneralReg : public LAllocation |
|
307 { |
|
308 public: |
|
309 explicit LGeneralReg(Register reg) |
|
310 : LAllocation(GPR, reg.code()) |
|
311 { } |
|
312 |
|
313 Register reg() const { |
|
314 return Register::FromCode(data()); |
|
315 } |
|
316 }; |
|
317 |
|
318 class LFloatReg : public LAllocation |
|
319 { |
|
320 public: |
|
321 explicit LFloatReg(FloatRegister reg) |
|
322 : LAllocation(FPU, reg.code()) |
|
323 { } |
|
324 |
|
325 FloatRegister reg() const { |
|
326 return FloatRegister::FromCode(data()); |
|
327 } |
|
328 }; |
|
329 |
|
330 // Arbitrary constant index. |
|
331 class LConstantIndex : public LAllocation |
|
332 { |
|
333 explicit LConstantIndex(uint32_t index) |
|
334 : LAllocation(CONSTANT_INDEX, index) |
|
335 { } |
|
336 |
|
337 public: |
|
338 // Used as a placeholder for inputs that can be ignored. |
|
339 static LConstantIndex Bogus() { |
|
340 return LConstantIndex(0); |
|
341 } |
|
342 |
|
343 static LConstantIndex FromIndex(uint32_t index) { |
|
344 return LConstantIndex(index); |
|
345 } |
|
346 |
|
347 uint32_t index() const { |
|
348 return data(); |
|
349 } |
|
350 }; |
|
351 |
|
352 // Stack slots are indices into the stack. The indices are byte indices. |
|
353 class LStackSlot : public LAllocation |
|
354 { |
|
355 public: |
|
356 explicit LStackSlot(uint32_t slot) |
|
357 : LAllocation(STACK_SLOT, slot) |
|
358 { } |
|
359 |
|
360 uint32_t slot() const { |
|
361 return data(); |
|
362 } |
|
363 }; |
|
364 |
|
365 // Arguments are reverse indices into the stack. The indices are byte indices. |
|
366 class LArgument : public LAllocation |
|
367 { |
|
368 public: |
|
369 explicit LArgument(int32_t index) |
|
370 : LAllocation(ARGUMENT_SLOT, index) |
|
371 { } |
|
372 |
|
373 int32_t index() const { |
|
374 return data(); |
|
375 } |
|
376 }; |
|
377 |
|
378 // Represents storage for a definition. |
|
379 class LDefinition |
|
380 { |
|
381 // Bits containing policy, type, and virtual register. |
|
382 uint32_t bits_; |
|
383 |
|
384 // Before register allocation, this optionally contains a fixed policy. |
|
385 // Register allocation assigns a physical allocation to this field if none is |
|
386 // preset. |
|
387 // |
|
388 // Right now, pre-allocated outputs are limited to the following: |
|
389 // * Physical argument stack slots. |
|
390 // * Physical registers. |
|
391 LAllocation output_; |
|
392 |
|
393 static const uint32_t TYPE_BITS = 3; |
|
394 static const uint32_t TYPE_SHIFT = 0; |
|
395 static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1; |
|
396 static const uint32_t POLICY_BITS = 2; |
|
397 static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS; |
|
398 static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1; |
|
399 |
|
400 static const uint32_t VREG_BITS = (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS); |
|
401 static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS; |
|
402 static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1; |
|
403 |
|
404 public: |
|
405 // Note that definitions, by default, are always allocated a register, |
|
406 // unless the policy specifies that an input can be re-used and that input |
|
407 // is a stack slot. |
|
408 enum Policy { |
|
409 // An arbitrary register of an appropriate class will be assigned. |
|
410 DEFAULT, |
|
411 |
|
412 // The policy is predetermined by the LAllocation attached to this |
|
413 // definition. The allocation may be: |
|
414 // * A register, which may not appear as any fixed temporary. |
|
415 // * A stack slot or argument. |
|
416 // |
|
417 // Register allocation will not modify a preset allocation. |
|
418 PRESET, |
|
419 |
|
420 // One definition per instruction must re-use the first input |
|
421 // allocation, which (for now) must be a register. |
|
422 MUST_REUSE_INPUT, |
|
423 |
|
424 // This definition's virtual register is the same as another; this is |
|
425 // for instructions which consume a register and silently define it as |
|
426 // the same register. It is not legal to use this if doing so would |
|
427 // change the type of the virtual register. |
|
428 PASSTHROUGH |
|
429 }; |
|
430 |
|
431 enum Type { |
|
432 GENERAL, // Generic, integer or pointer-width data (GPR). |
|
433 INT32, // int32 data (GPR). |
|
434 OBJECT, // Pointer that may be collected as garbage (GPR). |
|
435 SLOTS, // Slots/elements pointer that may be moved by minor GCs (GPR). |
|
436 FLOAT32, // 32-bit floating-point value (FPU). |
|
437 DOUBLE, // 64-bit floating-point value (FPU). |
|
438 #ifdef JS_NUNBOX32 |
|
439 // A type virtual register must be followed by a payload virtual |
|
440 // register, as both will be tracked as a single gcthing. |
|
441 TYPE, |
|
442 PAYLOAD |
|
443 #else |
|
444 BOX // Joined box, for punbox systems. (GPR, gcthing) |
|
445 #endif |
|
446 }; |
|
447 |
|
448 void set(uint32_t index, Type type, Policy policy) { |
|
449 JS_STATIC_ASSERT(MAX_VIRTUAL_REGISTERS <= VREG_MASK); |
|
450 bits_ = (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT); |
|
451 } |
|
452 |
|
453 public: |
|
454 LDefinition(uint32_t index, Type type, Policy policy = DEFAULT) { |
|
455 set(index, type, policy); |
|
456 } |
|
457 |
|
458 LDefinition(Type type, Policy policy = DEFAULT) { |
|
459 set(0, type, policy); |
|
460 } |
|
461 |
|
462 LDefinition(Type type, const LAllocation &a) |
|
463 : output_(a) |
|
464 { |
|
465 set(0, type, PRESET); |
|
466 } |
|
467 |
|
468 LDefinition(uint32_t index, Type type, const LAllocation &a) |
|
469 : output_(a) |
|
470 { |
|
471 set(index, type, PRESET); |
|
472 } |
|
473 |
|
474 LDefinition() : bits_(0) |
|
475 { } |
|
476 |
|
477 static LDefinition BogusTemp() { |
|
478 return LDefinition(GENERAL, LConstantIndex::Bogus()); |
|
479 } |
|
480 |
|
481 Policy policy() const { |
|
482 return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK); |
|
483 } |
|
484 Type type() const { |
|
485 return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); |
|
486 } |
|
487 bool isFloatReg() const { |
|
488 return type() == FLOAT32 || type() == DOUBLE; |
|
489 } |
|
490 uint32_t virtualRegister() const { |
|
491 return (bits_ >> VREG_SHIFT) & VREG_MASK; |
|
492 } |
|
493 LAllocation *output() { |
|
494 return &output_; |
|
495 } |
|
496 const LAllocation *output() const { |
|
497 return &output_; |
|
498 } |
|
499 bool isPreset() const { |
|
500 return policy() == PRESET; |
|
501 } |
|
502 bool isBogusTemp() const { |
|
503 return isPreset() && output()->isConstantIndex(); |
|
504 } |
|
505 void setVirtualRegister(uint32_t index) { |
|
506 JS_ASSERT(index < VREG_MASK); |
|
507 bits_ &= ~(VREG_MASK << VREG_SHIFT); |
|
508 bits_ |= index << VREG_SHIFT; |
|
509 } |
|
510 void setOutput(const LAllocation &a) { |
|
511 output_ = a; |
|
512 if (!a.isUse()) { |
|
513 bits_ &= ~(POLICY_MASK << POLICY_SHIFT); |
|
514 bits_ |= PRESET << POLICY_SHIFT; |
|
515 } |
|
516 } |
|
517 void setReusedInput(uint32_t operand) { |
|
518 output_ = LConstantIndex::FromIndex(operand); |
|
519 } |
|
520 uint32_t getReusedInput() const { |
|
521 JS_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT); |
|
522 return output_.toConstantIndex()->index(); |
|
523 } |
|
524 |
|
525 static inline Type TypeFrom(MIRType type) { |
|
526 switch (type) { |
|
527 case MIRType_Boolean: |
|
528 case MIRType_Int32: |
|
529 // The stack slot allocator doesn't currently support allocating |
|
530 // 1-byte slots, so for now we lower MIRType_Boolean into INT32. |
|
531 static_assert(sizeof(bool) <= sizeof(int32_t), "bool doesn't fit in an int32 slot"); |
|
532 return LDefinition::INT32; |
|
533 case MIRType_String: |
|
534 case MIRType_Object: |
|
535 return LDefinition::OBJECT; |
|
536 case MIRType_Double: |
|
537 return LDefinition::DOUBLE; |
|
538 case MIRType_Float32: |
|
539 return LDefinition::FLOAT32; |
|
540 #if defined(JS_PUNBOX64) |
|
541 case MIRType_Value: |
|
542 return LDefinition::BOX; |
|
543 #endif |
|
544 case MIRType_Slots: |
|
545 case MIRType_Elements: |
|
546 return LDefinition::SLOTS; |
|
547 case MIRType_Pointer: |
|
548 return LDefinition::GENERAL; |
|
549 case MIRType_ForkJoinContext: |
|
550 return LDefinition::GENERAL; |
|
551 default: |
|
552 MOZ_ASSUME_UNREACHABLE("unexpected type"); |
|
553 } |
|
554 } |
|
555 }; |
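
// Sketch of typical construction (illustrative; vreg and mir are assumed to
// be a fresh virtual register number and an MDefinition* from the MIR graph):
//
//   LDefinition def(vreg, LDefinition::TypeFrom(mir->type()));
//   // def.policy() == LDefinition::DEFAULT, so the register allocator is
//   // free to choose any register of the appropriate class.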
|
556 |
|
557 // Forward declarations of LIR types. |
|
558 #define LIROP(op) class L##op; |
|
559 LIR_OPCODE_LIST(LIROP) |
|
560 #undef LIROP |
|
561 |
|
562 class LSnapshot; |
|
563 class LSafepoint; |
|
564 class LInstructionVisitor; |
|
565 |
|
566 class LInstruction |
|
567 : public TempObject, |
|
568 public InlineListNode<LInstruction> |
|
569 { |
|
570 uint32_t id_; |
|
571 |
|
572 // This snapshot, if set, is built from an MResumePoint; it is used to restart |
|
573 // execution from the resume point's pc when bailing out. |
|
574 LSnapshot *snapshot_; |
|
575 |
|
576 // Structure capturing the set of stack slots and registers which are known |
|
577 // to hold either gcthings or Values. |
|
578 LSafepoint *safepoint_; |
|
579 |
|
580 protected: |
|
581 MDefinition *mir_; |
|
582 |
|
583 LInstruction() |
|
584 : id_(0), |
|
585 snapshot_(nullptr), |
|
586 safepoint_(nullptr), |
|
587 mir_(nullptr) |
|
588 { } |
|
589 |
|
590 public: |
|
591 class InputIterator; |
|
592 enum Opcode { |
|
593 # define LIROP(name) LOp_##name, |
|
594 LIR_OPCODE_LIST(LIROP) |
|
595 # undef LIROP |
|
596 LOp_Invalid |
|
597 }; |
|
598 |
|
599 const char *opName() { |
|
600 switch (op()) { |
|
601 # define LIR_NAME_INS(name) \ |
|
602 case LOp_##name: return #name; |
|
603 LIR_OPCODE_LIST(LIR_NAME_INS) |
|
604 # undef LIR_NAME_INS |
|
605 default: |
|
606 return "Invalid"; |
|
607 } |
|
608 } |
|
609 |
|
610 // Hook for opcodes to add extra high level detail about what code will be |
|
611 // emitted for the op. |
|
612 virtual const char *extraName() const { |
|
613 return nullptr; |
|
614 } |
|
615 |
|
616 public: |
|
617 virtual Opcode op() const = 0; |
|
618 |
|
619 // Returns the number of outputs of this instruction. If an output is |
|
620 // unallocated, it is an LDefinition, defining a virtual register. |
|
621 virtual size_t numDefs() const = 0; |
|
622 virtual LDefinition *getDef(size_t index) = 0; |
|
623 virtual void setDef(size_t index, const LDefinition &def) = 0; |
|
624 |
|
625 // Returns information about operands. |
|
626 virtual size_t numOperands() const = 0; |
|
627 virtual LAllocation *getOperand(size_t index) = 0; |
|
628 virtual void setOperand(size_t index, const LAllocation &a) = 0; |
|
629 |
|
630 // Returns information about the temporary registers needed. Each temporary |
|
631 // is an LDefinition, either unallocated (DEFAULT policy) or preset to a fixed register. |
|
632 virtual size_t numTemps() const = 0; |
|
633 virtual LDefinition *getTemp(size_t index) = 0; |
|
634 virtual void setTemp(size_t index, const LDefinition &a) = 0; |
|
635 |
|
636 // Returns the number of successors of this instruction, if it is a control |
|
637 // transfer instruction, or zero otherwise. |
|
638 virtual size_t numSuccessors() const = 0; |
|
639 virtual MBasicBlock *getSuccessor(size_t i) const = 0; |
|
640 virtual void setSuccessor(size_t i, MBasicBlock *successor) = 0; |
|
641 |
|
642 virtual bool isCall() const { |
|
643 return false; |
|
644 } |
|
645 uint32_t id() const { |
|
646 return id_; |
|
647 } |
|
648 void setId(uint32_t id) { |
|
649 JS_ASSERT(!id_); |
|
650 JS_ASSERT(id); |
|
651 id_ = id; |
|
652 } |
|
653 LSnapshot *snapshot() const { |
|
654 return snapshot_; |
|
655 } |
|
656 LSafepoint *safepoint() const { |
|
657 return safepoint_; |
|
658 } |
|
659 void setMir(MDefinition *mir) { |
|
660 mir_ = mir; |
|
661 } |
|
662 MDefinition *mirRaw() const { |
|
663 /* Untyped MIR for this op. Prefer mir() methods in subclasses. */ |
|
664 return mir_; |
|
665 } |
|
666 void assignSnapshot(LSnapshot *snapshot); |
|
667 void initSafepoint(TempAllocator &alloc); |
|
668 |
|
669 // For an instruction which has a MUST_REUSE_INPUT output, whether that |
|
670 // output register will be restored to its original value when bailing out. |
|
671 virtual bool recoversInput() const { |
|
672 return false; |
|
673 } |
|
674 |
|
675 virtual void dump(FILE *fp); |
|
676 void dump(); |
|
677 static void printName(FILE *fp, Opcode op); |
|
678 virtual void printName(FILE *fp); |
|
679 virtual void printOperands(FILE *fp); |
|
680 virtual void printInfo(FILE *fp) { } |
|
681 |
|
682 public: |
|
683 // Opcode testing and casts. |
|
684 # define LIROP(name) \ |
|
685 bool is##name() const { \ |
|
686 return op() == LOp_##name; \ |
|
687 } \ |
|
688 inline L##name *to##name(); |
|
689 LIR_OPCODE_LIST(LIROP) |
|
690 # undef LIROP |
|
691 |
|
692 virtual bool accept(LInstructionVisitor *visitor) = 0; |
|
693 }; |
|
694 |
|
695 class LInstructionVisitor |
|
696 { |
|
697 LInstruction *ins_; |
|
698 |
|
699 protected: |
|
700 jsbytecode *lastPC_; |
|
701 |
|
702 LInstruction *instruction() { |
|
703 return ins_; |
|
704 } |
|
705 |
|
706 public: |
|
707 void setInstruction(LInstruction *ins) { |
|
708 ins_ = ins; |
|
709 if (ins->mirRaw()) |
|
710 lastPC_ = ins->mirRaw()->trackedPc(); |
|
711 } |
|
712 |
|
713 LInstructionVisitor() |
|
714 : ins_(nullptr), |
|
715 lastPC_(nullptr) |
|
716 {} |
|
717 |
|
718 public: |
|
719 #define VISIT_INS(op) virtual bool visit##op(L##op *) { MOZ_ASSUME_UNREACHABLE("NYI: " #op); } |
|
720 LIR_OPCODE_LIST(VISIT_INS) |
|
721 #undef VISIT_INS |
|
722 }; |
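
// Hedged sketch of the double dispatch set up by accept() and the visit##op
// methods above (LGoto comes from LOpcodes.h/LIR-Common.h; the pass name is
// hypothetical): LIR_HEADER, defined near the end of this file, expands
// accept() so that it forwards to the matching visit method.
//
//   class MyPrintPass : public LInstructionVisitor
//   {
//     public:
//       bool visitGoto(LGoto *lir) {
//           // Only Goto is handled; any other opcode hits the
//           // MOZ_ASSUME_UNREACHABLE default above.
//           return true;
//       }
//   };
//
//   // Usage: ins->accept(&pass) records the instruction via setInstruction()
//   // and then calls pass.visitGoto() with the concrete LGoto*.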
|
723 |
|
724 typedef InlineList<LInstruction>::iterator LInstructionIterator; |
|
725 typedef InlineList<LInstruction>::reverse_iterator LInstructionReverseIterator; |
|
726 |
|
727 class LPhi; |
|
728 class LMoveGroup; |
|
729 class LBlock : public TempObject |
|
730 { |
|
731 MBasicBlock *block_; |
|
732 Vector<LPhi *, 4, IonAllocPolicy> phis_; |
|
733 InlineList<LInstruction> instructions_; |
|
734 LMoveGroup *entryMoveGroup_; |
|
735 LMoveGroup *exitMoveGroup_; |
|
736 Label label_; |
|
737 |
|
738 LBlock(TempAllocator &alloc, MBasicBlock *block) |
|
739 : block_(block), |
|
740 phis_(alloc), |
|
741 entryMoveGroup_(nullptr), |
|
742 exitMoveGroup_(nullptr) |
|
743 { } |
|
744 |
|
745 public: |
|
746 static LBlock *New(TempAllocator &alloc, MBasicBlock *from) { |
|
747 return new(alloc) LBlock(alloc, from); |
|
748 } |
|
749 void add(LInstruction *ins) { |
|
750 instructions_.pushBack(ins); |
|
751 } |
|
752 bool addPhi(LPhi *phi) { |
|
753 return phis_.append(phi); |
|
754 } |
|
755 size_t numPhis() const { |
|
756 return phis_.length(); |
|
757 } |
|
758 LPhi *getPhi(size_t index) const { |
|
759 return phis_[index]; |
|
760 } |
|
761 void removePhi(size_t index) { |
|
762 phis_.erase(&phis_[index]); |
|
763 } |
|
764 void clearPhis() { |
|
765 phis_.clear(); |
|
766 } |
|
767 MBasicBlock *mir() const { |
|
768 return block_; |
|
769 } |
|
770 LInstructionIterator begin() { |
|
771 return instructions_.begin(); |
|
772 } |
|
773 LInstructionIterator begin(LInstruction *at) { |
|
774 return instructions_.begin(at); |
|
775 } |
|
776 LInstructionIterator end() { |
|
777 return instructions_.end(); |
|
778 } |
|
779 LInstructionReverseIterator rbegin() { |
|
780 return instructions_.rbegin(); |
|
781 } |
|
782 LInstructionReverseIterator rbegin(LInstruction *at) { |
|
783 return instructions_.rbegin(at); |
|
784 } |
|
785 LInstructionReverseIterator rend() { |
|
786 return instructions_.rend(); |
|
787 } |
|
788 InlineList<LInstruction> &instructions() { |
|
789 return instructions_; |
|
790 } |
|
791 void insertAfter(LInstruction *at, LInstruction *ins) { |
|
792 instructions_.insertAfter(at, ins); |
|
793 } |
|
794 void insertBefore(LInstruction *at, LInstruction *ins) { |
|
795 JS_ASSERT(!at->isLabel()); |
|
796 instructions_.insertBefore(at, ins); |
|
797 } |
|
798 uint32_t firstId(); |
|
799 uint32_t lastId(); |
|
800 Label *label() { |
|
801 return &label_; |
|
802 } |
|
803 LMoveGroup *getEntryMoveGroup(TempAllocator &alloc); |
|
804 LMoveGroup *getExitMoveGroup(TempAllocator &alloc); |
|
805 }; |
|
806 |
|
807 template <size_t Defs, size_t Operands, size_t Temps> |
|
808 class LInstructionHelper : public LInstruction |
|
809 { |
|
810 mozilla::Array<LDefinition, Defs> defs_; |
|
811 mozilla::Array<LAllocation, Operands> operands_; |
|
812 mozilla::Array<LDefinition, Temps> temps_; |
|
813 |
|
814 public: |
|
815 size_t numDefs() const MOZ_FINAL MOZ_OVERRIDE { |
|
816 return Defs; |
|
817 } |
|
818 LDefinition *getDef(size_t index) MOZ_FINAL MOZ_OVERRIDE { |
|
819 return &defs_[index]; |
|
820 } |
|
821 size_t numOperands() const MOZ_FINAL MOZ_OVERRIDE { |
|
822 return Operands; |
|
823 } |
|
824 LAllocation *getOperand(size_t index) MOZ_FINAL MOZ_OVERRIDE { |
|
825 return &operands_[index]; |
|
826 } |
|
827 size_t numTemps() const MOZ_FINAL MOZ_OVERRIDE { |
|
828 return Temps; |
|
829 } |
|
830 LDefinition *getTemp(size_t index) MOZ_FINAL MOZ_OVERRIDE { |
|
831 return &temps_[index]; |
|
832 } |
|
833 |
|
834 void setDef(size_t index, const LDefinition &def) MOZ_FINAL MOZ_OVERRIDE { |
|
835 defs_[index] = def; |
|
836 } |
|
837 void setOperand(size_t index, const LAllocation &a) MOZ_FINAL MOZ_OVERRIDE { |
|
838 operands_[index] = a; |
|
839 } |
|
840 void setTemp(size_t index, const LDefinition &a) MOZ_FINAL MOZ_OVERRIDE { |
|
841 temps_[index] = a; |
|
842 } |
|
843 |
|
844 size_t numSuccessors() const { |
|
845 return 0; |
|
846 } |
|
847 MBasicBlock *getSuccessor(size_t i) const { |
|
848 JS_ASSERT(false); |
|
849 return nullptr; |
|
850 } |
|
851 void setSuccessor(size_t i, MBasicBlock *successor) { |
|
852 JS_ASSERT(false); |
|
853 } |
|
854 |
|
855 // Default accessors, assuming a single input and output, respectively. |
|
856 const LAllocation *input() { |
|
857 JS_ASSERT(numOperands() == 1); |
|
858 return getOperand(0); |
|
859 } |
|
860 const LDefinition *output() { |
|
861 JS_ASSERT(numDefs() == 1); |
|
862 return getDef(0); |
|
863 } |
|
864 |
|
865 virtual void printInfo(FILE *fp) { |
|
866 printOperands(fp); |
|
867 } |
|
868 }; |
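
// Hedged sketch of how a concrete LIR instruction is usually declared on top
// of this helper (LFakeAdd is hypothetical; real opcodes must also appear in
// LOpcodes.h so that LIR_HEADER, defined at the end of this file, can expand):
//
//   class LFakeAdd : public LInstructionHelper<1, 2, 0>   // 1 def, 2 operands
//   {
//     public:
//       LIR_HEADER(FakeAdd)
//       LFakeAdd(const LAllocation &lhs, const LAllocation &rhs) {
//           setOperand(0, lhs);
//           setOperand(1, rhs);
//       }
//       const LAllocation *lhs() { return getOperand(0); }
//       const LAllocation *rhs() { return getOperand(1); }
//   };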
|
869 |
|
870 template <size_t Defs, size_t Operands, size_t Temps> |
|
871 class LCallInstructionHelper : public LInstructionHelper<Defs, Operands, Temps> |
|
872 { |
|
873 public: |
|
874 virtual bool isCall() const { |
|
875 return true; |
|
876 } |
|
877 }; |
|
878 |
|
879 class LRecoverInfo : public TempObject |
|
880 { |
|
881 public: |
|
882 typedef Vector<MResumePoint *, 2, IonAllocPolicy> Instructions; |
|
883 |
|
884 private: |
|
885 // List of instructions needed to recover the stack frames. |
|
886 // Outer frames are stored before inner frames. |
|
887 Instructions instructions_; |
|
888 |
|
889 // Cached offset where this resume point is encoded. |
|
890 RecoverOffset recoverOffset_; |
|
891 |
|
892 LRecoverInfo(TempAllocator &alloc); |
|
893 bool init(MResumePoint *mir); |
|
894 |
|
895 public: |
|
896 static LRecoverInfo *New(MIRGenerator *gen, MResumePoint *mir); |
|
897 |
|
898 // Resume point of the innermost function. |
|
899 MResumePoint *mir() const { |
|
900 return instructions_.back(); |
|
901 } |
|
902 RecoverOffset recoverOffset() const { |
|
903 return recoverOffset_; |
|
904 } |
|
905 void setRecoverOffset(RecoverOffset offset) { |
|
906 JS_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET); |
|
907 recoverOffset_ = offset; |
|
908 } |
|
909 |
|
910 MResumePoint **begin() { |
|
911 return instructions_.begin(); |
|
912 } |
|
913 MResumePoint **end() { |
|
914 return instructions_.end(); |
|
915 } |
|
916 }; |
|
917 |
|
918 // An LSnapshot is the reflection of an MResumePoint in LIR. Unlike MResumePoints, |
|
919 // LSnapshots cannot be shared, as they are filled in by the register allocator in |
|
920 // order to capture the precise low-level stack state in between an |
|
921 // instruction's input and output. During code generation, LSnapshots are |
|
922 // compressed and saved in the compiled script. |
|
923 class LSnapshot : public TempObject |
|
924 { |
|
925 private: |
|
926 uint32_t numSlots_; |
|
927 LAllocation *slots_; |
|
928 LRecoverInfo *recoverInfo_; |
|
929 SnapshotOffset snapshotOffset_; |
|
930 BailoutId bailoutId_; |
|
931 BailoutKind bailoutKind_; |
|
932 |
|
933 LSnapshot(LRecoverInfo *recover, BailoutKind kind); |
|
934 bool init(MIRGenerator *gen); |
|
935 |
|
936 public: |
|
937 static LSnapshot *New(MIRGenerator *gen, LRecoverInfo *recover, BailoutKind kind); |
|
938 |
|
939 size_t numEntries() const { |
|
940 return numSlots_; |
|
941 } |
|
942 size_t numSlots() const { |
|
943 return numSlots_ / BOX_PIECES; |
|
944 } |
|
945 LAllocation *payloadOfSlot(size_t i) { |
|
946 JS_ASSERT(i < numSlots()); |
|
947 size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1); |
|
948 return getEntry(entryIndex); |
|
949 } |
|
950 #ifdef JS_NUNBOX32 |
|
951 LAllocation *typeOfSlot(size_t i) { |
|
952 JS_ASSERT(i < numSlots()); |
|
953 size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2); |
|
954 return getEntry(entryIndex); |
|
955 } |
|
956 #endif |
|
957 LAllocation *getEntry(size_t i) { |
|
958 JS_ASSERT(i < numSlots_); |
|
959 return &slots_[i]; |
|
960 } |
|
961 void setEntry(size_t i, const LAllocation &alloc) { |
|
962 JS_ASSERT(i < numSlots_); |
|
963 slots_[i] = alloc; |
|
964 } |
|
965 LRecoverInfo *recoverInfo() const { |
|
966 return recoverInfo_; |
|
967 } |
|
968 MResumePoint *mir() const { |
|
969 return recoverInfo()->mir(); |
|
970 } |
|
971 SnapshotOffset snapshotOffset() const { |
|
972 return snapshotOffset_; |
|
973 } |
|
974 BailoutId bailoutId() const { |
|
975 return bailoutId_; |
|
976 } |
|
977 void setSnapshotOffset(SnapshotOffset offset) { |
|
978 JS_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET); |
|
979 snapshotOffset_ = offset; |
|
980 } |
|
981 void setBailoutId(BailoutId id) { |
|
982 JS_ASSERT(bailoutId_ == INVALID_BAILOUT_ID); |
|
983 bailoutId_ = id; |
|
984 } |
|
985 BailoutKind bailoutKind() const { |
|
986 return bailoutKind_; |
|
987 } |
|
988 void setBailoutKind(BailoutKind kind) { |
|
989 bailoutKind_ = kind; |
|
990 } |
|
991 void rewriteRecoveredInput(LUse input); |
|
992 }; |
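
// Orientation note (a restatement of the accessors above, not new behavior):
// each boxed slot occupies BOX_PIECES consecutive entries. On JS_NUNBOX32,
// slot i maps to entry 2*i (the type) and entry 2*i+1 (the payload); on
// JS_PUNBOX64, slot i is simply entry i, which is why numSlots() divides
// numEntries() by BOX_PIECES.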
|
993 |
|
994 struct SafepointNunboxEntry { |
|
995 LAllocation type; |
|
996 LAllocation payload; |
|
997 |
|
998 SafepointNunboxEntry() { } |
|
999 SafepointNunboxEntry(LAllocation type, LAllocation payload) |
|
1000 : type(type), payload(payload) |
|
1001 { } |
|
1002 }; |
|
1003 |
|
1004 class LSafepoint : public TempObject |
|
1005 { |
|
1006 typedef SafepointNunboxEntry NunboxEntry; |
|
1007 |
|
1008 public: |
|
1009 typedef Vector<uint32_t, 0, IonAllocPolicy> SlotList; |
|
1010 typedef Vector<NunboxEntry, 0, IonAllocPolicy> NunboxList; |
|
1011 |
|
1012 private: |
|
1013 // The information in a safepoint describes the registers and gc related |
|
1014 // values that are live at the start of the associated instruction. |
|
1015 |
|
1016 // The set of registers which are live at an OOL call made within the |
|
1017 // instruction. This includes any registers for inputs which are not |
|
1018 // use-at-start, any registers for temps, and any registers live after the |
|
1019 // call except outputs of the instruction. |
|
1020 // |
|
1021 // For call instructions, the live regs are empty. Call instructions may |
|
1022 // have register inputs or temporaries, which will *not* be in the live |
|
1023 // registers: if passed to the call, the values passed will be marked via |
|
1024 // MarkJitExitFrame, and no registers can be live after the instruction |
|
1025 // except its outputs. |
|
1026 RegisterSet liveRegs_; |
|
1027 |
|
1028 // The subset of liveRegs which contains gcthing pointers. |
|
1029 GeneralRegisterSet gcRegs_; |
|
1030 |
|
1031 #ifdef CHECK_OSIPOINT_REGISTERS |
|
1032 // Clobbered regs of the current instruction. This set is never written to |
|
1033 // the safepoint; it's only used by assertions during compilation. |
|
1034 RegisterSet clobberedRegs_; |
|
1035 #endif |
|
1036 |
|
1037 // Offset to a position in the safepoint stream, or |
|
1038 // INVALID_SAFEPOINT_OFFSET. |
|
1039 uint32_t safepointOffset_; |
|
1040 |
|
1041 // Assembler buffer displacement to OSI point's call location. |
|
1042 uint32_t osiCallPointOffset_; |
|
1043 |
|
1044 // List of stack slots which have gcthing pointers. |
|
1045 SlotList gcSlots_; |
|
1046 |
|
1047 // List of stack slots which have Values. |
|
1048 SlotList valueSlots_; |
|
1049 |
|
1050 #ifdef JS_NUNBOX32 |
|
1051 // List of registers (in liveRegs) and stack slots which contain pieces of Values. |
|
1052 NunboxList nunboxParts_; |
|
1053 |
|
1054 // Number of nunboxParts which are not completely filled in. |
|
1055 uint32_t partialNunboxes_; |
|
1056 #elif defined(JS_PUNBOX64) |
|
1057 // The subset of liveRegs which have Values. |
|
1058 GeneralRegisterSet valueRegs_; |
|
1059 #endif |
|
1060 |
|
1061 // The subset of liveRegs which contains pointers to slots/elements. |
|
1062 GeneralRegisterSet slotsOrElementsRegs_; |
|
1063 |
|
1064 // List of stack slots which have slots/elements pointers. |
|
1065 SlotList slotsOrElementsSlots_; |
|
1066 |
|
1067 public: |
|
1068 void assertInvariants() { |
|
1069 // Every register in valueRegs and gcRegs should also be in liveRegs. |
|
1070 #ifndef JS_NUNBOX32 |
|
1071 JS_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0); |
|
1072 #endif |
|
1073 JS_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0); |
|
1074 } |
|
1075 |
|
1076 LSafepoint(TempAllocator &alloc) |
|
1077 : safepointOffset_(INVALID_SAFEPOINT_OFFSET) |
|
1078 , osiCallPointOffset_(0) |
|
1079 , gcSlots_(alloc) |
|
1080 , valueSlots_(alloc) |
|
1081 #ifdef JS_NUNBOX32 |
|
1082 , nunboxParts_(alloc) |
|
1083 , partialNunboxes_(0) |
|
1084 #endif |
|
1085 , slotsOrElementsSlots_(alloc) |
|
1086 { |
|
1087 assertInvariants(); |
|
1088 } |
|
1089 void addLiveRegister(AnyRegister reg) { |
|
1090 liveRegs_.addUnchecked(reg); |
|
1091 assertInvariants(); |
|
1092 } |
|
1093 const RegisterSet &liveRegs() const { |
|
1094 return liveRegs_; |
|
1095 } |
|
1096 #ifdef CHECK_OSIPOINT_REGISTERS |
|
1097 void addClobberedRegister(AnyRegister reg) { |
|
1098 clobberedRegs_.addUnchecked(reg); |
|
1099 assertInvariants(); |
|
1100 } |
|
1101 const RegisterSet &clobberedRegs() const { |
|
1102 return clobberedRegs_; |
|
1103 } |
|
1104 #endif |
|
1105 void addGcRegister(Register reg) { |
|
1106 gcRegs_.addUnchecked(reg); |
|
1107 assertInvariants(); |
|
1108 } |
|
1109 GeneralRegisterSet gcRegs() const { |
|
1110 return gcRegs_; |
|
1111 } |
|
1112 bool addGcSlot(uint32_t slot) { |
|
1113 bool result = gcSlots_.append(slot); |
|
1114 if (result) |
|
1115 assertInvariants(); |
|
1116 return result; |
|
1117 } |
|
1118 SlotList &gcSlots() { |
|
1119 return gcSlots_; |
|
1120 } |
|
1121 |
|
1122 SlotList &slotsOrElementsSlots() { |
|
1123 return slotsOrElementsSlots_; |
|
1124 } |
|
1125 GeneralRegisterSet slotsOrElementsRegs() const { |
|
1126 return slotsOrElementsRegs_; |
|
1127 } |
|
1128 void addSlotsOrElementsRegister(Register reg) { |
|
1129 slotsOrElementsRegs_.addUnchecked(reg); |
|
1130 assertInvariants(); |
|
1131 } |
|
1132 bool addSlotsOrElementsSlot(uint32_t slot) { |
|
1133 bool result = slotsOrElementsSlots_.append(slot); |
|
1134 if (result) |
|
1135 assertInvariants(); |
|
1136 return result; |
|
1137 } |
|
1138 bool addSlotsOrElementsPointer(LAllocation alloc) { |
|
1139 if (alloc.isStackSlot()) |
|
1140 return addSlotsOrElementsSlot(alloc.toStackSlot()->slot()); |
|
1141 JS_ASSERT(alloc.isRegister()); |
|
1142 addSlotsOrElementsRegister(alloc.toRegister().gpr()); |
|
1143 assertInvariants(); |
|
1144 return true; |
|
1145 } |
|
1146 bool hasSlotsOrElementsPointer(LAllocation alloc) const { |
|
1147 if (alloc.isRegister()) |
|
1148 return slotsOrElementsRegs().has(alloc.toRegister().gpr()); |
|
1149 if (alloc.isStackSlot()) { |
|
1150 for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) { |
|
1151 if (slotsOrElementsSlots_[i] == alloc.toStackSlot()->slot()) |
|
1152 return true; |
|
1153 } |
|
1154 return false; |
|
1155 } |
|
1156 return false; |
|
1157 } |
|
1158 |
|
1159 bool addGcPointer(LAllocation alloc) { |
|
1160 if (alloc.isStackSlot()) |
|
1161 return addGcSlot(alloc.toStackSlot()->slot()); |
|
1162 if (alloc.isRegister()) |
|
1163 addGcRegister(alloc.toRegister().gpr()); |
|
1164 assertInvariants(); |
|
1165 return true; |
|
1166 } |
|
1167 |
|
1168 bool hasGcPointer(LAllocation alloc) const { |
|
1169 if (alloc.isRegister()) |
|
1170 return gcRegs().has(alloc.toRegister().gpr()); |
|
1171 if (alloc.isStackSlot()) { |
|
1172 for (size_t i = 0; i < gcSlots_.length(); i++) { |
|
1173 if (gcSlots_[i] == alloc.toStackSlot()->slot()) |
|
1174 return true; |
|
1175 } |
|
1176 return false; |
|
1177 } |
|
1178 JS_ASSERT(alloc.isArgument()); |
|
1179 return true; |
|
1180 } |
|
1181 |
|
1182 bool addValueSlot(uint32_t slot) { |
|
1183 bool result = valueSlots_.append(slot); |
|
1184 if (result) |
|
1185 assertInvariants(); |
|
1186 return result; |
|
1187 } |
|
1188 SlotList &valueSlots() { |
|
1189 return valueSlots_; |
|
1190 } |
|
1191 |
|
1192 bool hasValueSlot(uint32_t slot) const { |
|
1193 for (size_t i = 0; i < valueSlots_.length(); i++) { |
|
1194 if (valueSlots_[i] == slot) |
|
1195 return true; |
|
1196 } |
|
1197 return false; |
|
1198 } |
|
1199 |
|
1200 #ifdef JS_NUNBOX32 |
|
1201 |
|
1202 bool addNunboxParts(LAllocation type, LAllocation payload) { |
|
1203 bool result = nunboxParts_.append(NunboxEntry(type, payload)); |
|
1204 if (result) |
|
1205 assertInvariants(); |
|
1206 return result; |
|
1207 } |
|
1208 |
|
1209 bool addNunboxType(uint32_t typeVreg, LAllocation type) { |
|
1210 for (size_t i = 0; i < nunboxParts_.length(); i++) { |
|
1211 if (nunboxParts_[i].type == type) |
|
1212 return true; |
|
1213 if (nunboxParts_[i].type == LUse(typeVreg, LUse::ANY)) { |
|
1214 nunboxParts_[i].type = type; |
|
1215 partialNunboxes_--; |
|
1216 return true; |
|
1217 } |
|
1218 } |
|
1219 partialNunboxes_++; |
|
1220 |
|
1221 // vregs for nunbox pairs are adjacent, with the type coming first. |
|
1222 uint32_t payloadVreg = typeVreg + 1; |
|
1223 bool result = nunboxParts_.append(NunboxEntry(type, LUse(payloadVreg, LUse::ANY))); |
|
1224 if (result) |
|
1225 assertInvariants(); |
|
1226 return result; |
|
1227 } |
|
1228 |
|
1229 bool hasNunboxType(LAllocation type) const { |
|
1230 if (type.isArgument()) |
|
1231 return true; |
|
1232 if (type.isStackSlot() && hasValueSlot(type.toStackSlot()->slot() + 1)) |
|
1233 return true; |
|
1234 for (size_t i = 0; i < nunboxParts_.length(); i++) { |
|
1235 if (nunboxParts_[i].type == type) |
|
1236 return true; |
|
1237 } |
|
1238 return false; |
|
1239 } |
|
1240 |
|
1241 bool addNunboxPayload(uint32_t payloadVreg, LAllocation payload) { |
|
1242 for (size_t i = 0; i < nunboxParts_.length(); i++) { |
|
1243 if (nunboxParts_[i].payload == payload) |
|
1244 return true; |
|
1245 if (nunboxParts_[i].payload == LUse(payloadVreg, LUse::ANY)) { |
|
1246 partialNunboxes_--; |
|
1247 nunboxParts_[i].payload = payload; |
|
1248 return true; |
|
1249 } |
|
1250 } |
|
1251 partialNunboxes_++; |
|
1252 |
|
1253 // vregs for nunbox pairs are adjacent, with the type coming first. |
|
1254 uint32_t typeVreg = payloadVreg - 1; |
|
1255 bool result = nunboxParts_.append(NunboxEntry(LUse(typeVreg, LUse::ANY), payload)); |
|
1256 if (result) |
|
1257 assertInvariants(); |
|
1258 return result; |
|
1259 } |
|
1260 |
|
1261 bool hasNunboxPayload(LAllocation payload) const { |
|
1262 if (payload.isArgument()) |
|
1263 return true; |
|
1264 if (payload.isStackSlot() && hasValueSlot(payload.toStackSlot()->slot())) |
|
1265 return true; |
|
1266 for (size_t i = 0; i < nunboxParts_.length(); i++) { |
|
1267 if (nunboxParts_[i].payload == payload) |
|
1268 return true; |
|
1269 } |
|
1270 return false; |
|
1271 } |
|
1272 |
|
1273 NunboxList &nunboxParts() { |
|
1274 return nunboxParts_; |
|
1275 } |
|
1276 |
|
1277 uint32_t partialNunboxes() { |
|
1278 return partialNunboxes_; |
|
1279 } |
|
1280 |
|
1281 #elif defined(JS_PUNBOX64) |
|
1282 |
|
1283 void addValueRegister(Register reg) { |
|
1284 valueRegs_.add(reg); |
|
1285 assertInvariants(); |
|
1286 } |
|
1287 GeneralRegisterSet valueRegs() const { |
|
1288 return valueRegs_; |
|
1289 } |
|
1290 |
|
1291 bool addBoxedValue(LAllocation alloc) { |
|
1292 if (alloc.isRegister()) { |
|
1293 Register reg = alloc.toRegister().gpr(); |
|
1294 if (!valueRegs().has(reg)) |
|
1295 addValueRegister(reg); |
|
1296 return true; |
|
1297 } |
|
1298 if (alloc.isStackSlot()) { |
|
1299 uint32_t slot = alloc.toStackSlot()->slot(); |
|
1300 for (size_t i = 0; i < valueSlots().length(); i++) { |
|
1301 if (valueSlots()[i] == slot) |
|
1302 return true; |
|
1303 } |
|
1304 return addValueSlot(slot); |
|
1305 } |
|
1306 JS_ASSERT(alloc.isArgument()); |
|
1307 return true; |
|
1308 } |
|
1309 |
|
1310 bool hasBoxedValue(LAllocation alloc) const { |
|
1311 if (alloc.isRegister()) |
|
1312 return valueRegs().has(alloc.toRegister().gpr()); |
|
1313 if (alloc.isStackSlot()) |
|
1314 return hasValueSlot(alloc.toStackSlot()->slot()); |
|
1315 JS_ASSERT(alloc.isArgument()); |
|
1316 return true; |
|
1317 } |
|
1318 |
|
1319 #endif // JS_PUNBOX64 |
|
1320 |
|
1321 bool encoded() const { |
|
1322 return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; |
|
1323 } |
|
1324 uint32_t offset() const { |
|
1325 JS_ASSERT(encoded()); |
|
1326 return safepointOffset_; |
|
1327 } |
|
1328 void setOffset(uint32_t offset) { |
|
1329 safepointOffset_ = offset; |
|
1330 } |
|
1331 uint32_t osiReturnPointOffset() const { |
|
1332 // In general, pointer arithmetic on code is bad, but in this case it is |
|
1333 // deliberate: deriving the return address from the call instruction itself, |
|
1334 // stepping over any constant pools, would yield the wrong offset. |
|
1335 return osiCallPointOffset_ + Assembler::patchWrite_NearCallSize(); |
|
1336 } |
|
1337 uint32_t osiCallPointOffset() const { |
|
1338 return osiCallPointOffset_; |
|
1339 } |
|
1340 void setOsiCallPointOffset(uint32_t osiCallPointOffset) { |
|
1341 JS_ASSERT(!osiCallPointOffset_); |
|
1342 osiCallPointOffset_ = osiCallPointOffset; |
|
1343 } |
|
1344 void fixupOffset(MacroAssembler *masm) { |
|
1345 osiCallPointOffset_ = masm->actualOffset(osiCallPointOffset_); |
|
1346 } |
|
1347 }; |
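
// Hedged usage sketch (safepoint, objReg, and gcSlot are assumed names, not
// part of this interface): a register allocator or code generator records
// what is live and what holds gcthings roughly like this.
//
//   safepoint->addLiveRegister(AnyRegister(objReg)); // live across the OOL call
//   safepoint->addGcRegister(objReg);                // ...and holds a gcthing
//   if (!safepoint->addGcSlot(gcSlot))               // so does this stack slot
//       return false;                                // append is fallible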
|
1348 |
|
1349 class LInstruction::InputIterator |
|
1350 { |
|
1351 private: |
|
1352 LInstruction &ins_; |
|
1353 size_t idx_; |
|
1354 bool snapshot_; |
|
1355 |
|
1356 void handleOperandsEnd() { |
|
1357 // Iterate on the snapshot when iteration over all operands is done. |
|
1358 if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) { |
|
1359 idx_ = 0; |
|
1360 snapshot_ = true; |
|
1361 } |
|
1362 } |
|
1363 |
|
1364 public: |
|
1365 InputIterator(LInstruction &ins) : |
|
1366 ins_(ins), |
|
1367 idx_(0), |
|
1368 snapshot_(false) |
|
1369 { |
|
1370 handleOperandsEnd(); |
|
1371 } |
|
1372 |
|
1373 bool more() const { |
|
1374 if (snapshot_) |
|
1375 return idx_ < ins_.snapshot()->numEntries(); |
|
1376 if (idx_ < ins_.numOperands()) |
|
1377 return true; |
|
1378 if (ins_.snapshot() && ins_.snapshot()->numEntries()) |
|
1379 return true; |
|
1380 return false; |
|
1381 } |
|
1382 |
|
1383 bool isSnapshotInput() const { |
|
1384 return snapshot_; |
|
1385 } |
|
1386 |
|
1387 void next() { |
|
1388 JS_ASSERT(more()); |
|
1389 idx_++; |
|
1390 handleOperandsEnd(); |
|
1391 } |
|
1392 |
|
1393 void replace(const LAllocation &alloc) { |
|
1394 if (snapshot_) |
|
1395 ins_.snapshot()->setEntry(idx_, alloc); |
|
1396 else |
|
1397 ins_.setOperand(idx_, alloc); |
|
1398 } |
|
1399 |
|
1400 LAllocation *operator *() const { |
|
1401 if (snapshot_) |
|
1402 return ins_.snapshot()->getEntry(idx_); |
|
1403 return ins_.getOperand(idx_); |
|
1404 } |
|
1405 |
|
1406 LAllocation *operator ->() const { |
|
1407 return **this; |
|
1408 } |
|
1409 }; |
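
// Hedged usage sketch (ins is an assumed LInstruction*): the iterator walks
// the instruction's operands first and then, if a snapshot is attached, every
// snapshot entry, so a pass can rewrite either kind of input uniformly.
//
//   for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
//       if (alloc->isUse() && alloc->toUse()->isFixedRegister()) {
//           // e.g. record or rewrite the fixed use, possibly via replace().
//       }
//   }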
|
1410 |
|
1411 class LIRGraph |
|
1412 { |
|
1413 struct ValueHasher |
|
1414 { |
|
1415 typedef Value Lookup; |
|
1416 static HashNumber hash(const Value &v) { |
|
1417 return HashNumber(v.asRawBits()); |
|
1418 } |
|
1419 static bool match(const Value &lhs, const Value &rhs) { |
|
1420 return lhs == rhs; |
|
1421 } |
|
1422 |
|
1423 #ifdef DEBUG |
|
1424 bool canOptimizeOutIfUnused(); |
|
1425 #endif |
|
1426 }; |
|
1427 |
|
1428 |
|
1429 Vector<LBlock *, 16, IonAllocPolicy> blocks_; |
|
1430 Vector<Value, 0, IonAllocPolicy> constantPool_; |
|
1431 typedef HashMap<Value, uint32_t, ValueHasher, IonAllocPolicy> ConstantPoolMap; |
|
1432 ConstantPoolMap constantPoolMap_; |
|
1433 Vector<LInstruction *, 0, IonAllocPolicy> safepoints_; |
|
1434 Vector<LInstruction *, 0, IonAllocPolicy> nonCallSafepoints_; |
|
1435 uint32_t numVirtualRegisters_; |
|
1436 uint32_t numInstructions_; |
|
1437 |
|
1438 // Number of stack slots needed for local spills. |
|
1439 uint32_t localSlotCount_; |
|
1440 // Number of stack slots needed for argument construction for calls. |
|
1441 uint32_t argumentSlotCount_; |
|
1442 |
|
1443 // Snapshot taken before any LIR has been lowered. |
|
1444 LSnapshot *entrySnapshot_; |
|
1445 |
|
1446 // LBlock containing LOsrEntry, or nullptr. |
|
1447 LBlock *osrBlock_; |
|
1448 |
|
1449 MIRGraph &mir_; |
|
1450 |
|
1451 public: |
|
1452 LIRGraph(MIRGraph *mir); |
|
1453 |
|
1454 bool init() { |
|
1455 return constantPoolMap_.init(); |
|
1456 } |
|
1457 MIRGraph &mir() const { |
|
1458 return mir_; |
|
1459 } |
|
1460 size_t numBlocks() const { |
|
1461 return blocks_.length(); |
|
1462 } |
|
1463 LBlock *getBlock(size_t i) const { |
|
1464 return blocks_[i]; |
|
1465 } |
|
1466 uint32_t numBlockIds() const { |
|
1467 return mir_.numBlockIds(); |
|
1468 } |
|
1469 bool addBlock(LBlock *block) { |
|
1470 return blocks_.append(block); |
|
1471 } |
|
1472 uint32_t getVirtualRegister() { |
|
1473 numVirtualRegisters_ += VREG_INCREMENT; |
|
1474 return numVirtualRegisters_; |
|
1475 } |
|
1476 uint32_t numVirtualRegisters() const { |
|
1477 // Virtual registers are 1-based, not 0-based, so add one as a |
|
1478 // convenience for 0-based arrays. |
|
1479 return numVirtualRegisters_ + 1; |
|
1480 } |
|
1481 uint32_t getInstructionId() { |
|
1482 return numInstructions_++; |
|
1483 } |
|
1484 uint32_t numInstructions() const { |
|
1485 return numInstructions_; |
|
1486 } |
|
1487 void setLocalSlotCount(uint32_t localSlotCount) { |
|
1488 localSlotCount_ = localSlotCount; |
|
1489 } |
|
1490 uint32_t localSlotCount() const { |
|
1491 return localSlotCount_; |
|
1492 } |
|
1493 // Return the localSlotCount() value rounded up so that it satisfies the |
|
1494 // platform stack alignment requirement, and so that it's a multiple of |
|
1495 // the number of slots per Value. |
|
1496 uint32_t paddedLocalSlotCount() const { |
|
1497 // Round to StackAlignment, but also round to at least sizeof(Value) in |
|
1498 // case that's greater, because StackOffsetOfPassedArg rounds argument |
|
1499 // slots to 8-byte boundaries. |
|
1500 size_t Alignment = Max(size_t(StackAlignment), sizeof(Value)); |
|
1501 return AlignBytes(localSlotCount(), Alignment); |
|
1502 } |
|
1503 size_t paddedLocalSlotsSize() const { |
|
1504 return paddedLocalSlotCount(); |
|
1505 } |
|
1506 void setArgumentSlotCount(uint32_t argumentSlotCount) { |
|
1507 argumentSlotCount_ = argumentSlotCount; |
|
1508 } |
|
1509 uint32_t argumentSlotCount() const { |
|
1510 return argumentSlotCount_; |
|
1511 } |
|
1512 size_t argumentsSize() const { |
|
1513 return argumentSlotCount() * sizeof(Value); |
|
1514 } |
|
1515 uint32_t totalSlotCount() const { |
|
1516 return paddedLocalSlotCount() + argumentsSize(); |
|
1517 } |
|
1518 bool addConstantToPool(const Value &v, uint32_t *index); |
|
1519 size_t numConstants() const { |
|
1520 return constantPool_.length(); |
|
1521 } |
|
1522 Value *constantPool() { |
|
1523 return &constantPool_[0]; |
|
1524 } |
|
1525 void setEntrySnapshot(LSnapshot *snapshot) { |
|
1526 JS_ASSERT(!entrySnapshot_); |
|
1527 JS_ASSERT(snapshot->bailoutKind() == Bailout_Normal); |
|
1528 snapshot->setBailoutKind(Bailout_ArgumentCheck); |
|
1529 entrySnapshot_ = snapshot; |
|
1530 } |
|
1531 LSnapshot *entrySnapshot() const { |
|
1532 JS_ASSERT(entrySnapshot_); |
|
1533 return entrySnapshot_; |
|
1534 } |
|
1535 void setOsrBlock(LBlock *block) { |
|
1536 JS_ASSERT(!osrBlock_); |
|
1537 osrBlock_ = block; |
|
1538 } |
|
1539 LBlock *osrBlock() const { |
|
1540 return osrBlock_; |
|
1541 } |
|
1542 bool noteNeedsSafepoint(LInstruction *ins); |
|
1543 size_t numNonCallSafepoints() const { |
|
1544 return nonCallSafepoints_.length(); |
|
1545 } |
|
1546 LInstruction *getNonCallSafepoint(size_t i) const { |
|
1547 return nonCallSafepoints_[i]; |
|
1548 } |
|
1549 size_t numSafepoints() const { |
|
1550 return safepoints_.length(); |
|
1551 } |
|
1552 LInstruction *getSafepoint(size_t i) const { |
|
1553 return safepoints_[i]; |
|
1554 } |
|
1555 void removeBlock(size_t i); |
|
1556 }; |
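
// Orientation note on virtual register numbering (derived from the methods
// above; graph is an assumed LIRGraph&): getVirtualRegister() increments
// before returning, so the first vreg handed out is 1 and vreg 0 is never
// used, which is why numVirtualRegisters() adds one when sizing 0-based
// arrays.
//
//   uint32_t vreg = graph.getVirtualRegister();   // 1, 2, 3, ...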
|
1557 |
|
1558 LAllocation::LAllocation(const AnyRegister ®) |
|
1559 { |
|
1560 if (reg.isFloat()) |
|
1561 *this = LFloatReg(reg.fpu()); |
|
1562 else |
|
1563 *this = LGeneralReg(reg.gpr()); |
|
1564 } |
|
1565 |
|
1566 AnyRegister |
|
1567 LAllocation::toRegister() const |
|
1568 { |
|
1569 JS_ASSERT(isRegister()); |
|
1570 if (isFloatReg()) |
|
1571 return AnyRegister(toFloatReg()->reg()); |
|
1572 return AnyRegister(toGeneralReg()->reg()); |
|
1573 } |
|
1574 |
|
1575 } // namespace jit |
|
1576 } // namespace js |
|
1577 |
|
1578 #define LIR_HEADER(opcode) \ |
|
1579 Opcode op() const { \ |
|
1580 return LInstruction::LOp_##opcode; \ |
|
1581 } \ |
|
1582 bool accept(LInstructionVisitor *visitor) { \ |
|
1583 visitor->setInstruction(this); \ |
|
1584 return visitor->visit##opcode(this); \ |
|
1585 } |
|
1586 |
|
1587 #include "jit/LIR-Common.h" |
|
1588 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) |
|
1589 # if defined(JS_CODEGEN_X86) |
|
1590 # include "jit/x86/LIR-x86.h" |
|
1591 # elif defined(JS_CODEGEN_X64) |
|
1592 # include "jit/x64/LIR-x64.h" |
|
1593 # endif |
|
1594 # include "jit/shared/LIR-x86-shared.h" |
|
1595 #elif defined(JS_CODEGEN_ARM) |
|
1596 # include "jit/arm/LIR-arm.h" |
|
1597 #elif defined(JS_CODEGEN_MIPS) |
|
1598 # include "jit/mips/LIR-mips.h" |
|
1599 #else |
|
1600 # error "Unknown architecture!" |
|
1601 #endif |
|
1602 |
|
1603 #undef LIR_HEADER |
|
1604 |
|
1605 namespace js { |
|
1606 namespace jit { |
|
1607 |
|
1608 #define LIROP(name) \ |
|
1609 L##name *LInstruction::to##name() \ |
|
1610 { \ |
|
1611 JS_ASSERT(is##name()); \ |
|
1612 return static_cast<L##name *>(this); \ |
|
1613 } |
|
1614 LIR_OPCODE_LIST(LIROP) |
|
1615 #undef LIROP |
|
1616 |
|
1617 #define LALLOC_CAST(type) \ |
|
1618 L##type *LAllocation::to##type() { \ |
|
1619 JS_ASSERT(is##type()); \ |
|
1620 return static_cast<L##type *>(this); \ |
|
1621 } |
|
1622 #define LALLOC_CONST_CAST(type) \ |
|
1623 const L##type *LAllocation::to##type() const { \ |
|
1624 JS_ASSERT(is##type()); \ |
|
1625 return static_cast<const L##type *>(this); \ |
|
1626 } |
|
1627 |
|
1628 LALLOC_CAST(Use) |
|
1629 LALLOC_CONST_CAST(Use) |
|
1630 LALLOC_CONST_CAST(GeneralReg) |
|
1631 LALLOC_CONST_CAST(FloatReg) |
|
1632 LALLOC_CONST_CAST(StackSlot) |
|
1633 LALLOC_CONST_CAST(Argument) |
|
1634 LALLOC_CONST_CAST(ConstantIndex) |
|
1635 |
|
1636 #undef LALLOC_CAST |
|
1637 |
|
1638 #ifdef JS_NUNBOX32 |
|
1639 static inline signed |
|
1640 OffsetToOtherHalfOfNunbox(LDefinition::Type type) |
|
1641 { |
|
1642 JS_ASSERT(type == LDefinition::TYPE || type == LDefinition::PAYLOAD); |
|
1643 signed offset = (type == LDefinition::TYPE) |
|
1644 ? PAYLOAD_INDEX - TYPE_INDEX |
|
1645 : TYPE_INDEX - PAYLOAD_INDEX; |
|
1646 return offset; |
|
1647 } |
|
1648 |
|
1649 static inline void |
|
1650 AssertTypesFormANunbox(LDefinition::Type type1, LDefinition::Type type2) |
|
1651 { |
|
1652 JS_ASSERT((type1 == LDefinition::TYPE && type2 == LDefinition::PAYLOAD) || |
|
1653 (type2 == LDefinition::TYPE && type1 == LDefinition::PAYLOAD)); |
|
1654 } |
|
1655 |
|
1656 static inline unsigned |
|
1657 OffsetOfNunboxSlot(LDefinition::Type type) |
|
1658 { |
|
1659 if (type == LDefinition::PAYLOAD) |
|
1660 return NUNBOX32_PAYLOAD_OFFSET; |
|
1661 return NUNBOX32_TYPE_OFFSET; |
|
1662 } |
|
1663 |
|
1664 // Note that stack indexes for LStackSlot are modelled backwards, so a |
|
1665 // double-sized slot starting at 2 has its next word at 1, *not* 3. |
|
1666 static inline unsigned |
|
1667 BaseOfNunboxSlot(LDefinition::Type type, unsigned slot) |
|
1668 { |
|
1669 if (type == LDefinition::PAYLOAD) |
|
1670 return slot + NUNBOX32_PAYLOAD_OFFSET; |
|
1671 return slot + NUNBOX32_TYPE_OFFSET; |
|
1672 } |
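
// Worked example (illustrative, JS_NUNBOX32 only):
// OffsetToOtherHalfOfNunbox(LDefinition::TYPE) yields +1, the distance from a
// type half to its payload half, matching VREG_TYPE_OFFSET/VREG_DATA_OFFSET
// near the top of this file; BaseOfNunboxSlot() then maps the stack slot of
// either half back to the base slot of the containing Value, remembering from
// the preceding comment that slot indices count backwards.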
|
1673 #endif |
|
1674 |
|
1675 } // namespace jit |
|
1676 } // namespace js |
|
1677 |
|
1678 #endif /* jit_LIR_h */ |