/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/MoveEmitter-arm.h"

using namespace js;
using namespace js::jit;

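// Record the frame depth at construction so that stack-relative offsets can
// be rebased against anything pushed while the moves are being emitted.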
MoveEmitterARM::MoveEmitterARM(MacroAssemblerARMCompat &masm)
  : inCycle_(false),
    masm(masm),
    pushedAtCycle_(-1),
    pushedAtSpill_(-1),
    spilledReg_(InvalidReg),
    spilledFloatReg_(InvalidFloatReg)
{
    pushedAtStart_ = masm.framePushed();
}

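// Emit every move in resolved order. If the set contains cycles, reserve a
// double-sized stack slot up front to hold the value that breaks each cycle.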
void
MoveEmitterARM::emit(const MoveResolver &moves)
{
    if (moves.hasCycles()) {
        // Reserve stack for cycle resolution.
        masm.reserveStack(sizeof(double));
        pushedAtCycle_ = masm.framePushed();
    }

    for (size_t i = 0; i < moves.numMoves(); i++)
        emit(moves.getMove(i));
}

MoveEmitterARM::~MoveEmitterARM()
{
    assertDone();
}

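// The stack slot, relative to the current stack pointer, that holds the
// value saved by breakCycle() until completeCycle() consumes it.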
Operand
MoveEmitterARM::cycleSlot() const
{
    int offset = masm.framePushed() - pushedAtCycle_;
    JS_ASSERT(offset < 4096 && offset > -4096);
    return Operand(StackPointer, offset);
}

// This is always used as an LDR address; arguably it should not be wrapped
// in an Operand at all.
Operand
MoveEmitterARM::spillSlot() const
{
    int offset = masm.framePushed() - pushedAtSpill_;
    JS_ASSERT(offset < 4096 && offset > -4096);
    return Operand(StackPointer, offset);
}

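// Translate a MoveOperand into an Operand the macro assembler can address.
// Offsets relative to the stack pointer must be rebased, since the emitter
// may have pushed values (spill and cycle slots) after the moves were
// resolved.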
Operand
MoveEmitterARM::toOperand(const MoveOperand &operand, bool isFloat) const
{
    if (operand.isMemoryOrEffectiveAddress()) {
        if (operand.base() != StackPointer) {
            JS_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
            return Operand(operand.base(), operand.disp());
        }

        JS_ASSERT(operand.disp() >= 0);

        // Otherwise, the stack offset may need to be adjusted.
        return Operand(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
    }

    if (operand.isGeneralReg())
        return Operand(operand.reg());

    JS_ASSERT(operand.isFloatReg());
    return Operand(operand.floatReg());
}

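// Evict a general-purpose register to serve as a temporary for
// memory-to-memory moves, preserving its value in a dedicated spill slot.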
Register
MoveEmitterARM::tempReg()
{
    if (spilledReg_ != InvalidReg)
        return spilledReg_;

    // For now, just pick lr (r14, the link register) as the eviction point.
    // This is fairly arbitrary, and if it ends up being bad, we can use
    // actual heuristics later. Note that r12/ip would be a bad choice: it is
    // the scratch register, which is frequently used for address
    // computations, such as those needed to access values more than 4096
    // bytes off the stack pointer.
    spilledReg_ = r14;
    if (pushedAtSpill_ == -1) {
        masm.Push(spilledReg_);
        pushedAtSpill_ = masm.framePushed();
    } else {
        masm.ma_str(spilledReg_, spillSlot());
    }
    return spilledReg_;
}

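// Saving B's value in the cycle slot turns the cycle (A -> B), (B -> A) into
// the acyclic sequence (B -> slot), (A -> B), (slot -> A).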
void
MoveEmitterARM::breakCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloatReg;
            masm.ma_vldr(toOperand(to, true), temp);
            masm.ma_vstr(temp, cycleSlot());
        } else {
            masm.ma_vstr(to.floatReg(), cycleSlot());
        }
        break;
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        // A non-VFP value.
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.ma_ldr(toOperand(to, false), temp);
            masm.ma_str(temp, cycleSlot());
        } else {
            if (to.reg() == spilledReg_) {
                // If the destination was spilled, restore it first.
                masm.ma_ldr(spillSlot(), spilledReg_);
                spilledReg_ = InvalidReg;
            }
            masm.ma_str(to.reg(), cycleSlot());
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
    }
}

void
MoveEmitterARM::completeCycle(const MoveOperand &from, const MoveOperand &to, MoveOp::Type type)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from
    // the saved value of B, to A.
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloatReg;
            masm.ma_vldr(cycleSlot(), temp);
            masm.ma_vstr(temp, toOperand(to, true));
        } else {
            masm.ma_vldr(cycleSlot(), to.floatReg());
        }
        break;
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.ma_ldr(cycleSlot(), temp);
            masm.ma_str(temp, toOperand(to, false));
        } else {
            if (to.reg() == spilledReg_) {
                // Make sure we don't re-clobber the spilled register later.
                spilledReg_ = InvalidReg;
            }
            masm.ma_ldr(cycleSlot(), to.reg());
        }
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
    }
}

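// Move a general-purpose (non-VFP) value. Sources and destinations may be
// registers, memory locations, or effective addresses; memory-to-memory
// moves bounce through the evicted temporary register.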
void
MoveEmitterARM::emitMove(const MoveOperand &from, const MoveOperand &to)
{
    if (to.isGeneralReg() && to.reg() == spilledReg_) {
        // If the destination is the spilled register, make sure we
        // don't re-clobber its value.
        spilledReg_ = InvalidReg;
    }

    if (from.isGeneralReg()) {
        if (from.reg() == spilledReg_) {
            // If the source is a register that has been spilled, make sure
            // to load the source back into that register.
            masm.ma_ldr(spillSlot(), spilledReg_);
            spilledReg_ = InvalidReg;
        }
        switch (toOperand(to, false).getTag()) {
          case Operand::OP2:
            // An OP2 operand here must in fact be a register.
            masm.ma_mov(from.reg(), to.reg());
            break;
          case Operand::MEM:
            masm.ma_str(from.reg(), toOperand(to, false));
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("strange move!");
        }
    } else if (to.isGeneralReg()) {
        JS_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toOperand(from, false), to.reg());
        else
            masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
    } else {
        // Memory-to-memory GPR move.
        Register reg = tempReg();

        JS_ASSERT(from.isMemoryOrEffectiveAddress());
        if (from.isMemory())
            masm.ma_ldr(toOperand(from, false), reg);
        else
            masm.ma_add(from.base(), Imm32(from.disp()), reg);
        JS_ASSERT(to.base() != reg);
        masm.ma_str(reg, toOperand(to, false));
    }
}

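// Single-precision moves use the singleOverlay() view of the VFP registers;
// memory-to-memory copies bounce through ScratchFloatReg.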
void
MoveEmitterARM::emitFloat32Move(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.ma_vmov_f32(from.floatReg(), to.floatReg());
        else
            masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(),
                         toOperand(to, true));
    } else if (to.isFloatReg()) {
        masm.ma_vldr(toOperand(from, true),
                     VFPRegister(to.floatReg()).singleOverlay());
    } else {
        // Memory-to-memory move.
        JS_ASSERT(from.isMemory());
        FloatRegister reg = ScratchFloatReg;
        masm.ma_vldr(toOperand(from, true),
                     VFPRegister(reg).singleOverlay());
        masm.ma_vstr(VFPRegister(reg).singleOverlay(),
                     toOperand(to, true));
    }
}

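// Double-precision analogue of emitFloat32Move; whole VFP registers are used
// directly, again with ScratchFloatReg as the bounce register.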
void
MoveEmitterARM::emitDoubleMove(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isFloatReg()) {
        if (to.isFloatReg())
            masm.ma_vmov(from.floatReg(), to.floatReg());
        else
            masm.ma_vstr(from.floatReg(), toOperand(to, true));
    } else if (to.isFloatReg()) {
        masm.ma_vldr(toOperand(from, true), to.floatReg());
    } else {
        // Memory-to-memory move.
        JS_ASSERT(from.isMemory());
        FloatRegister reg = ScratchFloatReg;
        masm.ma_vldr(toOperand(from, true), reg);
        masm.ma_vstr(reg, toOperand(to, true));
    }
}

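// Dispatch a single move. A cycle-begin move first saves its destination in
// the cycle slot and then proceeds as a normal move; a cycle-end move is
// satisfied entirely from the cycle slot, so it returns early.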
void
MoveEmitterARM::emit(const MoveOp &move)
{
    const MoveOperand &from = move.from();
    const MoveOperand &to = move.to();

    if (move.isCycleEnd()) {
        JS_ASSERT(inCycle_);
        completeCycle(from, to, move.type());
        inCycle_ = false;
        return;
    }

    if (move.isCycleBegin()) {
        JS_ASSERT(!inCycle_);
        breakCycle(from, to, move.endCycleType());
        inCycle_ = true;
    }

    switch (move.type()) {
      case MoveOp::FLOAT32:
        emitFloat32Move(from, to);
        break;
      case MoveOp::DOUBLE:
        emitDoubleMove(from, to);
        break;
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        emitMove(from, to);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected move type");
    }
}

void
MoveEmitterARM::assertDone()
{
    JS_ASSERT(!inCycle_);
}

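// Restore the evicted register, if any, and release everything this emitter
// pushed, returning the frame to its depth at construction.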
void
MoveEmitterARM::finish()
{
    assertDone();

    if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg)
        masm.ma_ldr(spillSlot(), spilledReg_);
    masm.freeStack(masm.framePushed() - pushedAtStart_);
}