Wed, 31 Dec 2014 06:09:35 +0100
Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/arm/MacroAssembler-arm.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "mozilla/Casting.h" |
michael@0 | 10 | #include "mozilla/DebugOnly.h" |
michael@0 | 11 | #include "mozilla/MathAlgorithms.h" |
michael@0 | 12 | |
michael@0 | 13 | #include "jit/arm/Simulator-arm.h" |
michael@0 | 14 | #include "jit/Bailouts.h" |
michael@0 | 15 | #include "jit/BaselineFrame.h" |
michael@0 | 16 | #include "jit/IonFrames.h" |
michael@0 | 17 | #include "jit/MoveEmitter.h" |
michael@0 | 18 | |
michael@0 | 19 | using namespace js; |
michael@0 | 20 | using namespace jit; |
michael@0 | 21 | |
michael@0 | 22 | using mozilla::Abs; |
michael@0 | 23 | using mozilla::BitwiseCast; |
michael@0 | 24 | |
michael@0 | 25 | bool |
michael@0 | 26 | isValueDTRDCandidate(ValueOperand &val) |
michael@0 | 27 | { |
michael@0 | 28 | // In order to be used for a DTRD memory function, the two target registers |
michael@0 | 29 | // need to be a) Adjacent, with the tag larger than the payload, and |
michael@0 | 30 | // b) Aligned to a multiple of two. |
michael@0 | 31 | if ((val.typeReg().code() != (val.payloadReg().code() + 1))) |
michael@0 | 32 | return false; |
michael@0 | 33 | if ((val.payloadReg().code() & 1) != 0) |
michael@0 | 34 | return false; |
michael@0 | 35 | return true; |
michael@0 | 36 | } |
michael@0 | 37 | |
void
MacroAssemblerARM::convertBoolToInt32(Register source, Register dest)
{
    // Zero-extend the low byte of source into dest. A C++ bool is only one
    // byte, so masking with 0xff clears any stale higher-order bits.
    ma_and(Imm32(0xff), source, dest);
}
michael@0 | 45 | |
void
MacroAssemblerARM::convertInt32ToDouble(const Register &src, const FloatRegister &dest_)
{
    // Direct core-register -> double conversions aren't possible: first
    // transfer the 32-bit integer into the VFP register bank, then convert
    // the signed-integer view of dest in place.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.sintOverlay(),
             CoreToFloat);
    as_vcvt(dest, dest.sintOverlay());
}
michael@0 | 55 | |
void
MacroAssemblerARM::convertInt32ToDouble(const Address &src, FloatRegister dest)
{
    // Load the in-memory int32 into the scratch float register, then convert
    // its signed-integer view into a double in dest.
    ma_vldr(Operand(src), ScratchFloatReg);
    as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay());
}
michael@0 | 62 | |
void
MacroAssemblerARM::convertUInt32ToDouble(const Register &src, const FloatRegister &dest_)
{
    // Direct core-register -> double conversions aren't possible: move the
    // bits into the VFP bank first, then convert the unsigned-integer view of
    // dest in place.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(dest, dest.uintOverlay());
}

void
MacroAssemblerARM::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest_)
{
    // Same scheme as convertUInt32ToDouble, but the conversion target is the
    // single-precision view of dest.
    VFPRegister dest = VFPRegister(dest_);
    as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
    as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
}
michael@0 | 80 | |
void MacroAssemblerARM::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest,
                                               Condition c)
{
    // Narrow the double in src into the single-precision view of dest,
    // predicated on condition c.
    as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
}
michael@0 | 86 | |
// There are two options for implementing a branching truncate:
// 1) Convert the floating point value to an integer; if it did not fit, it
//    was clamped to INT_MIN/INT_MAX, which we can test for.
//    NOTE: if the value really was supposed to be INT_MAX or INT_MIN, this
//    test gives a false positive and we take the fail path anyway.
// 2) Convert the floating point value to an integer; if it did not fit, one
//    or two bits were set in the FPSCR. Check those.
// This implementation uses option 1.
void
MacroAssemblerARM::branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail)
{
    // Truncate the double to int32 (out-of-range inputs clamp) and move the
    // result into the destination core register.
    ma_vcvt_F64_I32(src, ScratchFloatReg);
    ma_vxfer(ScratchFloatReg, dest);
    // Branch to fail if dest is either clamp sentinel. The second compare is
    // predicated so it only runs when the first did not match.
    ma_cmp(dest, Imm32(0x7fffffff));
    ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    ma_b(fail, Assembler::Equal);
}
michael@0 | 103 | |
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerARM::convertDoubleToInt32(const FloatRegister &src, const Register &dest,
                                        Label *fail, bool negativeZeroCheck)
{
    // Convert the double to an integer, then convert it *back*. If the value
    // did not fit, the round trip produces a different double, which the
    // compare below detects.
    ma_vcvt_F64_I32(src, ScratchFloatReg);
    // Move the integer result into the destination core register.
    ma_vxfer(ScratchFloatReg, dest);
    ma_vcvt_I32_F64(ScratchFloatReg, ScratchFloatReg);
    ma_vcmp(src, ScratchFloatReg);
    // Transfer the VFP status flags into the APSR so ma_b can test them.
    as_vmrs(pc);
    ma_b(fail, Assembler::VFP_NotEqualOrUnordered);

    if (negativeZeroCheck) {
        ma_cmp(dest, Imm32(0));
        // An integer result of 0 may have come from -0.0. Only when the
        // result was 0, move the top word of the double into dest; it is
        // 0x80000000 exactly when the original value was -0.0, in which case
        // we bail.
        as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
        ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
        ma_b(fail, Assembler::Equal);
    }
}
michael@0 | 132 | |
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void
MacroAssemblerARM::convertFloat32ToInt32(const FloatRegister &src, const Register &dest,
                                         Label *fail, bool negativeZeroCheck)
{
    // Convert the float to an integer, then convert it *back*. If the value
    // did not fit, the round trip produces a different float, which the
    // compare below detects.
    ma_vcvt_F32_I32(src, ScratchFloatReg);
    // Move the integer result into the destination core register.
    ma_vxfer(ScratchFloatReg, dest);
    ma_vcvt_I32_F32(ScratchFloatReg, ScratchFloatReg);
    ma_vcmp_f32(src, ScratchFloatReg);
    // Transfer the VFP status flags into the APSR so ma_b can test them.
    as_vmrs(pc);
    ma_b(fail, Assembler::VFP_NotEqualOrUnordered);

    if (negativeZeroCheck) {
        ma_cmp(dest, Imm32(0));
        // An integer result of 0 may have come from -0.0. Only when the
        // result was 0, move the raw float bits into dest; they equal
        // 0x80000000 exactly when the original value was -0.0, in which case
        // we bail.
        as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, Assembler::Equal, 0);
        ma_cmp(dest, Imm32(0x80000000), Assembler::Equal);
        ma_b(fail, Assembler::Equal);
    }
}
michael@0 | 161 | |
void
MacroAssemblerARM::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest) {
    // Widen the single-precision view of src into the double dest.
    as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay());
}

void
MacroAssemblerARM::branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail) {
    // Same scheme as branchTruncateDouble: truncate, then branch to fail if
    // dest holds either clamp sentinel (INT32_MAX / INT32_MIN). The second
    // compare only runs when the first did not match.
    ma_vcvt_F32_I32(src, ScratchFloatReg);
    ma_vxfer(ScratchFloatReg, dest);
    ma_cmp(dest, Imm32(0x7fffffff));
    ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
    ma_b(fail, Assembler::Equal);
}
michael@0 | 175 | |
void
MacroAssemblerARM::convertInt32ToFloat32(const Register &src, const FloatRegister &dest_) {
    // Direct core-register -> float conversions aren't possible: first
    // transfer the 32-bit integer into the VFP register bank, then convert
    // the signed-integer view into the single-precision dest in place.
    VFPRegister dest = VFPRegister(dest_).singleOverlay();
    as_vxfer(src, InvalidReg, dest.sintOverlay(),
             CoreToFloat);
    as_vcvt(dest, dest.sintOverlay());
}

void
MacroAssemblerARM::convertInt32ToFloat32(const Address &src, FloatRegister dest) {
    // Load the in-memory int32 into the scratch float register, then convert
    // its signed-integer view into dest.
    ma_vldr(Operand(src), ScratchFloatReg);
    as_vcvt(dest, VFPRegister(ScratchFloatReg).sintOverlay());
}
michael@0 | 190 | |
void
MacroAssemblerARM::addDouble(FloatRegister src, FloatRegister dest)
{
    // dest <- dest + src
    ma_vadd(dest, src, dest);
}

void
MacroAssemblerARM::subDouble(FloatRegister src, FloatRegister dest)
{
    // dest <- dest - src
    ma_vsub(dest, src, dest);
}

void
MacroAssemblerARM::mulDouble(FloatRegister src, FloatRegister dest)
{
    // dest <- dest * src
    ma_vmul(dest, src, dest);
}

void
MacroAssemblerARM::divDouble(FloatRegister src, FloatRegister dest)
{
    // dest <- dest / src
    ma_vdiv(dest, src, dest);
}

void
MacroAssemblerARM::negateDouble(FloatRegister reg)
{
    // reg <- -reg
    ma_vneg(reg, reg);
}
michael@0 | 220 | |
void
MacroAssemblerARM::inc64(AbsoluteAddress dest)
{
    // 64-bit increment of the value at an absolute address. r0/r1 are used
    // as a temporary low/high pair, so preserve them on the stack around the
    // operation (pre-indexed push of 8 bytes).
    ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);

    // Materialize the absolute address of the counter.
    ma_mov(Imm32((int32_t)dest.addr), ScratchRegister);

    // Load the 64-bit value: low word into r0, high word into r1.
    ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(0)), r0, r1);

    // Add 1 to the low word (setting flags) and propagate the carry into the
    // high word.
    ma_add(Imm32(1), r0, SetCond);
    ma_adc(Imm32(0), r1, NoSetCond);

    // Store the incremented value back.
    ma_strd(r0, r1, EDtrAddr(ScratchRegister, EDtrOffImm(0)));

    // Restore r0/r1 (post-indexed pop of 8 bytes).
    ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);

}
michael@0 | 239 | |
bool
MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op,
                           SetCond_ sc, Condition c)
{
    // Try to emit the ALU operation as TWO instructions, splitting imm into a
    // pair of 8-bit modified immediates. Returns false when that is not
    // possible (op cannot be doubled, condition codes would be unsafe, or the
    // immediate does not split) and the caller must find another encoding.
    if ((sc == SetCond && ! condsAreSafe(op)) || !can_dbl(op))
        return false;
    ALUOp interop = getDestVariant(op);
    Imm8::TwoImm8mData both = Imm8::encodeTwoImms(imm.value);
    if (both.fst.invalid)
        return false;
    // For the most part, there is no good reason to set the condition codes
    // for the first instruction; only the second (final) operation defines
    // them. We could do better things if the second instruction doesn't have
    // a dest, such as checking for overflow by doing the first operation and
    // skipping the second if the first overflowed -- this would preserve the
    // overflow condition code. Unfortunately, it is horribly brittle, so it
    // is not attempted here.
    as_alu(ScratchRegister, src1, both.fst, interop, NoSetCond, c);
    as_alu(dest, ScratchRegister, both.snd, op, sc, c);
    return true;
}
michael@0 | 261 | |
michael@0 | 262 | |
void
MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                          ALUOp op,
                          SetCond_ sc, Condition c)
{
    // Emit "dest <- src1 <op> imm", choosing the cheapest encoding that can
    // represent the immediate. Strategies tried, in order:
    //   1. one instruction with an imm8m-encodable immediate;
    //   2. one instruction of the negated operation (e.g. add -> sub);
    //   3. (ARMv7, moves only) a movw, or a movw/movt pair;
    //   4. two instructions, splitting imm into two 8-bit immediates;
    //   5. materialize imm in ScratchRegister (movw/movt or constant pool)
    //      and emit a register-register ALU operation.
    // May clobber ScratchRegister.

    // As it turns out, if you ask for a compare-like instruction (one with no
    // destination) you *probably* want it to set condition codes.
    if (dest == InvalidReg)
        JS_ASSERT(sc == SetCond);

    // ONE INSTRUCTION:
    // If the immediate fits an imm8m encoding, emit the operation directly.
    Imm8 imm8 = Imm8(imm.value);
    if (!imm8.invalid) {
        as_alu(dest, src1, imm8, op, sc, c);
        return;
    }
    // ONE INSTRUCTION, NEGATED:
    //   add r1, r2, -15   can be replaced with   sub r1, r2, 15
    // For bonus points, dest can be replaced (nearly always invalid =>
    // ScratchRegister). This is useful if we wish to negate tst: tst has an
    // invalid (aka not used) dest, but its negation bic *requires* a dest.
    // We can accommodate, but it will need to clobber *something*, and the
    // scratch register isn't being used, so...
    Imm32 negImm = imm;
    Register negDest;
    ALUOp negOp = ALUNeg(op, dest, &negImm, &negDest);
    Imm8 negImm8 = Imm8(negImm.value);
    if (negOp != op_invalid && !negImm8.invalid) {
        as_alu(negDest, src1, negImm8, negOp, sc, c);
        return;
    }

    if (hasMOVWT()) {
        // If the operation is a move-a-like then we can try to use movw to
        // move the bits into the destination. Otherwise, we'll need to fall
        // back on a multi-instruction format :(
        // movw/movt don't set condition codes, so don't hold your breath.
        if (sc == NoSetCond && (op == op_mov || op == op_mvn)) {
            // ARMv7 supports movw/movt. movw zero-extends its 16-bit
            // argument, so we can set a small constant with it alone. movt
            // leaves the bottom 16 bits intact, so on its own it is
            // unsuitable for moving a full constant.
            if (op == op_mov && ((imm.value & ~ 0xffff) == 0)) {
                JS_ASSERT(src1 == InvalidReg);
                as_movw(dest, (uint16_t)imm.value, c);
                return;
            }

            // If they asked for a mvn rfoo, imm, where ~imm fits into 16 bits
            // then do it.
            if (op == op_mvn && (((~imm.value) & ~ 0xffff) == 0)) {
                JS_ASSERT(src1 == InvalidReg);
                as_movw(dest, (uint16_t)~imm.value, c);
                return;
            }

            // TODO: constant dedup may enable us to add dest, r0, 23 *if*
            // we are attempting to load a constant that looks similar to one
            // that already exists.
            // If it can't be done with a single movw, then we *need* two
            // instructions. Since this must be some sort of a move operation,
            // we can just use a movw/movt pair and get the whole thing done
            // in two moves. This does not work for ops like add, since we'd
            // need: movw tmp; movt tmp; add dest, tmp, src1.
            if (op == op_mvn)
                imm.value = ~imm.value;
            as_movw(dest, imm.value & 0xffff, c);
            as_movt(dest, (imm.value >> 16) & 0xffff, c);
            return;
        }
        // If we weren't doing a movalike, a 16 bit immediate will require 2
        // instructions. With the same amount of space and (less) time, we
        // can do two 8 bit operations, reusing the dest register, e.g.
        //   movw tmp, 0xffff; add dest, src, tmp ror 4
        // vs.
        //   add dest, src, 0xff0; add dest, dest, 0xf000000f
        // It turns out that there are some immediates that we miss with the
        // second approach. A sample value is: add dest, src, 0x1fffe.
        // This can be done by movw tmp, 0xffff; add dest, src, tmp lsl 1 --
        // but since imm8m's only get even offsets, we cannot encode it that
        // way. Try to encode as two imm8's first, since they are faster:
        // both operations should take 1 cycle, whereas add dest, tmp ror 4
        // takes two cycles to execute.
    }

    // TWO INSTRUCTIONS:
    // Either a) this isn't ARMv7, or b) this isn't a move. Start by
    // attempting to generate a two-instruction form. Some things cannot be
    // made into two-inst forms correctly, namely adds dest, src, 0xffff:
    // since we want the condition codes (and don't know which ones will be
    // checked), we need to assume that the overflow flag will be checked,
    // and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
    // guaranteed to set the overflow flag the same as the (theoretical)
    // one-instruction variant.
    if (alu_dbl(src1, imm, dest, op, sc, c))
        return;

    // And try with its negative.
    if (negOp != op_invalid &&
        alu_dbl(src1, negImm, negDest, negOp, sc, c))
        return;

    // Well, damn. We can use two 16-bit movs, then do the op, or we can do a
    // single load from a constant pool, then the op.
    if (hasMOVWT()) {
        // Try to load the immediate into a scratch register, then use that.
        as_movw(ScratchRegister, imm.value & 0xffff, c);
        if ((imm.value >> 16) != 0)
            as_movt(ScratchRegister, (imm.value >> 16) & 0xffff, c);
    } else {
        // Going to have to use a load. If the operation is a move, then just
        // move it into the destination register.
        if (op == op_mov) {
            as_Imm32Pool(dest, imm.value, c);
            return;
        } else {
            // If this isn't just going into a register, then stick it in a
            // temp, and then proceed.
            as_Imm32Pool(ScratchRegister, imm.value, c);
        }
    }
    as_alu(dest, src1, O2Reg(ScratchRegister), op, sc, c);
}
michael@0 | 392 | |
void
MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
                          SetCond_ sc, Assembler::Condition c)
{
    // Emit "dest <- src1 <op> op2" for an Operand known to wrap an Operand2.
    JS_ASSERT(op2.getTag() == Operand::OP2);
    as_alu(dest, src1, op2.toOp2(), op, sc, c);
}
michael@0 | 400 | |
void
MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op, SetCond_ sc, Condition c)
{
    // Emit "dest <- src1 <op> op2" directly; an Operand2 always encodes.
    as_alu(dest, src1, op2, op, sc, c);
}
michael@0 | 406 | |
void
MacroAssemblerARM::ma_nop()
{
    // Emit a single no-op instruction.
    as_nop();
}
michael@0 | 412 | |
michael@0 | 413 | Instruction * |
michael@0 | 414 | NextInst(Instruction *i) |
michael@0 | 415 | { |
michael@0 | 416 | if (i == nullptr) |
michael@0 | 417 | return nullptr; |
michael@0 | 418 | return i->next(); |
michael@0 | 419 | } |
michael@0 | 420 | |
void
MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest, Assembler::Condition c,
                                   RelocStyle rs, Instruction *i)
{
    // Load imm_ into dest with a fixed-shape instruction sequence that can be
    // patched later. When i is non-null, the instructions are written over
    // existing code at i instead of being appended.
    int32_t imm = imm_.value;
    if (i) {
        // Make sure the current instruction is not an artificial guard
        // inserted by the assembler buffer.
        // The InstructionIterator already does this and handles edge cases,
        // so just asking an iterator for its current instruction should be
        // enough to make sure we don't accidentally inspect an artificial
        // guard.
        i = InstructionIterator(i).cur();
    }
    switch(rs) {
      case L_MOVWT:
        // Two-instruction form: movw writes the low half, movt the high half.
        as_movw(dest, Imm16(imm & 0xffff), c, i);
        // i can be nullptr here: that just means "insert in the next in
        // sequence." NextInst is special-cased to do nothing when passed
        // nullptr, so two consecutive instructions will be inserted.
        i = NextInst(i);
        as_movt(dest, Imm16(imm >> 16 & 0xffff), c, i);
        break;
      case L_LDR:
        // Constant-pool form: either allocate a fresh pool entry or rewrite
        // the existing one that i's load refers to.
        if(i == nullptr)
            as_Imm32Pool(dest, imm, c);
        else
            as_WritePoolEntry(i, c, imm);
        break;
    }
}
michael@0 | 451 | |
void
MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest,
                                   Assembler::Condition c, RelocStyle rs, Instruction *i)
{
    // Pointer-typed convenience wrapper: forward the raw bits as an Imm32.
    return ma_movPatchable(Imm32(int32_t(imm.value)), dest, c, rs, i);
}
michael@0 | 458 | |
void
MacroAssemblerARM::ma_mov(Register src, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // Register-to-register move. A self-move is elided unless the caller
    // wants the condition codes set (the mov still updates flags).
    if (sc == SetCond || dest != src)
        as_mov(dest, O2Reg(src), sc, c);
}

void
MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // Immediate move; ma_alu picks the cheapest encoding for imm.
    ma_alu(InvalidReg, imm, dest, op_mov, sc, c);
}

void
MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // Word-sized immediate move: same as the Imm32 overload on 32-bit ARM.
    ma_alu(InvalidReg, Imm32(imm.value), dest, op_mov, sc, c);
}
michael@0 | 480 | |
michael@0 | 481 | void |
michael@0 | 482 | MacroAssemblerARM::ma_mov(const ImmGCPtr &ptr, Register dest) |
michael@0 | 483 | { |
michael@0 | 484 | // As opposed to x86/x64 version, the data relocation has to be executed |
michael@0 | 485 | // before to recover the pointer, and not after. |
michael@0 | 486 | writeDataRelocation(ptr); |
michael@0 | 487 | RelocStyle rs; |
michael@0 | 488 | if (hasMOVWT()) |
michael@0 | 489 | rs = L_MOVWT; |
michael@0 | 490 | else |
michael@0 | 491 | rs = L_LDR; |
michael@0 | 492 | |
michael@0 | 493 | ma_movPatchable(Imm32(ptr.value), dest, Always, rs); |
michael@0 | 494 | } |
michael@0 | 495 | |
// Shifts by an immediate amount (just a move with a shifting op2).
void
MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst)
{
    // dst <- src << shift (logical shift left)
    as_mov(dst, lsl(src, shift.value));
}
void
MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst)
{
    // dst <- src >> shift (logical, zero-filling)
    as_mov(dst, lsr(src, shift.value));
}
void
MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst)
{
    // dst <- src >> shift (arithmetic, sign-extending)
    as_mov(dst, asr(src, shift.value));
}
void
MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst)
{
    // dst <- src rotated right by shift bits
    as_mov(dst, ror(src, shift.value));
}
void
MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst)
{
    // dst <- src rotated left by shift bits
    as_mov(dst, rol(src, shift.value));
}
// Shifts by a register-held amount (just a move with a shifting op2).
void
MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst)
{
    // dst <- src << shift (logical shift left)
    as_mov(dst, lsl(src, shift));
}
void
MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst)
{
    // dst <- src >> shift (logical, zero-filling)
    as_mov(dst, lsr(src, shift));
}
void
MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst)
{
    // dst <- src >> shift (arithmetic, sign-extending)
    as_mov(dst, asr(src, shift));
}
void
MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst)
{
    // dst <- src rotated right by shift bits
    as_mov(dst, ror(src, shift));
}
void
MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst)
{
    // dst <- src rotated left by shift bits. ARM has no rotate-left by
    // register, so rotate right by (32 - shift) instead; this clobbers
    // ScratchRegister.
    ma_rsb(shift, Imm32(32), ScratchRegister);
    as_mov(dst, ror(src, ScratchRegister));
}
michael@0 | 549 | |
// Move not (dest <- ~src).

void
MacroAssemblerARM::ma_mvn(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- ~imm; ma_alu picks the cheapest encoding.
    ma_alu(InvalidReg, imm, dest, op_mvn, sc, c);
}

void
MacroAssemblerARM::ma_mvn(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- ~src1
    as_alu(dest, InvalidReg, O2Reg(src1), op_mvn, sc, c);
}

// Negate (dest <- -src), src is a register, rather than a general op2.
void
MacroAssemblerARM::ma_neg(Register src1, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- 0 - src1, via reverse subtract.
    as_rsb(dest, src1, Imm8(0), sc, c);
}
michael@0 | 570 | |
michael@0 | 571 | // And. |
michael@0 | 572 | void |
michael@0 | 573 | MacroAssemblerARM::ma_and(Register src, Register dest, SetCond_ sc, Assembler::Condition c) |
michael@0 | 574 | { |
michael@0 | 575 | ma_and(dest, src, dest); |
michael@0 | 576 | } |
michael@0 | 577 | void |
michael@0 | 578 | MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest, |
michael@0 | 579 | SetCond_ sc, Assembler::Condition c) |
michael@0 | 580 | { |
michael@0 | 581 | as_and(dest, src1, O2Reg(src2), sc, c); |
michael@0 | 582 | } |
michael@0 | 583 | void |
michael@0 | 584 | MacroAssemblerARM::ma_and(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) |
michael@0 | 585 | { |
michael@0 | 586 | ma_alu(dest, imm, dest, op_and, sc, c); |
michael@0 | 587 | } |
michael@0 | 588 | void |
michael@0 | 589 | MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest, |
michael@0 | 590 | SetCond_ sc, Assembler::Condition c) |
michael@0 | 591 | { |
michael@0 | 592 | ma_alu(src1, imm, dest, op_and, sc, c); |
michael@0 | 593 | } |
michael@0 | 594 | |
michael@0 | 595 | |
michael@0 | 596 | // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2). |
michael@0 | 597 | void |
michael@0 | 598 | MacroAssemblerARM::ma_bic(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c) |
michael@0 | 599 | { |
michael@0 | 600 | ma_alu(dest, imm, dest, op_bic, sc, c); |
michael@0 | 601 | } |
michael@0 | 602 | |
// Exclusive or.
void
MacroAssemblerARM::ma_eor(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- dest ^ src
    ma_eor(dest, src, dest, sc, c);
}
void
MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // dest <- src1 ^ src2
    as_eor(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_eor(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- dest ^ imm; ma_alu picks the cheapest encoding for imm.
    ma_alu(dest, imm, dest, op_eor, sc, c);
}
void
MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // dest <- src1 ^ imm
    ma_alu(src1, imm, dest, op_eor, sc, c);
}
michael@0 | 626 | |
// Or.
void
MacroAssemblerARM::ma_orr(Register src, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- dest | src
    ma_orr(dest, src, dest, sc, c);
}
void
MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // dest <- src1 | src2
    as_orr(dest, src1, O2Reg(src2), sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register dest, SetCond_ sc, Assembler::Condition c)
{
    // dest <- dest | imm; ma_alu picks the cheapest encoding for imm.
    ma_alu(dest, imm, dest, op_orr, sc, c);
}
void
MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                          SetCond_ sc, Assembler::Condition c)
{
    // dest <- src1 | imm
    ma_alu(src1, imm, dest, op_orr, sc, c);
}
michael@0 | 650 | |
michael@0 | 651 | // Arithmetic-based ops. |
michael@0 | 652 | // Add with carry. |
void
MacroAssemblerARM::ma_adc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    // dest = dest + imm + C (add with carry), in place.
    ma_alu(dest, imm, dest, op_adc, sc, c);
}
michael@0 | 658 | void |
michael@0 | 659 | MacroAssemblerARM::ma_adc(Register src, Register dest, SetCond_ sc, Condition c) |
michael@0 | 660 | { |
michael@0 | 661 | as_alu(dest, dest, O2Reg(src), op_adc, sc, c); |
michael@0 | 662 | } |
void
MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 + src2 + C (add with carry).
    as_alu(dest, src1, O2Reg(src2), op_adc, sc, c);
}
michael@0 | 668 | |
michael@0 | 669 | // Add. |
michael@0 | 670 | void |
michael@0 | 671 | MacroAssemblerARM::ma_add(Imm32 imm, Register dest, SetCond_ sc, Condition c) |
michael@0 | 672 | { |
michael@0 | 673 | ma_alu(dest, imm, dest, op_add, sc, c); |
michael@0 | 674 | } |
michael@0 | 675 | |
void
MacroAssemblerARM::ma_add(Register src1, Register dest, SetCond_ sc, Condition c)
{
    // In-place add: dest = dest + src1.
    ma_alu(dest, O2Reg(src1), dest, op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 + src2.
    as_alu(dest, src1, O2Reg(src2), op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 + op, for a general Operand right-hand side.
    ma_alu(src1, op, dest, op_add, sc, c);
}
void
MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 + op; ma_alu handles immediates that do not fit an Operand2.
    ma_alu(src1, op, dest, op_add, sc, c);
}
michael@0 | 696 | |
michael@0 | 697 | // Subtract with carry. |
void
MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    // dest = dest - imm - !C (subtract with carry), in place.
    ma_alu(dest, imm, dest, op_sbc, sc, c);
}
michael@0 | 703 | void |
michael@0 | 704 | MacroAssemblerARM::ma_sbc(Register src1, Register dest, SetCond_ sc, Condition c) |
michael@0 | 705 | { |
michael@0 | 706 | as_alu(dest, dest, O2Reg(src1), op_sbc, sc, c); |
michael@0 | 707 | } |
void
MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 - src2 - !C (subtract with carry).
    as_alu(dest, src1, O2Reg(src2), op_sbc, sc, c);
}
michael@0 | 713 | |
michael@0 | 714 | // Subtract. |
michael@0 | 715 | void |
michael@0 | 716 | MacroAssemblerARM::ma_sub(Imm32 imm, Register dest, SetCond_ sc, Condition c) |
michael@0 | 717 | { |
michael@0 | 718 | ma_alu(dest, imm, dest, op_sub, sc, c); |
michael@0 | 719 | } |
michael@0 | 720 | void |
michael@0 | 721 | MacroAssemblerARM::ma_sub(Register src1, Register dest, SetCond_ sc, Condition c) |
michael@0 | 722 | { |
michael@0 | 723 | ma_alu(dest, Operand(src1), dest, op_sub, sc, c); |
michael@0 | 724 | } |
void
MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 - src2.
    ma_alu(src1, Operand(src2), dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 - op, for a general Operand right-hand side.
    ma_alu(src1, op, dest, op_sub, sc, c);
}
void
MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest, SetCond_ sc, Condition c)
{
    // dest = src1 - op; ma_alu handles immediates that do not fit an Operand2.
    ma_alu(src1, op, dest, op_sub, sc, c);
}
michael@0 | 740 | |
// Reverse subtract.
michael@0 | 742 | void |
michael@0 | 743 | MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest, SetCond_ sc, Condition c) |
michael@0 | 744 | { |
michael@0 | 745 | ma_alu(dest, imm, dest, op_rsb, sc, c); |
michael@0 | 746 | } |
michael@0 | 747 | void |
michael@0 | 748 | MacroAssemblerARM::ma_rsb(Register src1, Register dest, SetCond_ sc, Condition c) |
michael@0 | 749 | { |
michael@0 | 750 | as_alu(dest, dest, O2Reg(src1), op_add, sc, c); |
michael@0 | 751 | } |
void
MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src2 - src1 (reverse subtract: operand2 minus first operand).
    as_alu(dest, src1, O2Reg(src2), op_rsb, sc, c);
}
void
MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest, SetCond_ sc, Condition c)
{
    // dest = op2 - src1; ma_alu handles immediates that do not fit an Operand2.
    ma_alu(src1, op2, dest, op_rsb, sc, c);
}
michael@0 | 762 | |
michael@0 | 763 | // Reverse subtract with carry. |
void
MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest, SetCond_ sc, Condition c)
{
    // dest = imm - dest - !C (reverse subtract with carry), in place.
    ma_alu(dest, imm, dest, op_rsc, sc, c);
}
michael@0 | 769 | void |
michael@0 | 770 | MacroAssemblerARM::ma_rsc(Register src1, Register dest, SetCond_ sc, Condition c) |
michael@0 | 771 | { |
michael@0 | 772 | as_alu(dest, dest, O2Reg(src1), op_rsc, sc, c); |
michael@0 | 773 | } |
void
MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest, SetCond_ sc, Condition c)
{
    // dest = src2 - src1 - !C (reverse subtract with carry).
    as_alu(dest, src1, O2Reg(src2), op_rsc, sc, c);
}
michael@0 | 779 | |
michael@0 | 780 | // Compares/tests. |
michael@0 | 781 | // Compare negative (sets condition codes as src1 + src2 would). |
void
MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm, Condition c)
{
    // Compare negative: set flags from src1 + imm; no register is written
    // (InvalidReg discards the result).
    ma_alu(src1, imm, InvalidReg, op_cmn, SetCond, c);
}
void
MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c)
{
    // Set flags from src1 + src2. The operands are passed in swapped order
    // relative to the other overloads; since CMN is an addition this is
    // presumably harmless — note for review.
    as_alu(InvalidReg, src2, O2Reg(src1), op_cmn, SetCond, c);
}
void
MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c)
{
    // Operand form of CMN is not implemented yet; abort if it is ever reached.
    MOZ_ASSUME_UNREACHABLE("Feature NYI");
}
michael@0 | 797 | |
michael@0 | 798 | // Compare (src - src2). |
void
MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm, Condition c)
{
    // Set flags from src1 - imm; no register is written (InvalidReg discards
    // the result). ma_alu handles immediates that do not fit an Operand2.
    ma_alu(src1, imm, InvalidReg, op_cmp, SetCond, c);
}
michael@0 | 804 | |
void
MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr, Condition c)
{
    // Word-sized pointers are 32-bit on ARM, so compare as an Imm32.
    ma_cmp(src1, Imm32(ptr.value), c);
}
michael@0 | 810 | |
void
MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr, Condition c)
{
    // GC pointers must be loaded via ma_mov so the value stays patchable by
    // the GC; clobbers ScratchRegister.
    ma_mov(ptr, ScratchRegister);
    ma_cmp(src1, ScratchRegister, c);
}
void
MacroAssemblerARM::ma_cmp(Register src1, Operand op, Condition c)
{
    // Compare against a general Operand: either a direct Operand2, or a
    // memory operand that is first loaded into ScratchRegister (clobbered).
    switch (op.getTag()) {
      case Operand::OP2:
        as_cmp(src1, op.toOp2(), c);
        break;
      case Operand::MEM:
        ma_ldr(op, ScratchRegister);
        as_cmp(src1, O2Reg(ScratchRegister), c);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("trying to compare FP and integer registers");
    }
}
void
MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c)
{
    // Set flags from src1 - src2.
    as_cmp(src1, O2Reg(src2), c);
}
michael@0 | 837 | |
michael@0 | 838 | // Test for equality, (src1^src2). |
void
MacroAssemblerARM::ma_teq(Register src1, Imm32 imm, Condition c)
{
    // Test equivalence: set flags from src1 ^ imm; Zero is set iff the values
    // are equal. No register is written (InvalidReg discards the result).
    ma_alu(src1, imm, InvalidReg, op_teq, SetCond, c);
}
michael@0 | 844 | void |
michael@0 | 845 | MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) |
michael@0 | 846 | { |
michael@0 | 847 | as_tst(src1, O2Reg(src2), c); |
michael@0 | 848 | } |
void
MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c)
{
    // Test equivalence against a general Operand2: flags from src1 ^ op.
    as_teq(src1, op.toOp2(), c);
}
michael@0 | 854 | |
michael@0 | 855 | |
michael@0 | 856 | // Test (src1 & src2). |
void
MacroAssemblerARM::ma_tst(Register src1, Imm32 imm, Condition c)
{
    // Bit test: set flags from src1 & imm; no register is written.
    ma_alu(src1, imm, InvalidReg, op_tst, SetCond, c);
}
void
MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c)
{
    // Bit test: set flags from src1 & src2.
    as_tst(src1, O2Reg(src2), c);
}
void
MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c)
{
    // Bit test against a general Operand2: flags from src1 & op.
    as_tst(src1, op.toOp2(), c);
}
michael@0 | 872 | |
void
MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest)
{
    // dest = src1 * src2 (low 32 bits).
    as_mul(dest, src1, src2);
}
michael@0 | 878 | void |
michael@0 | 879 | MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest) |
michael@0 | 880 | { |
michael@0 | 881 | |
michael@0 | 882 | ma_mov(imm, ScratchRegister); |
michael@0 | 883 | as_mul( dest, src1, ScratchRegister); |
michael@0 | 884 | } |
michael@0 | 885 | |
Assembler::Condition
MacroAssemblerARM::ma_check_mul(Register src1, Register src2, Register dest, Condition cond)
{
    // Multiply src1 * src2 into dest and set up the flags so that the
    // returned condition can be branched on to detect the requested overflow
    // condition. Clobbers ScratchRegister (it receives the high 32 bits of
    // the 64-bit product).
    // TODO: this operation is illegal on armv6 and earlier if src2 == ScratchRegister
    // or src2 == dest.
    if (cond == Equal || cond == NotEqual) {
        as_smull(ScratchRegister, dest, src1, src2, SetCond);
        return cond;
    }

    if (cond == Overflow) {
        // The multiply overflowed iff the high word differs from the sign
        // extension of the low word; compare them and report NotEqual.
        as_smull(ScratchRegister, dest, src1, src2);
        as_cmp(ScratchRegister, asr(dest, 31));
        return NotEqual;
    }

    MOZ_ASSUME_UNREACHABLE("Condition NYI");
}
michael@0 | 904 | |
Assembler::Condition
MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond)
{
    // Immediate variant of the overflow-checking multiply above: the
    // immediate is materialized in ScratchRegister, which then also receives
    // the high 32 bits of the product (clobbered).
    ma_mov(imm, ScratchRegister);
    if (cond == Equal || cond == NotEqual) {
        as_smull(ScratchRegister, dest, ScratchRegister, src1, SetCond);
        return cond;
    }

    if (cond == Overflow) {
        // Overflow iff the high word differs from the sign extension of the
        // low word.
        as_smull(ScratchRegister, dest, ScratchRegister, src1);
        as_cmp(ScratchRegister, asr(dest, 31));
        return NotEqual;
    }

    MOZ_ASSUME_UNREACHABLE("Condition NYI");
}
michael@0 | 922 | |
void
MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
                               int32_t shift)
{
    // Compute dest = src % ((1 << shift) - 1) without division.
    //
    // MATH:
    // We wish to compute x % (1<<y) - 1 for a known constant, y.
    // first, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
    // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
    // now, since both addition and multiplication commute with modulus,
    // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
    // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
    // now, since b == C + 1, b % C == 1, and b^n % C == 1
    // this means that the whole thing simplifies to:
    // c_0 + c_1 + c_2 ... c_n % C
    // each c_n can easily be computed by a shift/bitextract, and the modulus can be maintained
    // by simply subtracting by C whenever the number gets over C.
    int32_t mask = (1 << shift) - 1;
    Label head;

    // Register roles:
    // hold holds -1 if the value was negative, 1 otherwise.
    // tmp holds the remaining bits that have not been processed.
    // secondScratchReg_ serves as a temporary location to store extracted
    // bits into as well as holding the trial subtraction as a temp value.
    // dest is the accumulator (and holds the final result).

    // Move the whole value into tmp, setting the condition codes so we can
    // muck with them later.
    //
    // Note that we cannot use ScratchRegister in place of tmp here, as ma_and
    // below on certain architectures move the mask into ScratchRegister
    // before performing the bitwise and.
    as_mov(tmp, O2Reg(src), SetCond);
    // Zero out the dest.
    ma_mov(Imm32(0), dest);
    // Set the hold appropriately: 1 by default, -1 if the input was negative,
    // in which case tmp is also negated so the loop works on |src|.
    ma_mov(Imm32(1), hold);
    ma_mov(Imm32(-1), hold, NoSetCond, Signed);
    ma_rsb(Imm32(0), tmp, SetCond, Signed);
    // Begin the main loop.
    bind(&head);

    // Extract the bottom bits into secondScratchReg_.
    ma_and(Imm32(mask), tmp, secondScratchReg_);
    // Add those bits to the accumulator.
    ma_add(secondScratchReg_, dest, dest);
    // Do a trial subtraction, this is the same operation as cmp, but we store the dest
    ma_sub(dest, Imm32(mask), secondScratchReg_, SetCond);
    // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
    ma_mov(secondScratchReg_, dest, NoSetCond, NotSigned);
    // Get rid of the bits that we extracted before, and set the condition codes
    as_mov(tmp, lsr(tmp, shift), SetCond);
    // If the shift produced zero, finish, otherwise, continue in the loop.
    ma_b(&head, NonZero);
    // Check the hold to see if we need to negate the result. Hold can only be 1 or -1,
    // so this will never set the 0 flag.
    ma_cmp(hold, Imm32(0));
    // If the hold was non-zero, negate the result to be in line with what JS wants
    // this will set the condition codes if we try to negate
    ma_rsb(Imm32(0), dest, SetCond, Signed);
    // Since the Zero flag is not set by the compare, we can *only* set the Zero flag
    // in the rsb, so Zero is set iff we negated zero (e.g. the result of the computation was -0.0).

}
michael@0 | 986 | |
void
MacroAssemblerARM::ma_smod(Register num, Register div, Register dest)
{
    // Signed remainder: dest = num - (num / div) * div, via hardware SDIV
    // plus MLS. Clobbers ScratchRegister (holds the quotient).
    as_sdiv(ScratchRegister, num, div);
    as_mls(dest, num, ScratchRegister, div);
}
michael@0 | 993 | |
void
MacroAssemblerARM::ma_umod(Register num, Register div, Register dest)
{
    // Unsigned remainder: dest = num - (num / div) * div, via hardware UDIV
    // plus MLS. Clobbers ScratchRegister (holds the quotient).
    as_udiv(ScratchRegister, num, div);
    as_mls(dest, num, ScratchRegister, div);
}
michael@0 | 1000 | |
// Division.
void
MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest, Condition cond)
{
    // Signed hardware division: dest = num / div, predicated on cond.
    as_sdiv(dest, num, div, cond);
}
michael@0 | 1007 | |
void
MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest, Condition cond)
{
    // Unsigned hardware division: dest = num / div, predicated on cond.
    as_udiv(dest, num, div, cond);
}
michael@0 | 1013 | |
michael@0 | 1014 | // Memory. |
michael@0 | 1015 | // Shortcut for when we know we're transferring 32 bits of data. |
michael@0 | 1016 | void |
michael@0 | 1017 | MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt, |
michael@0 | 1018 | Index mode, Assembler::Condition cc) |
michael@0 | 1019 | { |
michael@0 | 1020 | ma_dataTransferN(ls, 32, true, rn, offset, rt, mode, cc); |
michael@0 | 1021 | } |
michael@0 | 1022 | |
void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
                          Index mode, Assembler::Condition cc)
{
    // Register-offset variant is not implemented yet; abort if ever reached.
    MOZ_ASSUME_UNREACHABLE("Feature NYI");
}
michael@0 | 1029 | |
void
MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode, Condition cc)
{
    // Store the 32-bit register rt at addr.
    as_dtr(IsStore, 32, mode, rt, addr, cc);
}
michael@0 | 1035 | |
void
MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Operand &addr, Index mode, Condition cc)
{
    // 32-bit transfer to/from a base+displacement Operand address.
    ma_dataTransferN(ls, 32, true,
                     Register::FromCode(addr.base()), Imm32(addr.disp()),
                     rt, mode, cc);
}
michael@0 | 1043 | |
void
MacroAssemblerARM::ma_str(Register rt, const Operand &addr, Index mode, Condition cc)
{
    // Store the 32-bit register rt at the Operand address.
    ma_dtr(IsStore, rt, addr, mode, cc);
}
void
MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode, Condition cc)
{
    // Store-doubleword of the pair {rt, rt+1}. STRD requires an even-numbered
    // first register and consecutive registers; rt2 exists only to assert that.
    JS_ASSERT((rt.code() & 1) == 0);
    JS_ASSERT(rt2.value.code() == rt.code() + 1);
    as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
}
michael@0 | 1056 | |
void
MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode, Condition cc)
{
    // Load a 32-bit value from addr into rt.
    as_dtr(IsLoad, 32, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldr(const Operand &addr, Register rt, Index mode, Condition cc)
{
    // Load a 32-bit value from the Operand address into rt.
    ma_dtr(IsLoad, rt, addr, mode, cc);
}
michael@0 | 1067 | |
void
MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode, Condition cc)
{
    // Load an unsigned byte (zero-extended) from addr into rt.
    as_dtr(IsLoad, 8, mode, rt, addr, cc);
}
michael@0 | 1073 | |
void
MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    // Load a signed halfword (sign-extended) from addr into rt.
    as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
}
michael@0 | 1079 | |
void
MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    // Load an unsigned halfword (zero-extended) from addr into rt.
    as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode, Condition cc)
{
    // Load a signed byte (sign-extended) from addr into rt.
    as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2,
                           Index mode, Condition cc)
{
    // Load-doubleword into the pair {rt, rt+1}. LDRD requires an
    // even-numbered first register and consecutive registers; rt2 exists
    // only to assert that.
    JS_ASSERT((rt.code() & 1) == 0);
    JS_ASSERT(rt2.value.code() == rt.code() + 1);
    as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
}
void
MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode, Condition cc)
{
    // Store the low halfword of rt at addr.
    as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
}
michael@0 | 1103 | |
void
MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
{
    // Store the low byte of rt at addr.
    as_dtr(IsStore, 8, mode, rt, addr, cc);
}
michael@0 | 1109 | |
michael@0 | 1110 | // Specialty for moving N bits of data, where n == 8,16,32,64. |
BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                    Register rn, Register rm, Register rt,
                                    Index mode, Assembler::Condition cc, unsigned shiftAmount)
{
    // Transfer `size` bits between rt and [rn + (rm << shiftAmount)].
    // Word and unsigned-byte transfers use the standard encoding, which
    // supports a shifted register offset directly.
    if (size == 32 || (size == 8 && !IsSigned)) {
        return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc);
    } else {
        // The extended transfers (signed byte, halfword, doubleword) have no
        // shifted-register addressing mode, so materialize rm << shiftAmount
        // in ScratchRegister first (clobbered in that case).
        if (shiftAmount != 0) {
            JS_ASSERT(rn != ScratchRegister);
            JS_ASSERT(rt != ScratchRegister);
            ma_lsl(Imm32(shiftAmount), rm, ScratchRegister);
            rm = ScratchRegister;
        }
        return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)), cc);
    }
}
michael@0 | 1128 | |
BufferOffset
MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
                                    Register rn, Imm32 offset, Register rt,
                                    Index mode, Assembler::Condition cc)
{
    // Transfer `size` bits between rt and [rn + offset], synthesizing a
    // multi-instruction sequence (via ScratchRegister, which may be
    // clobbered) when the immediate offset does not fit the encoding.
    int off = offset.value;
    // If the offset fits, this can be encoded as a single standard ldr/str.
    if (size == 32 || (size == 8 && !IsSigned) ) {
        if (off < 4096 && off > -4096) {
            // This encodes as a single instruction, Emulating mode's behavior
            // in a multi-instruction sequence is not necessary.
            return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
        }

        // We cannot encode this offset in a single ldr. For mode == index,
        // try to encode it as |add scratch, base, imm; ldr dest, [scratch, +offset]|.
        // This does not work for mode == PreIndex or mode == PostIndex.
        // PreIndex is simple, just do the add into the base register first, then do
        // a PreIndex'ed load. PostIndexed loads can be tricky. Normally, doing the load with
        // an index of 0, then doing an add would work, but if the destination is the PC,
        // you don't get to execute the instruction after the branch, which will lead to
        // the base register not being updated correctly. Explicitly handle this case, without
        // doing anything fancy, then handle all of the other cases.

        // mode == Offset
        // add scratch, base, offset_hi
        // ldr dest, [scratch, +offset_lo]
        //
        // mode == PreIndex
        // add base, base, offset_hi
        // ldr dest, [base, +offset_lo]!
        //
        // mode == PostIndex, dest == pc
        // ldr scratch, [base]
        // add base, base, offset_hi
        // add base, base, offset_lo
        // mov dest, scratch
        // PostIndex with the pc as the destination needs to be handled
        // specially, since in the code below, the write into 'dest'
        // is going to alter the control flow, so the following instruction would
        // never get emitted.
        //
        // mode == PostIndex, dest != pc
        // ldr dest, [base], offset_lo
        // add base, base, offset_hi

        if (rt == pc && mode == PostIndex && ls == IsLoad) {
            ma_mov(rn, ScratchRegister);
            ma_alu(rn, offset, rn, op_add);
            return as_dtr(IsLoad, size, Offset, pc, DTRAddr(ScratchRegister, DtrOffImm(0)), cc);
        }

        int bottom = off & 0xfff;
        int neg_bottom = 0x1000 - bottom;
        // For a regular offset, base == ScratchRegister does what we want. Modify the
        // scratch register, leaving the actual base unscathed.
        Register base = ScratchRegister;
        // For the preindex case, we want to just re-use rn as the base register, so when
        // the base register is updated *before* the load, rn is updated.
        if (mode == PreIndex)
            base = rn;
        // NOTE(review): `base` is computed above but the as_sub/as_add calls
        // below write ScratchRegister unconditionally and emit Offset-mode
        // transfers, so the PreIndex intent does not appear to be realized on
        // these paths — confirm whether PreIndex with a large offset is ever
        // reached.
        JS_ASSERT(mode != PostIndex);
        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
        //
        // Note a neg_bottom of 0x1000 can not be encoded as an immediate negative offset in the
        // instruction and this occurs when bottom is zero, so this case is guarded against below.
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
            }
            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
            }
        } else {
            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(bottom)), cc);
            }
            sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x1000); // Guarded against by: bottom != 0
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                return as_dtr(ls, size, Offset, rt, DTRAddr(ScratchRegister, DtrOffImm(-neg_bottom)), cc);
            }
        }
        // Fall back: materialize the whole offset in the scratch register.
        ma_mov(offset, ScratchRegister);
        return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(ScratchRegister, LSL, 0)));
    } else {
        // Should attempt to use the extended load/store instructions.
        if (off < 256 && off > -256)
            return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffImm(off)), cc);

        // We cannot encode this offset in a single extldr. Try to encode it as
        // an add scratch, base, imm; extldr dest, [scratch, +offset].
        int bottom = off & 0xff;
        int neg_bottom = 0x100 - bottom;
        // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities.
        //
        // Note a neg_bottom of 0x100 can not be encoded as an immediate negative offset in the
        // instruction and this occurs when bottom is zero, so this case is guarded against below.
        if (off < 0) {
            Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off
            if (!sub_off.invalid) {
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = off - bottom
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                 cc);
            }
            sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0
                as_sub(ScratchRegister, rn, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                 cc);
            }
        } else {
            Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom
            if (!sub_off.invalid) {
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = off - bottom
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(bottom)),
                                 cc);
            }
            sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off
            if (!sub_off.invalid && bottom != 0) {
                JS_ASSERT(neg_bottom < 0x100); // Guarded against by: bottom != 0
                as_add(ScratchRegister, rn, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off
                return as_extdtr(ls, size, IsSigned, Offset, rt,
                                 EDtrAddr(ScratchRegister, EDtrOffImm(-neg_bottom)),
                                 cc);
            }
        }
        // Fall back: materialize the whole offset in the scratch register.
        ma_mov(offset, ScratchRegister);
        return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(ScratchRegister)), cc);
    }
}
michael@0 | 1272 | |
michael@0 | 1273 | void |
michael@0 | 1274 | MacroAssemblerARM::ma_pop(Register r) |
michael@0 | 1275 | { |
michael@0 | 1276 | ma_dtr(IsLoad, sp, Imm32(4), r, PostIndex); |
michael@0 | 1277 | if (r == pc) |
michael@0 | 1278 | m_buffer.markGuard(); |
michael@0 | 1279 | } |
michael@0 | 1280 | void |
michael@0 | 1281 | MacroAssemblerARM::ma_push(Register r) |
michael@0 | 1282 | { |
michael@0 | 1283 | // Pushing sp is not well defined: use two instructions. |
michael@0 | 1284 | if (r == sp) { |
michael@0 | 1285 | ma_mov(sp, ScratchRegister); |
michael@0 | 1286 | r = ScratchRegister; |
michael@0 | 1287 | } |
michael@0 | 1288 | ma_dtr(IsStore, sp,Imm32(-4), r, PreIndex); |
michael@0 | 1289 | } |
michael@0 | 1290 | |
michael@0 | 1291 | void |
michael@0 | 1292 | MacroAssemblerARM::ma_vpop(VFPRegister r) |
michael@0 | 1293 | { |
michael@0 | 1294 | startFloatTransferM(IsLoad, sp, IA, WriteBack); |
michael@0 | 1295 | transferFloatReg(r); |
michael@0 | 1296 | finishFloatTransfer(); |
michael@0 | 1297 | } |
michael@0 | 1298 | void |
michael@0 | 1299 | MacroAssemblerARM::ma_vpush(VFPRegister r) |
michael@0 | 1300 | { |
michael@0 | 1301 | startFloatTransferM(IsStore, sp, DB, WriteBack); |
michael@0 | 1302 | transferFloatReg(r); |
michael@0 | 1303 | finishFloatTransfer(); |
michael@0 | 1304 | } |
michael@0 | 1305 | |
michael@0 | 1306 | // Branches when done from within arm-specific code. |
michael@0 | 1307 | BufferOffset |
michael@0 | 1308 | MacroAssemblerARM::ma_b(Label *dest, Assembler::Condition c, bool isPatchable) |
michael@0 | 1309 | { |
michael@0 | 1310 | return as_b(dest, c, isPatchable); |
michael@0 | 1311 | } |
michael@0 | 1312 | |
michael@0 | 1313 | void |
michael@0 | 1314 | MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c) |
michael@0 | 1315 | { |
michael@0 | 1316 | as_bx(dest, c); |
michael@0 | 1317 | } |
michael@0 | 1318 | |
michael@0 | 1319 | static Assembler::RelocBranchStyle |
michael@0 | 1320 | b_type() |
michael@0 | 1321 | { |
michael@0 | 1322 | return Assembler::B_LDR; |
michael@0 | 1323 | } |
michael@0 | 1324 | void |
michael@0 | 1325 | MacroAssemblerARM::ma_b(void *target, Relocation::Kind reloc, Assembler::Condition c) |
michael@0 | 1326 | { |
michael@0 | 1327 | // we know the absolute address of the target, but not our final |
michael@0 | 1328 | // location (with relocating GC, we *can't* know our final location) |
michael@0 | 1329 | // for now, I'm going to be conservative, and load this with an |
michael@0 | 1330 | // absolute address |
michael@0 | 1331 | uint32_t trg = (uint32_t)target; |
michael@0 | 1332 | switch (b_type()) { |
michael@0 | 1333 | case Assembler::B_MOVWT: |
michael@0 | 1334 | as_movw(ScratchRegister, Imm16(trg & 0xffff), c); |
michael@0 | 1335 | as_movt(ScratchRegister, Imm16(trg >> 16), c); |
michael@0 | 1336 | // this is going to get the branch predictor pissed off. |
michael@0 | 1337 | as_bx(ScratchRegister, c); |
michael@0 | 1338 | break; |
michael@0 | 1339 | case Assembler::B_LDR_BX: |
michael@0 | 1340 | as_Imm32Pool(ScratchRegister, trg, c); |
michael@0 | 1341 | as_bx(ScratchRegister, c); |
michael@0 | 1342 | break; |
michael@0 | 1343 | case Assembler::B_LDR: |
michael@0 | 1344 | as_Imm32Pool(pc, trg, c); |
michael@0 | 1345 | if (c == Always) |
michael@0 | 1346 | m_buffer.markGuard(); |
michael@0 | 1347 | break; |
michael@0 | 1348 | default: |
michael@0 | 1349 | MOZ_ASSUME_UNREACHABLE("Other methods of generating tracable jumps NYI"); |
michael@0 | 1350 | } |
michael@0 | 1351 | } |
michael@0 | 1352 | |
michael@0 | 1353 | // This is almost NEVER necessary: we'll basically never be calling a label, |
michael@0 | 1354 | // except possibly in the crazy bailout-table case. |
michael@0 | 1355 | void |
michael@0 | 1356 | MacroAssemblerARM::ma_bl(Label *dest, Assembler::Condition c) |
michael@0 | 1357 | { |
michael@0 | 1358 | as_bl(dest, c); |
michael@0 | 1359 | } |
michael@0 | 1360 | |
michael@0 | 1361 | void |
michael@0 | 1362 | MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c) |
michael@0 | 1363 | { |
michael@0 | 1364 | as_blx(reg, c); |
michael@0 | 1365 | } |
michael@0 | 1366 | |
michael@0 | 1367 | // VFP/ALU |
michael@0 | 1368 | void |
michael@0 | 1369 | MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1370 | { |
michael@0 | 1371 | as_vadd(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
michael@0 | 1372 | } |
michael@0 | 1373 | |
michael@0 | 1374 | void |
michael@0 | 1375 | MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1376 | { |
michael@0 | 1377 | as_vadd(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
michael@0 | 1378 | VFPRegister(src2).singleOverlay()); |
michael@0 | 1379 | } |
michael@0 | 1380 | |
michael@0 | 1381 | void |
michael@0 | 1382 | MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1383 | { |
michael@0 | 1384 | as_vsub(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
michael@0 | 1385 | } |
michael@0 | 1386 | |
michael@0 | 1387 | void |
michael@0 | 1388 | MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1389 | { |
michael@0 | 1390 | as_vsub(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
michael@0 | 1391 | VFPRegister(src2).singleOverlay()); |
michael@0 | 1392 | } |
michael@0 | 1393 | |
michael@0 | 1394 | void |
michael@0 | 1395 | MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1396 | { |
michael@0 | 1397 | as_vmul(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
michael@0 | 1398 | } |
michael@0 | 1399 | |
michael@0 | 1400 | void |
michael@0 | 1401 | MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1402 | { |
michael@0 | 1403 | as_vmul(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
michael@0 | 1404 | VFPRegister(src2).singleOverlay()); |
michael@0 | 1405 | } |
michael@0 | 1406 | |
michael@0 | 1407 | void |
michael@0 | 1408 | MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1409 | { |
michael@0 | 1410 | as_vdiv(VFPRegister(dst), VFPRegister(src1), VFPRegister(src2)); |
michael@0 | 1411 | } |
michael@0 | 1412 | |
michael@0 | 1413 | void |
michael@0 | 1414 | MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst) |
michael@0 | 1415 | { |
michael@0 | 1416 | as_vdiv(VFPRegister(dst).singleOverlay(), VFPRegister(src1).singleOverlay(), |
michael@0 | 1417 | VFPRegister(src2).singleOverlay()); |
michael@0 | 1418 | } |
michael@0 | 1419 | |
michael@0 | 1420 | void |
michael@0 | 1421 | MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1422 | { |
michael@0 | 1423 | as_vmov(dest, src, cc); |
michael@0 | 1424 | } |
michael@0 | 1425 | |
michael@0 | 1426 | void |
michael@0 | 1427 | MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1428 | { |
michael@0 | 1429 | as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
michael@0 | 1430 | } |
michael@0 | 1431 | |
michael@0 | 1432 | void |
michael@0 | 1433 | MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1434 | { |
michael@0 | 1435 | as_vneg(dest, src, cc); |
michael@0 | 1436 | } |
michael@0 | 1437 | |
michael@0 | 1438 | void |
michael@0 | 1439 | MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1440 | { |
michael@0 | 1441 | as_vneg(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
michael@0 | 1442 | } |
michael@0 | 1443 | |
michael@0 | 1444 | void |
michael@0 | 1445 | MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1446 | { |
michael@0 | 1447 | as_vabs(dest, src, cc); |
michael@0 | 1448 | } |
michael@0 | 1449 | |
michael@0 | 1450 | void |
michael@0 | 1451 | MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1452 | { |
michael@0 | 1453 | as_vabs(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
michael@0 | 1454 | } |
michael@0 | 1455 | |
michael@0 | 1456 | void |
michael@0 | 1457 | MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1458 | { |
michael@0 | 1459 | as_vsqrt(dest, src, cc); |
michael@0 | 1460 | } |
michael@0 | 1461 | |
michael@0 | 1462 | void |
michael@0 | 1463 | MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1464 | { |
michael@0 | 1465 | as_vsqrt(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); |
michael@0 | 1466 | } |
michael@0 | 1467 | |
michael@0 | 1468 | static inline uint32_t |
michael@0 | 1469 | DoubleHighWord(const double value) |
michael@0 | 1470 | { |
michael@0 | 1471 | return static_cast<uint32_t>(BitwiseCast<uint64_t>(value) >> 32); |
michael@0 | 1472 | } |
michael@0 | 1473 | |
michael@0 | 1474 | static inline uint32_t |
michael@0 | 1475 | DoubleLowWord(const double value) |
michael@0 | 1476 | { |
michael@0 | 1477 | return BitwiseCast<uint64_t>(value) & uint32_t(0xffffffff); |
michael@0 | 1478 | } |
michael@0 | 1479 | |
michael@0 | 1480 | void |
michael@0 | 1481 | MacroAssemblerARM::ma_vimm(double value, FloatRegister dest, Condition cc) |
michael@0 | 1482 | { |
michael@0 | 1483 | if (hasVFPv3()) { |
michael@0 | 1484 | if (DoubleLowWord(value) == 0) { |
michael@0 | 1485 | if (DoubleHighWord(value) == 0) { |
michael@0 | 1486 | // To zero a register, load 1.0, then execute dN <- dN - dN |
michael@0 | 1487 | as_vimm(dest, VFPImm::one, cc); |
michael@0 | 1488 | as_vsub(dest, dest, dest, cc); |
michael@0 | 1489 | return; |
michael@0 | 1490 | } |
michael@0 | 1491 | |
michael@0 | 1492 | VFPImm enc(DoubleHighWord(value)); |
michael@0 | 1493 | if (enc.isValid()) { |
michael@0 | 1494 | as_vimm(dest, enc, cc); |
michael@0 | 1495 | return; |
michael@0 | 1496 | } |
michael@0 | 1497 | |
michael@0 | 1498 | } |
michael@0 | 1499 | } |
michael@0 | 1500 | // Fall back to putting the value in a pool. |
michael@0 | 1501 | as_FImm64Pool(dest, value, cc); |
michael@0 | 1502 | } |
michael@0 | 1503 | |
michael@0 | 1504 | static inline uint32_t |
michael@0 | 1505 | Float32Word(const float value) |
michael@0 | 1506 | { |
michael@0 | 1507 | return BitwiseCast<uint32_t>(value); |
michael@0 | 1508 | } |
michael@0 | 1509 | |
michael@0 | 1510 | void |
michael@0 | 1511 | MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest, Condition cc) |
michael@0 | 1512 | { |
michael@0 | 1513 | VFPRegister vd = VFPRegister(dest).singleOverlay(); |
michael@0 | 1514 | if (hasVFPv3()) { |
michael@0 | 1515 | if (Float32Word(value) == 0) { |
michael@0 | 1516 | // To zero a register, load 1.0, then execute sN <- sN - sN |
michael@0 | 1517 | as_vimm(vd, VFPImm::one, cc); |
michael@0 | 1518 | as_vsub(vd, vd, vd, cc); |
michael@0 | 1519 | return; |
michael@0 | 1520 | } |
michael@0 | 1521 | |
michael@0 | 1522 | // Note that the vimm immediate float32 instruction encoding differs from the |
michael@0 | 1523 | // vimm immediate double encoding, but this difference matches the difference |
michael@0 | 1524 | // in the floating point formats, so it is possible to convert the float32 to |
michael@0 | 1525 | // a double and then use the double encoding paths. It is still necessary to |
michael@0 | 1526 | // firstly check that the double low word is zero because some float32 |
michael@0 | 1527 | // numbers set these bits and this can not be ignored. |
michael@0 | 1528 | double doubleValue = value; |
michael@0 | 1529 | if (DoubleLowWord(value) == 0) { |
michael@0 | 1530 | VFPImm enc(DoubleHighWord(doubleValue)); |
michael@0 | 1531 | if (enc.isValid()) { |
michael@0 | 1532 | as_vimm(vd, enc, cc); |
michael@0 | 1533 | return; |
michael@0 | 1534 | } |
michael@0 | 1535 | } |
michael@0 | 1536 | } |
michael@0 | 1537 | // Fall back to putting the value in a pool. |
michael@0 | 1538 | as_FImm32Pool(vd, value, cc); |
michael@0 | 1539 | } |
michael@0 | 1540 | |
michael@0 | 1541 | void |
michael@0 | 1542 | MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc) |
michael@0 | 1543 | { |
michael@0 | 1544 | as_vcmp(VFPRegister(src1), VFPRegister(src2), cc); |
michael@0 | 1545 | } |
michael@0 | 1546 | void |
michael@0 | 1547 | MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2, Condition cc) |
michael@0 | 1548 | { |
michael@0 | 1549 | as_vcmp(VFPRegister(src1).singleOverlay(), VFPRegister(src2).singleOverlay(), cc); |
michael@0 | 1550 | } |
michael@0 | 1551 | void |
michael@0 | 1552 | MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc) |
michael@0 | 1553 | { |
michael@0 | 1554 | as_vcmpz(VFPRegister(src1), cc); |
michael@0 | 1555 | } |
michael@0 | 1556 | void |
michael@0 | 1557 | MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc) |
michael@0 | 1558 | { |
michael@0 | 1559 | as_vcmpz(VFPRegister(src1).singleOverlay(), cc); |
michael@0 | 1560 | } |
michael@0 | 1561 | |
michael@0 | 1562 | void |
michael@0 | 1563 | MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1564 | { |
michael@0 | 1565 | as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src), false, cc); |
michael@0 | 1566 | } |
michael@0 | 1567 | void |
michael@0 | 1568 | MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1569 | { |
michael@0 | 1570 | as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src), false, cc); |
michael@0 | 1571 | } |
michael@0 | 1572 | void |
michael@0 | 1573 | MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister dest, FloatRegister src, Condition cc) |
michael@0 | 1574 | { |
michael@0 | 1575 | as_vcvt(VFPRegister(dest), VFPRegister(src).sintOverlay(), false, cc); |
michael@0 | 1576 | } |
michael@0 | 1577 | void |
michael@0 | 1578 | MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister dest, FloatRegister src, Condition cc) |
michael@0 | 1579 | { |
michael@0 | 1580 | as_vcvt(VFPRegister(dest), VFPRegister(src).uintOverlay(), false, cc); |
michael@0 | 1581 | } |
michael@0 | 1582 | |
michael@0 | 1583 | void |
michael@0 | 1584 | MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1585 | { |
michael@0 | 1586 | as_vcvt(VFPRegister(dest).sintOverlay(), VFPRegister(src).singleOverlay(), false, cc); |
michael@0 | 1587 | } |
michael@0 | 1588 | void |
michael@0 | 1589 | MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest, Condition cc) |
michael@0 | 1590 | { |
michael@0 | 1591 | as_vcvt(VFPRegister(dest).uintOverlay(), VFPRegister(src).singleOverlay(), false, cc); |
michael@0 | 1592 | } |
michael@0 | 1593 | void |
michael@0 | 1594 | MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister dest, FloatRegister src, Condition cc) |
michael@0 | 1595 | { |
michael@0 | 1596 | as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).sintOverlay(), false, cc); |
michael@0 | 1597 | } |
michael@0 | 1598 | void |
michael@0 | 1599 | MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister dest, FloatRegister src, Condition cc) |
michael@0 | 1600 | { |
michael@0 | 1601 | as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src).uintOverlay(), false, cc); |
michael@0 | 1602 | } |
michael@0 | 1603 | |
michael@0 | 1604 | void |
michael@0 | 1605 | MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest, Condition cc) |
michael@0 | 1606 | { |
michael@0 | 1607 | as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore, cc); |
michael@0 | 1608 | } |
michael@0 | 1609 | |
michael@0 | 1610 | void |
michael@0 | 1611 | MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1, Register dest2, Condition cc) |
michael@0 | 1612 | { |
michael@0 | 1613 | as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc); |
michael@0 | 1614 | } |
michael@0 | 1615 | |
michael@0 | 1616 | void |
michael@0 | 1617 | MacroAssemblerARM::ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc) |
michael@0 | 1618 | { |
michael@0 | 1619 | as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc); |
michael@0 | 1620 | } |
michael@0 | 1621 | |
michael@0 | 1622 | void |
michael@0 | 1623 | MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest, Condition cc) |
michael@0 | 1624 | { |
michael@0 | 1625 | as_vxfer(dest, InvalidReg, src, FloatToCore, cc); |
michael@0 | 1626 | } |
michael@0 | 1627 | |
michael@0 | 1628 | void |
michael@0 | 1629 | MacroAssemblerARM::ma_vxfer(VFPRegister src, Register dest1, Register dest2, Condition cc) |
michael@0 | 1630 | { |
michael@0 | 1631 | as_vxfer(dest1, dest2, src, FloatToCore, cc); |
michael@0 | 1632 | } |
michael@0 | 1633 | |
michael@0 | 1634 | BufferOffset |
michael@0 | 1635 | MacroAssemblerARM::ma_vdtr(LoadStore ls, const Operand &addr, VFPRegister rt, Condition cc) |
michael@0 | 1636 | { |
michael@0 | 1637 | int off = addr.disp(); |
michael@0 | 1638 | JS_ASSERT((off & 3) == 0); |
michael@0 | 1639 | Register base = Register::FromCode(addr.base()); |
michael@0 | 1640 | if (off > -1024 && off < 1024) |
michael@0 | 1641 | return as_vdtr(ls, rt, addr.toVFPAddr(), cc); |
michael@0 | 1642 | |
michael@0 | 1643 | // We cannot encode this offset in a a single ldr. Try to encode it as |
michael@0 | 1644 | // an add scratch, base, imm; ldr dest, [scratch, +offset]. |
michael@0 | 1645 | int bottom = off & (0xff << 2); |
michael@0 | 1646 | int neg_bottom = (0x100 << 2) - bottom; |
michael@0 | 1647 | // At this point, both off - bottom and off + neg_bottom will be reasonable-ish quantities. |
michael@0 | 1648 | // |
michael@0 | 1649 | // Note a neg_bottom of 0x400 can not be encoded as an immediate negative offset in the |
michael@0 | 1650 | // instruction and this occurs when bottom is zero, so this case is guarded against below. |
michael@0 | 1651 | if (off < 0) { |
michael@0 | 1652 | Operand2 sub_off = Imm8(-(off-bottom)); // sub_off = bottom - off |
michael@0 | 1653 | if (!sub_off.invalid) { |
michael@0 | 1654 | as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = off - bottom |
michael@0 | 1655 | return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); |
michael@0 | 1656 | } |
michael@0 | 1657 | sub_off = Imm8(-(off+neg_bottom));// sub_off = -neg_bottom - off |
michael@0 | 1658 | if (!sub_off.invalid && bottom != 0) { |
michael@0 | 1659 | JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0 |
michael@0 | 1660 | as_sub(ScratchRegister, base, sub_off, NoSetCond, cc); // - sub_off = neg_bottom + off |
michael@0 | 1661 | return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); |
michael@0 | 1662 | } |
michael@0 | 1663 | } else { |
michael@0 | 1664 | Operand2 sub_off = Imm8(off-bottom); // sub_off = off - bottom |
michael@0 | 1665 | if (!sub_off.invalid) { |
michael@0 | 1666 | as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = off - bottom |
michael@0 | 1667 | return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(bottom)), cc); |
michael@0 | 1668 | } |
michael@0 | 1669 | sub_off = Imm8(off+neg_bottom);// sub_off = neg_bottom + off |
michael@0 | 1670 | if (!sub_off.invalid && bottom != 0) { |
michael@0 | 1671 | JS_ASSERT(neg_bottom < 0x400); // Guarded against by: bottom != 0 |
michael@0 | 1672 | as_add(ScratchRegister, base, sub_off, NoSetCond, cc); // sub_off = neg_bottom + off |
michael@0 | 1673 | return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(-neg_bottom)), cc); |
michael@0 | 1674 | } |
michael@0 | 1675 | } |
michael@0 | 1676 | ma_add(base, Imm32(off), ScratchRegister, NoSetCond, cc); |
michael@0 | 1677 | return as_vdtr(ls, rt, VFPAddr(ScratchRegister, VFPOffImm(0)), cc); |
michael@0 | 1678 | } |
michael@0 | 1679 | |
michael@0 | 1680 | BufferOffset |
michael@0 | 1681 | MacroAssemblerARM::ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc) |
michael@0 | 1682 | { |
michael@0 | 1683 | return as_vdtr(IsLoad, dest, addr, cc); |
michael@0 | 1684 | } |
michael@0 | 1685 | BufferOffset |
michael@0 | 1686 | MacroAssemblerARM::ma_vldr(const Operand &addr, VFPRegister dest, Condition cc) |
michael@0 | 1687 | { |
michael@0 | 1688 | return ma_vdtr(IsLoad, addr, dest, cc); |
michael@0 | 1689 | } |
michael@0 | 1690 | BufferOffset |
michael@0 | 1691 | MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc) |
michael@0 | 1692 | { |
michael@0 | 1693 | as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc); |
michael@0 | 1694 | return ma_vldr(Operand(ScratchRegister, 0), src, cc); |
michael@0 | 1695 | } |
michael@0 | 1696 | |
michael@0 | 1697 | BufferOffset |
michael@0 | 1698 | MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc) |
michael@0 | 1699 | { |
michael@0 | 1700 | return as_vdtr(IsStore, src, addr, cc); |
michael@0 | 1701 | } |
michael@0 | 1702 | |
michael@0 | 1703 | BufferOffset |
michael@0 | 1704 | MacroAssemblerARM::ma_vstr(VFPRegister src, const Operand &addr, Condition cc) |
michael@0 | 1705 | { |
michael@0 | 1706 | return ma_vdtr(IsStore, addr, src, cc); |
michael@0 | 1707 | } |
michael@0 | 1708 | BufferOffset |
michael@0 | 1709 | MacroAssemblerARM::ma_vstr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc) |
michael@0 | 1710 | { |
michael@0 | 1711 | as_add(ScratchRegister, base, lsl(index, shift), NoSetCond, cc); |
michael@0 | 1712 | return ma_vstr(src, Operand(ScratchRegister, 0), cc); |
michael@0 | 1713 | } |
michael@0 | 1714 | |
michael@0 | 1715 | bool |
michael@0 | 1716 | MacroAssemblerARMCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset) |
michael@0 | 1717 | { |
michael@0 | 1718 | DebugOnly<uint32_t> initialDepth = framePushed(); |
michael@0 | 1719 | uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); |
michael@0 | 1720 | |
michael@0 | 1721 | Push(Imm32(descriptor)); // descriptor_ |
michael@0 | 1722 | |
michael@0 | 1723 | enterNoPool(); |
michael@0 | 1724 | DebugOnly<uint32_t> offsetBeforePush = currentOffset(); |
michael@0 | 1725 | Push(pc); // actually pushes $pc + 8. |
michael@0 | 1726 | |
michael@0 | 1727 | // Consume an additional 4 bytes. The start of the next instruction will |
michael@0 | 1728 | // then be 8 bytes after the instruction for Push(pc); this offset can |
michael@0 | 1729 | // therefore be fed to the safepoint. |
michael@0 | 1730 | ma_nop(); |
michael@0 | 1731 | uint32_t pseudoReturnOffset = currentOffset(); |
michael@0 | 1732 | leaveNoPool(); |
michael@0 | 1733 | |
michael@0 | 1734 | JS_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size()); |
michael@0 | 1735 | JS_ASSERT(pseudoReturnOffset - offsetBeforePush == 8); |
michael@0 | 1736 | |
michael@0 | 1737 | *offset = pseudoReturnOffset; |
michael@0 | 1738 | return true; |
michael@0 | 1739 | } |
michael@0 | 1740 | |
michael@0 | 1741 | bool |
michael@0 | 1742 | MacroAssemblerARMCompat::buildOOLFakeExitFrame(void *fakeReturnAddr) |
michael@0 | 1743 | { |
michael@0 | 1744 | DebugOnly<uint32_t> initialDepth = framePushed(); |
michael@0 | 1745 | uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); |
michael@0 | 1746 | |
michael@0 | 1747 | Push(Imm32(descriptor)); // descriptor_ |
michael@0 | 1748 | Push(ImmPtr(fakeReturnAddr)); |
michael@0 | 1749 | |
michael@0 | 1750 | return true; |
michael@0 | 1751 | } |
michael@0 | 1752 | |
michael@0 | 1753 | void |
michael@0 | 1754 | MacroAssemblerARMCompat::callWithExitFrame(JitCode *target) |
michael@0 | 1755 | { |
michael@0 | 1756 | uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS); |
michael@0 | 1757 | Push(Imm32(descriptor)); // descriptor |
michael@0 | 1758 | |
michael@0 | 1759 | addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE); |
michael@0 | 1760 | RelocStyle rs; |
michael@0 | 1761 | if (hasMOVWT()) |
michael@0 | 1762 | rs = L_MOVWT; |
michael@0 | 1763 | else |
michael@0 | 1764 | rs = L_LDR; |
michael@0 | 1765 | |
michael@0 | 1766 | ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs); |
michael@0 | 1767 | ma_callIonHalfPush(ScratchRegister); |
michael@0 | 1768 | } |
michael@0 | 1769 | |
michael@0 | 1770 | void |
michael@0 | 1771 | MacroAssemblerARMCompat::callWithExitFrame(JitCode *target, Register dynStack) |
michael@0 | 1772 | { |
michael@0 | 1773 | ma_add(Imm32(framePushed()), dynStack); |
michael@0 | 1774 | makeFrameDescriptor(dynStack, JitFrame_IonJS); |
michael@0 | 1775 | Push(dynStack); // descriptor |
michael@0 | 1776 | |
michael@0 | 1777 | addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE); |
michael@0 | 1778 | RelocStyle rs; |
michael@0 | 1779 | if (hasMOVWT()) |
michael@0 | 1780 | rs = L_MOVWT; |
michael@0 | 1781 | else |
michael@0 | 1782 | rs = L_LDR; |
michael@0 | 1783 | |
michael@0 | 1784 | ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, rs); |
michael@0 | 1785 | ma_callIonHalfPush(ScratchRegister); |
michael@0 | 1786 | } |
michael@0 | 1787 | |
michael@0 | 1788 | void |
michael@0 | 1789 | MacroAssemblerARMCompat::callIon(const Register &callee) |
michael@0 | 1790 | { |
michael@0 | 1791 | JS_ASSERT((framePushed() & 3) == 0); |
michael@0 | 1792 | if ((framePushed() & 7) == 4) { |
michael@0 | 1793 | ma_callIonHalfPush(callee); |
michael@0 | 1794 | } else { |
michael@0 | 1795 | adjustFrame(sizeof(void*)); |
michael@0 | 1796 | ma_callIon(callee); |
michael@0 | 1797 | } |
michael@0 | 1798 | } |
michael@0 | 1799 | |
michael@0 | 1800 | void |
michael@0 | 1801 | MacroAssemblerARMCompat::reserveStack(uint32_t amount) |
michael@0 | 1802 | { |
michael@0 | 1803 | if (amount) |
michael@0 | 1804 | ma_sub(Imm32(amount), sp); |
michael@0 | 1805 | adjustFrame(amount); |
michael@0 | 1806 | } |
michael@0 | 1807 | void |
michael@0 | 1808 | MacroAssemblerARMCompat::freeStack(uint32_t amount) |
michael@0 | 1809 | { |
michael@0 | 1810 | JS_ASSERT(amount <= framePushed_); |
michael@0 | 1811 | if (amount) |
michael@0 | 1812 | ma_add(Imm32(amount), sp); |
michael@0 | 1813 | adjustFrame(-amount); |
michael@0 | 1814 | } |
michael@0 | 1815 | void |
michael@0 | 1816 | MacroAssemblerARMCompat::freeStack(Register amount) |
michael@0 | 1817 | { |
michael@0 | 1818 | ma_add(amount, sp); |
michael@0 | 1819 | } |
michael@0 | 1820 | |
michael@0 | 1821 | void |
michael@0 | 1822 | MacroAssembler::PushRegsInMask(RegisterSet set) |
michael@0 | 1823 | { |
michael@0 | 1824 | int32_t diffF = set.fpus().size() * sizeof(double); |
michael@0 | 1825 | int32_t diffG = set.gprs().size() * sizeof(intptr_t); |
michael@0 | 1826 | |
michael@0 | 1827 | if (set.gprs().size() > 1) { |
michael@0 | 1828 | adjustFrame(diffG); |
michael@0 | 1829 | startDataTransferM(IsStore, StackPointer, DB, WriteBack); |
michael@0 | 1830 | for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) { |
michael@0 | 1831 | diffG -= sizeof(intptr_t); |
michael@0 | 1832 | transferReg(*iter); |
michael@0 | 1833 | } |
michael@0 | 1834 | finishDataTransfer(); |
michael@0 | 1835 | } else { |
michael@0 | 1836 | reserveStack(diffG); |
michael@0 | 1837 | for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) { |
michael@0 | 1838 | diffG -= sizeof(intptr_t); |
michael@0 | 1839 | storePtr(*iter, Address(StackPointer, diffG)); |
michael@0 | 1840 | } |
michael@0 | 1841 | } |
michael@0 | 1842 | JS_ASSERT(diffG == 0); |
michael@0 | 1843 | |
michael@0 | 1844 | adjustFrame(diffF); |
michael@0 | 1845 | diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB); |
michael@0 | 1846 | JS_ASSERT(diffF == 0); |
michael@0 | 1847 | } |
michael@0 | 1848 | |
michael@0 | 1849 | void |
michael@0 | 1850 | MacroAssembler::PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore) |
michael@0 | 1851 | { |
michael@0 | 1852 | int32_t diffG = set.gprs().size() * sizeof(intptr_t); |
michael@0 | 1853 | int32_t diffF = set.fpus().size() * sizeof(double); |
michael@0 | 1854 | const int32_t reservedG = diffG; |
michael@0 | 1855 | const int32_t reservedF = diffF; |
michael@0 | 1856 | |
michael@0 | 1857 | // ARM can load multiple registers at once, but only if we want back all |
michael@0 | 1858 | // the registers we previously saved to the stack. |
michael@0 | 1859 | if (ignore.empty(true)) { |
michael@0 | 1860 | diffF -= transferMultipleByRuns(set.fpus(), IsLoad, StackPointer, IA); |
michael@0 | 1861 | adjustFrame(-reservedF); |
michael@0 | 1862 | } else { |
michael@0 | 1863 | for (FloatRegisterBackwardIterator iter(set.fpus()); iter.more(); iter++) { |
michael@0 | 1864 | diffF -= sizeof(double); |
michael@0 | 1865 | if (!ignore.has(*iter)) |
michael@0 | 1866 | loadDouble(Address(StackPointer, diffF), *iter); |
michael@0 | 1867 | } |
michael@0 | 1868 | freeStack(reservedF); |
michael@0 | 1869 | } |
michael@0 | 1870 | JS_ASSERT(diffF == 0); |
michael@0 | 1871 | |
michael@0 | 1872 | if (set.gprs().size() > 1 && ignore.empty(false)) { |
michael@0 | 1873 | startDataTransferM(IsLoad, StackPointer, IA, WriteBack); |
michael@0 | 1874 | for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) { |
michael@0 | 1875 | diffG -= sizeof(intptr_t); |
michael@0 | 1876 | transferReg(*iter); |
michael@0 | 1877 | } |
michael@0 | 1878 | finishDataTransfer(); |
michael@0 | 1879 | adjustFrame(-reservedG); |
michael@0 | 1880 | } else { |
michael@0 | 1881 | for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) { |
michael@0 | 1882 | diffG -= sizeof(intptr_t); |
michael@0 | 1883 | if (!ignore.has(*iter)) |
michael@0 | 1884 | loadPtr(Address(StackPointer, diffG), *iter); |
michael@0 | 1885 | } |
michael@0 | 1886 | freeStack(reservedG); |
michael@0 | 1887 | } |
michael@0 | 1888 | JS_ASSERT(diffG == 0); |
michael@0 | 1889 | } |
michael@0 | 1890 | |
michael@0 | 1891 | void |
michael@0 | 1892 | MacroAssemblerARMCompat::add32(Register src, Register dest) |
michael@0 | 1893 | { |
michael@0 | 1894 | ma_add(src, dest, SetCond); |
michael@0 | 1895 | } |
michael@0 | 1896 | |
michael@0 | 1897 | void |
michael@0 | 1898 | MacroAssemblerARMCompat::add32(Imm32 imm, Register dest) |
michael@0 | 1899 | { |
michael@0 | 1900 | ma_add(imm, dest, SetCond); |
michael@0 | 1901 | } |
michael@0 | 1902 | |
michael@0 | 1903 | void |
michael@0 | 1904 | MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest) |
michael@0 | 1905 | { |
michael@0 | 1906 | ma_eor(imm, dest, SetCond); |
michael@0 | 1907 | } |
michael@0 | 1908 | |
michael@0 | 1909 | void |
michael@0 | 1910 | MacroAssemblerARMCompat::add32(Imm32 imm, const Address &dest) |
michael@0 | 1911 | { |
michael@0 | 1912 | load32(dest, ScratchRegister); |
michael@0 | 1913 | ma_add(imm, ScratchRegister, SetCond); |
michael@0 | 1914 | store32(ScratchRegister, dest); |
michael@0 | 1915 | } |
michael@0 | 1916 | |
michael@0 | 1917 | void |
michael@0 | 1918 | MacroAssemblerARMCompat::sub32(Imm32 imm, Register dest) |
michael@0 | 1919 | { |
michael@0 | 1920 | ma_sub(imm, dest, SetCond); |
michael@0 | 1921 | } |
michael@0 | 1922 | |
michael@0 | 1923 | void |
michael@0 | 1924 | MacroAssemblerARMCompat::sub32(Register src, Register dest) |
michael@0 | 1925 | { |
michael@0 | 1926 | ma_sub(src, dest, SetCond); |
michael@0 | 1927 | } |
michael@0 | 1928 | |
michael@0 | 1929 | void |
michael@0 | 1930 | MacroAssemblerARMCompat::and32(Imm32 imm, Register dest) |
michael@0 | 1931 | { |
michael@0 | 1932 | ma_and(imm, dest, SetCond); |
michael@0 | 1933 | } |
michael@0 | 1934 | |
michael@0 | 1935 | void |
michael@0 | 1936 | MacroAssemblerARMCompat::addPtr(Register src, Register dest) |
michael@0 | 1937 | { |
michael@0 | 1938 | ma_add(src, dest); |
michael@0 | 1939 | } |
michael@0 | 1940 | |
// Pointer-width add of a value loaded from memory: dest += *src.
// NOTE(review): this overload passes SetCond and therefore clobbers the
// condition codes, unlike the other addPtr overloads in this file — confirm
// whether any caller depends on the flags before changing it.
void
MacroAssemblerARMCompat::addPtr(const Address &src, Register dest)
{
    load32(src, ScratchRegister);
    ma_add(ScratchRegister, dest, SetCond);
}
michael@0 | 1947 | |
// 32-bit bitwise complement in place: reg = ~reg.
void
MacroAssemblerARMCompat::not32(Register reg)
{
    ma_mvn(reg, reg);
}
michael@0 | 1953 | |
// 32-bit bitwise-and of an immediate with a value in memory: *dest &= imm.
// Read-modify-write through ScratchRegister; condition codes untouched.
void
MacroAssemblerARMCompat::and32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_and(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}
michael@0 | 1961 | |
// 32-bit bitwise-or of an immediate with a value in memory: *dest |= imm.
// Read-modify-write through ScratchRegister; condition codes untouched.
void
MacroAssemblerARMCompat::or32(Imm32 imm, const Address &dest)
{
    load32(dest, ScratchRegister);
    ma_orr(imm, ScratchRegister);
    store32(ScratchRegister, dest);
}
michael@0 | 1969 | |
// Pointer-width exclusive-or of an immediate: dest ^= imm.
void
MacroAssemblerARMCompat::xorPtr(Imm32 imm, Register dest)
{
    ma_eor(imm, dest);
}
michael@0 | 1975 | |
// Pointer-width exclusive-or: dest ^= src.
void
MacroAssemblerARMCompat::xorPtr(Register src, Register dest)
{
    ma_eor(src, dest);
}
michael@0 | 1981 | |
// Pointer-width bitwise-or of an immediate: dest |= imm.
void
MacroAssemblerARMCompat::orPtr(Imm32 imm, Register dest)
{
    ma_orr(imm, dest);
}
michael@0 | 1987 | |
// Pointer-width bitwise-or: dest |= src.
void
MacroAssemblerARMCompat::orPtr(Register src, Register dest)
{
    ma_orr(src, dest);
}
michael@0 | 1993 | |
// Pointer-width bitwise-and of an immediate: dest &= imm.
void
MacroAssemblerARMCompat::andPtr(Imm32 imm, Register dest)
{
    ma_and(imm, dest);
}
michael@0 | 1999 | |
// Pointer-width bitwise-and: dest &= src.
void
MacroAssemblerARMCompat::andPtr(Register src, Register dest)
{
    ma_and(src, dest);
}
michael@0 | 2005 | |
// Load a 32-bit immediate into dest.
void
MacroAssemblerARMCompat::move32(const Imm32 &imm, const Register &dest)
{
    ma_mov(imm, dest);
}
michael@0 | 2011 | |
michael@0 | 2012 | void |
michael@0 | 2013 | MacroAssemblerARMCompat::move32(const Register &src, const Register &dest) { |
michael@0 | 2014 | ma_mov(src, dest); |
michael@0 | 2015 | } |
michael@0 | 2016 | |
// Register-to-register pointer-width move.
void
MacroAssemblerARMCompat::movePtr(const Register &src, const Register &dest)
{
    ma_mov(src, dest);
}
// Load a pointer-sized immediate into dest (pointers are 32 bits on ARM, so
// this is just a 32-bit immediate move).
void
MacroAssemblerARMCompat::movePtr(const ImmWord &imm, const Register &dest)
{
    ma_mov(Imm32(imm.value), dest);
}
// Load a GC-thing pointer into dest. The ImmGCPtr overload of ma_mov is used
// so the GC can locate and update the embedded pointer.
void
MacroAssemblerARMCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
{
    ma_mov(imm, dest);
}
// Load a raw pointer into dest; delegates to the ImmWord overload.
void
MacroAssemblerARMCompat::movePtr(const ImmPtr &imm, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
michael@0 | 2037 | void |
michael@0 | 2038 | MacroAssemblerARMCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest) |
michael@0 | 2039 | { |
michael@0 | 2040 | RelocStyle rs; |
michael@0 | 2041 | if (hasMOVWT()) |
michael@0 | 2042 | rs = L_MOVWT; |
michael@0 | 2043 | else |
michael@0 | 2044 | rs = L_LDR; |
michael@0 | 2045 | |
michael@0 | 2046 | enoughMemory_ &= append(AsmJSAbsoluteLink(nextOffset().getOffset(), imm.kind())); |
michael@0 | 2047 | ma_movPatchable(Imm32(-1), dest, Always, rs); |
michael@0 | 2048 | } |
// Load a byte from address and zero-extend it into dest.
void
MacroAssemblerARMCompat::load8ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
}
michael@0 | 2054 | |
michael@0 | 2055 | void |
michael@0 | 2056 | MacroAssemblerARMCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest) |
michael@0 | 2057 | { |
michael@0 | 2058 | Register base = src.base; |
michael@0 | 2059 | uint32_t scale = Imm32::ShiftOf(src.scale).value; |
michael@0 | 2060 | |
michael@0 | 2061 | if (src.offset != 0) { |
michael@0 | 2062 | ma_mov(base, ScratchRegister); |
michael@0 | 2063 | base = ScratchRegister; |
michael@0 | 2064 | ma_add(base, Imm32(src.offset), base); |
michael@0 | 2065 | } |
michael@0 | 2066 | ma_ldrb(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest); |
michael@0 | 2067 | |
michael@0 | 2068 | } |
michael@0 | 2069 | |
// Load a byte from address and sign-extend it into dest.
void
MacroAssemblerARMCompat::load8SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 8, true, address.base, Imm32(address.offset), dest);
}
michael@0 | 2075 | |
michael@0 | 2076 | void |
michael@0 | 2077 | MacroAssemblerARMCompat::load8SignExtend(const BaseIndex &src, const Register &dest) |
michael@0 | 2078 | { |
michael@0 | 2079 | Register index = src.index; |
michael@0 | 2080 | |
michael@0 | 2081 | // ARMv7 does not have LSL on an index register with an extended load. |
michael@0 | 2082 | if (src.scale != TimesOne) { |
michael@0 | 2083 | ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister); |
michael@0 | 2084 | index = ScratchRegister; |
michael@0 | 2085 | } |
michael@0 | 2086 | |
michael@0 | 2087 | if (src.offset != 0) { |
michael@0 | 2088 | if (index != ScratchRegister) { |
michael@0 | 2089 | ma_mov(index, ScratchRegister); |
michael@0 | 2090 | index = ScratchRegister; |
michael@0 | 2091 | } |
michael@0 | 2092 | ma_add(Imm32(src.offset), index); |
michael@0 | 2093 | } |
michael@0 | 2094 | ma_ldrsb(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
michael@0 | 2095 | } |
michael@0 | 2096 | |
// Load a halfword from address and zero-extend it into dest.
void
MacroAssemblerARMCompat::load16ZeroExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, false, address.base, Imm32(address.offset), dest);
}
michael@0 | 2102 | |
michael@0 | 2103 | void |
michael@0 | 2104 | MacroAssemblerARMCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest) |
michael@0 | 2105 | { |
michael@0 | 2106 | Register index = src.index; |
michael@0 | 2107 | |
michael@0 | 2108 | // ARMv7 does not have LSL on an index register with an extended load. |
michael@0 | 2109 | if (src.scale != TimesOne) { |
michael@0 | 2110 | ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister); |
michael@0 | 2111 | index = ScratchRegister; |
michael@0 | 2112 | } |
michael@0 | 2113 | |
michael@0 | 2114 | if (src.offset != 0) { |
michael@0 | 2115 | if (index != ScratchRegister) { |
michael@0 | 2116 | ma_mov(index, ScratchRegister); |
michael@0 | 2117 | index = ScratchRegister; |
michael@0 | 2118 | } |
michael@0 | 2119 | ma_add(Imm32(src.offset), index); |
michael@0 | 2120 | } |
michael@0 | 2121 | ma_ldrh(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
michael@0 | 2122 | } |
michael@0 | 2123 | |
// Load a halfword from address and sign-extend it into dest.
void
MacroAssemblerARMCompat::load16SignExtend(const Address &address, const Register &dest)
{
    ma_dataTransferN(IsLoad, 16, true, address.base, Imm32(address.offset), dest);
}
michael@0 | 2129 | |
michael@0 | 2130 | void |
michael@0 | 2131 | MacroAssemblerARMCompat::load16SignExtend(const BaseIndex &src, const Register &dest) |
michael@0 | 2132 | { |
michael@0 | 2133 | Register index = src.index; |
michael@0 | 2134 | |
michael@0 | 2135 | // We don't have LSL on index register yet. |
michael@0 | 2136 | if (src.scale != TimesOne) { |
michael@0 | 2137 | ma_lsl(Imm32::ShiftOf(src.scale), index, ScratchRegister); |
michael@0 | 2138 | index = ScratchRegister; |
michael@0 | 2139 | } |
michael@0 | 2140 | |
michael@0 | 2141 | if (src.offset != 0) { |
michael@0 | 2142 | if (index != ScratchRegister) { |
michael@0 | 2143 | ma_mov(index, ScratchRegister); |
michael@0 | 2144 | index = ScratchRegister; |
michael@0 | 2145 | } |
michael@0 | 2146 | ma_add(Imm32(src.offset), index); |
michael@0 | 2147 | } |
michael@0 | 2148 | ma_ldrsh(EDtrAddr(src.base, EDtrOffReg(index)), dest); |
michael@0 | 2149 | } |
michael@0 | 2150 | |
// Load a 32-bit word from address into dest. On ARM a word and a pointer are
// the same size, so this is just loadPtr.
void
MacroAssemblerARMCompat::load32(const Address &address, const Register &dest)
{
    loadPtr(address, dest);
}
michael@0 | 2156 | |
// Load a 32-bit word from a base+index address into dest; alias of loadPtr.
void
MacroAssemblerARMCompat::load32(const BaseIndex &address, const Register &dest)
{
    loadPtr(address, dest);
}
michael@0 | 2162 | |
// Load a 32-bit word from an absolute address into dest; alias of loadPtr.
void
MacroAssemblerARMCompat::load32(const AbsoluteAddress &address, const Register &dest)
{
    loadPtr(address, dest);
}
// Load a pointer-width value from base + offset into dest.
void
MacroAssemblerARMCompat::loadPtr(const Address &address, const Register &dest)
{
    ma_ldr(Operand(address), dest);
}
michael@0 | 2173 | |
michael@0 | 2174 | void |
michael@0 | 2175 | MacroAssemblerARMCompat::loadPtr(const BaseIndex &src, const Register &dest) |
michael@0 | 2176 | { |
michael@0 | 2177 | Register base = src.base; |
michael@0 | 2178 | uint32_t scale = Imm32::ShiftOf(src.scale).value; |
michael@0 | 2179 | |
michael@0 | 2180 | if (src.offset != 0) { |
michael@0 | 2181 | ma_mov(base, ScratchRegister); |
michael@0 | 2182 | base = ScratchRegister; |
michael@0 | 2183 | ma_add(Imm32(src.offset), base); |
michael@0 | 2184 | } |
michael@0 | 2185 | ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), dest); |
michael@0 | 2186 | } |
// Load a pointer-width value from an absolute address: materialize the
// address in ScratchRegister, then load through it.
void
MacroAssemblerARMCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
{
    movePtr(ImmWord(uintptr_t(address.addr)), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}
// Load a pointer-width value from an asm.js absolute address: emit a
// patchable address move into ScratchRegister, then load through it.
void
MacroAssemblerARMCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
{
    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
    loadPtr(Address(ScratchRegister, 0x0), dest);
}
michael@0 | 2199 | |
// Operand addressing the payload word of a boxed Value stored at address
// (the low word on this little-endian, 32-bit layout).
Operand payloadOf(const Address &address) {
    return Operand(address.base, address.offset);
}
// Operand addressing the tag word of a boxed Value stored at address: the
// word 4 bytes past the payload.
Operand tagOf(const Address &address) {
    return Operand(address.base, address.offset + 4);
}
michael@0 | 2206 | |
// Load the private (payload) word of the Value at address into dest; the tag
// word is ignored.
void
MacroAssemblerARMCompat::loadPrivate(const Address &address, const Register &dest)
{
    ma_ldr(payloadOf(address), dest);
}
michael@0 | 2212 | |
// Load a double from address into the VFP register dest.
void
MacroAssemblerARMCompat::loadDouble(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), dest);
}
michael@0 | 2218 | |
michael@0 | 2219 | void |
michael@0 | 2220 | MacroAssemblerARMCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest) |
michael@0 | 2221 | { |
michael@0 | 2222 | // VFP instructions don't even support register Base + register Index modes, so |
michael@0 | 2223 | // just add the index, then handle the offset like normal |
michael@0 | 2224 | Register base = src.base; |
michael@0 | 2225 | Register index = src.index; |
michael@0 | 2226 | uint32_t scale = Imm32::ShiftOf(src.scale).value; |
michael@0 | 2227 | int32_t offset = src.offset; |
michael@0 | 2228 | as_add(ScratchRegister, base, lsl(index, scale)); |
michael@0 | 2229 | |
michael@0 | 2230 | ma_vldr(Operand(ScratchRegister, offset), dest); |
michael@0 | 2231 | } |
michael@0 | 2232 | |
// Load a single-precision float from address and widen it to a double in
// dest. The load goes through dest's single-precision overlay, then vcvt
// converts it in place.
void
MacroAssemblerARMCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
{
    VFPRegister rt = dest;
    ma_vldr(Operand(address), rt.singleOverlay());
    as_vcvt(rt, rt.singleOverlay());
}
michael@0 | 2240 | |
michael@0 | 2241 | void |
michael@0 | 2242 | MacroAssemblerARMCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest) |
michael@0 | 2243 | { |
michael@0 | 2244 | // VFP instructions don't even support register Base + register Index modes, so |
michael@0 | 2245 | // just add the index, then handle the offset like normal |
michael@0 | 2246 | Register base = src.base; |
michael@0 | 2247 | Register index = src.index; |
michael@0 | 2248 | uint32_t scale = Imm32::ShiftOf(src.scale).value; |
michael@0 | 2249 | int32_t offset = src.offset; |
michael@0 | 2250 | VFPRegister rt = dest; |
michael@0 | 2251 | as_add(ScratchRegister, base, lsl(index, scale)); |
michael@0 | 2252 | |
michael@0 | 2253 | ma_vldr(Operand(ScratchRegister, offset), rt.singleOverlay()); |
michael@0 | 2254 | as_vcvt(rt, rt.singleOverlay()); |
michael@0 | 2255 | } |
michael@0 | 2256 | |
// Load a single-precision float from address into dest (no widening).
void
MacroAssemblerARMCompat::loadFloat32(const Address &address, const FloatRegister &dest)
{
    ma_vldr(Operand(address), VFPRegister(dest).singleOverlay());
}
michael@0 | 2262 | |
michael@0 | 2263 | void |
michael@0 | 2264 | MacroAssemblerARMCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest) |
michael@0 | 2265 | { |
michael@0 | 2266 | // VFP instructions don't even support register Base + register Index modes, so |
michael@0 | 2267 | // just add the index, then handle the offset like normal |
michael@0 | 2268 | Register base = src.base; |
michael@0 | 2269 | Register index = src.index; |
michael@0 | 2270 | uint32_t scale = Imm32::ShiftOf(src.scale).value; |
michael@0 | 2271 | int32_t offset = src.offset; |
michael@0 | 2272 | as_add(ScratchRegister, base, lsl(index, scale)); |
michael@0 | 2273 | |
michael@0 | 2274 | ma_vldr(Operand(ScratchRegister, offset), VFPRegister(dest).singleOverlay()); |
michael@0 | 2275 | } |
michael@0 | 2276 | |
// Store an immediate byte to address. The immediate is materialized in the
// second scratch register (ScratchRegister may be needed for the address).
void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, address);
}
michael@0 | 2283 | |
// Store the low byte of src to address.
void
MacroAssemblerARMCompat::store8(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 8, false, address.base, Imm32(address.offset), src);
}
michael@0 | 2289 | |
// Store an immediate byte to a base+index address, via the second scratch
// register (ScratchRegister may be needed for the address computation).
void
MacroAssemblerARMCompat::store8(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store8(secondScratchReg_, dest);
}
michael@0 | 2296 | |
michael@0 | 2297 | void |
michael@0 | 2298 | MacroAssemblerARMCompat::store8(const Register &src, const BaseIndex &dest) |
michael@0 | 2299 | { |
michael@0 | 2300 | Register base = dest.base; |
michael@0 | 2301 | uint32_t scale = Imm32::ShiftOf(dest.scale).value; |
michael@0 | 2302 | |
michael@0 | 2303 | if (dest.offset != 0) { |
michael@0 | 2304 | ma_add(base, Imm32(dest.offset), ScratchRegister); |
michael@0 | 2305 | base = ScratchRegister; |
michael@0 | 2306 | } |
michael@0 | 2307 | ma_strb(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale))); |
michael@0 | 2308 | } |
michael@0 | 2309 | |
// Store an immediate halfword to address, via the second scratch register
// (ScratchRegister may be needed for the address computation).
void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const Address &address)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, address);
}
michael@0 | 2316 | |
// Store the low halfword of src to address.
void
MacroAssemblerARMCompat::store16(const Register &src, const Address &address)
{
    ma_dataTransferN(IsStore, 16, false, address.base, Imm32(address.offset), src);
}
michael@0 | 2322 | |
// Store an immediate halfword to a base+index address, via the second
// scratch register.
void
MacroAssemblerARMCompat::store16(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store16(secondScratchReg_, dest);
}
michael@0 | 2329 | void |
michael@0 | 2330 | MacroAssemblerARMCompat::store16(const Register &src, const BaseIndex &address) |
michael@0 | 2331 | { |
michael@0 | 2332 | Register index = address.index; |
michael@0 | 2333 | |
michael@0 | 2334 | // We don't have LSL on index register yet. |
michael@0 | 2335 | if (address.scale != TimesOne) { |
michael@0 | 2336 | ma_lsl(Imm32::ShiftOf(address.scale), index, ScratchRegister); |
michael@0 | 2337 | index = ScratchRegister; |
michael@0 | 2338 | } |
michael@0 | 2339 | |
michael@0 | 2340 | if (address.offset != 0) { |
michael@0 | 2341 | ma_add(index, Imm32(address.offset), ScratchRegister); |
michael@0 | 2342 | index = ScratchRegister; |
michael@0 | 2343 | } |
michael@0 | 2344 | ma_strh(src, EDtrAddr(address.base, EDtrOffReg(index))); |
michael@0 | 2345 | } |
// Store a 32-bit word to an absolute address; alias of storePtr on ARM.
void
MacroAssemblerARMCompat::store32(const Register &src, const AbsoluteAddress &address)
{
    storePtr(src, address);
}
michael@0 | 2351 | |
// Store a 32-bit word to address; alias of storePtr on ARM.
void
MacroAssemblerARMCompat::store32(const Register &src, const Address &address)
{
    storePtr(src, address);
}
michael@0 | 2357 | |
// Store an immediate 32-bit word to address, via the second scratch register
// (ScratchRegister may be needed for the address computation).
void
MacroAssemblerARMCompat::store32(const Imm32 &src, const Address &address)
{
    move32(src, secondScratchReg_);
    storePtr(secondScratchReg_, address);
}
michael@0 | 2364 | |
// Store an immediate 32-bit word to a base+index address, via the second
// scratch register.
void
MacroAssemblerARMCompat::store32(const Imm32 &imm, const BaseIndex &dest)
{
    ma_mov(imm, secondScratchReg_);
    store32(secondScratchReg_, dest);
}
michael@0 | 2371 | |
michael@0 | 2372 | void |
michael@0 | 2373 | MacroAssemblerARMCompat::store32(const Register &src, const BaseIndex &dest) |
michael@0 | 2374 | { |
michael@0 | 2375 | Register base = dest.base; |
michael@0 | 2376 | uint32_t scale = Imm32::ShiftOf(dest.scale).value; |
michael@0 | 2377 | |
michael@0 | 2378 | if (dest.offset != 0) { |
michael@0 | 2379 | ma_add(base, Imm32(dest.offset), ScratchRegister); |
michael@0 | 2380 | base = ScratchRegister; |
michael@0 | 2381 | } |
michael@0 | 2382 | ma_str(src, DTRAddr(base, DtrRegImmShift(dest.index, LSL, scale))); |
michael@0 | 2383 | } |
michael@0 | 2384 | |
// Store a pointer-sized immediate to address, materializing it in
// ScratchRegister first.
void
MacroAssemblerARMCompat::storePtr(ImmWord imm, const Address &address)
{
    movePtr(imm, ScratchRegister);
    storePtr(ScratchRegister, address);
}
michael@0 | 2391 | |
// Store a raw pointer immediate to address; delegates to the ImmWord
// overload.
void
MacroAssemblerARMCompat::storePtr(ImmPtr imm, const Address &address)
{
    storePtr(ImmWord(uintptr_t(imm.value)), address);
}
michael@0 | 2397 | |
// Store a GC-thing pointer to address. movePtr's ImmGCPtr overload emits the
// traceable pointer load into ScratchRegister before the store.
void
MacroAssemblerARMCompat::storePtr(ImmGCPtr imm, const Address &address)
{
    movePtr(imm, ScratchRegister);
    storePtr(ScratchRegister, address);
}
michael@0 | 2404 | |
// Store the pointer-width value in src to address.
void
MacroAssemblerARMCompat::storePtr(Register src, const Address &address)
{
    ma_str(src, Operand(address));
}
michael@0 | 2410 | |
// Store src to an absolute address: materialize the address in
// ScratchRegister, then store through it.
void
MacroAssemblerARMCompat::storePtr(const Register &src, const AbsoluteAddress &dest)
{
    movePtr(ImmWord(uintptr_t(dest.addr)), ScratchRegister);
    storePtr(src, Address(ScratchRegister, 0x0));
}
michael@0 | 2417 | |
// Clamp the double in |input| to an integer in [0, 255] in |output|, rounding
// half-way cases to even (see the conditional bic at the end of each path).
// Note: this function clobbers the input register.
void
MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
{
    JS_ASSERT(input != ScratchFloatReg);
    // Add 0.5 then truncate, which rounds half-up; the code below detects the
    // exact half-way cases and rounds them back down to even.
    ma_vimm(0.5, ScratchFloatReg);
    if (hasVFPv3()) {
        Label notSplit;
        ma_vadd(input, ScratchFloatReg, ScratchFloatReg);
        // Convert the double into an unsigned fixed point value with 24 bits of
        // precision. The resulting number will look like 0xII.DDDDDD
        as_vcvtFixed(ScratchFloatReg, false, 24, true);
        // Move the fixed point value into an integer register
        as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
        // see if this value *might* have been an exact integer after adding 0.5
        // This tests the 1/2 through 1/16,777,216th places, but 0.5 needs to be tested out to
        // the 1/140,737,488,355,328th place.
        ma_tst(output, Imm32(0x00ffffff));
        // convert to a uint8 by shifting out all of the fraction bits
        // NB: the NonZero branch below consumes the flags from ma_tst, so
        // this shift must not set the condition codes.
        ma_lsr(Imm32(24), output, output);
        // If any of the bottom 24 bits were non-zero, then we're good, since this number
        // can't be exactly XX.0
        ma_b(&notSplit, NonZero);
        // Possibly a half-way case: inspect the low word of the original
        // double's bit pattern.
        as_vxfer(ScratchRegister, InvalidReg, input, FloatToCore);
        ma_cmp(ScratchRegister, Imm32(0));
        // If the lower 32 bits of the double were 0, then this was an exact number,
        // and it should be even. Clear bit 0 to round the tie to even.
        ma_bic(Imm32(1), output, NoSetCond, Zero);
        bind(&notSplit);
    } else {
        // Pre-VFPv3 path: no fixed-point conversion available, so clamp and
        // detect ties with explicit compares.
        Label outOfRange;
        // Compare input with 0.0; a NaN input makes the compare unordered,
        // which surfaces as the Overflow (V) flag after as_vmrs below.
        ma_vcmpz(input);
        // do the add, in place so we can reference it later
        ma_vadd(input, ScratchFloatReg, input);
        // do the conversion to an integer.
        as_vcvt(VFPRegister(ScratchFloatReg).uintOverlay(), VFPRegister(input));
        // copy the converted value out
        as_vxfer(output, InvalidReg, ScratchFloatReg, FloatToCore);
        as_vmrs(pc);
        ma_mov(Imm32(0), output, NoSetCond, Overflow);  // NaN => 0
        ma_b(&outOfRange, Overflow);  // NaN
        // Clamp anything above 255 down to 255.
        ma_cmp(output, Imm32(0xff));
        ma_mov(Imm32(0xff), output, NoSetCond, Above);
        ma_b(&outOfRange, Above);
        // convert it back to see if we got the same value back
        as_vcvt(ScratchFloatReg, VFPRegister(ScratchFloatReg).uintOverlay());
        // do the check
        as_vcmp(ScratchFloatReg, input);
        as_vmrs(pc);
        // Round-trip was exact => half-way case; round the tie to even by
        // clearing bit 0.
        ma_bic(Imm32(1), output, NoSetCond, Zero);
        bind(&outOfRange);
    }
}
michael@0 | 2471 | |
// Compare lhs with a 32-bit immediate, setting the condition codes.
// lhs must not be ScratchRegister — presumably because ma_cmp may clobber it
// to materialize a non-encodable immediate (hence the assert).
void
MacroAssemblerARMCompat::cmp32(const Register &lhs, const Imm32 &rhs)
{
    JS_ASSERT(lhs != ScratchRegister);
    ma_cmp(lhs, rhs);
}
michael@0 | 2478 | |
// Compare a register Operand with rhs, setting the condition codes.
void
MacroAssemblerARMCompat::cmp32(const Operand &lhs, const Register &rhs)
{
    ma_cmp(lhs.toReg(), rhs);
}
michael@0 | 2484 | |
// Compare a register Operand with a 32-bit immediate, setting the condition
// codes. The operand must not be ScratchRegister (see cmp32 above).
void
MacroAssemblerARMCompat::cmp32(const Operand &lhs, const Imm32 &rhs)
{
    JS_ASSERT(lhs.toReg() != ScratchRegister);
    ma_cmp(lhs.toReg(), rhs);
}
michael@0 | 2491 | |
// Compare two registers, setting the condition codes.
void
MacroAssemblerARMCompat::cmp32(const Register &lhs, const Register &rhs)
{
    ma_cmp(lhs, rhs);
}
michael@0 | 2497 | |
// Compare lhs with a pointer-sized immediate, setting the condition codes.
// lhs must not be ScratchRegister — presumably because ma_cmp may clobber it
// to materialize a non-encodable immediate (hence the assert).
void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmWord &rhs)
{
    JS_ASSERT(lhs != ScratchRegister);
    ma_cmp(lhs, Imm32(rhs.value));
}
michael@0 | 2504 | |
michael@0 | 2505 | void |
michael@0 | 2506 | MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmPtr &rhs) |
michael@0 | 2507 | { |
michael@0 | 2508 | return cmpPtr(lhs, ImmWord(uintptr_t(rhs.value))); |
michael@0 | 2509 | } |
michael@0 | 2510 | |
// Compare two pointer-width registers, setting the condition codes.
void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Register &rhs)
{
    ma_cmp(lhs, rhs);
}
michael@0 | 2516 | |
// Compare lhs with a GC-thing pointer (emitted as a traceable immediate),
// setting the condition codes.
void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const ImmGCPtr &rhs)
{
    ma_cmp(lhs, rhs);
}
michael@0 | 2522 | |
// Compare lhs with a 32-bit immediate, setting the condition codes.
void
MacroAssemblerARMCompat::cmpPtr(const Register &lhs, const Imm32 &rhs)
{
    ma_cmp(lhs, rhs);
}
michael@0 | 2528 | |
// Compare the pointer stored at lhs with rhs: load through ScratchRegister,
// then compare.
void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const Register &rhs)
{
    loadPtr(lhs, ScratchRegister);
    cmpPtr(ScratchRegister, rhs);
}
michael@0 | 2535 | |
// Compare the pointer stored at lhs with an immediate. The load uses the
// second scratch register — presumably so ScratchRegister stays free for
// ma_cmp to materialize the immediate; confirm against ma_cmp.
void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmWord &rhs)
{
    loadPtr(lhs, secondScratchReg_);
    ma_cmp(secondScratchReg_, Imm32(rhs.value));
}
michael@0 | 2542 | |
// Compare the pointer stored at lhs with a raw pointer immediate; delegates
// to the ImmWord overload.
void
MacroAssemblerARMCompat::cmpPtr(const Address &lhs, const ImmPtr &rhs)
{
    cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
}
michael@0 | 2548 | |
// Store reg into the arg'th pointer-sized outgoing-argument slot on the
// stack (sp + arg * sizeof(intptr_t)).
void
MacroAssemblerARMCompat::setStackArg(const Register &reg, uint32_t arg)
{
    ma_dataTransferN(IsStore, 32, true, sp, Imm32(arg * sizeof(intptr_t)), reg);

}
michael@0 | 2555 | |
// Pointer-width subtract of an immediate: dest -= imm.
void
MacroAssemblerARMCompat::subPtr(Imm32 imm, const Register dest)
{
    ma_sub(imm, dest);
}
michael@0 | 2561 | |
// Pointer-width subtract of a value loaded from memory: dest -= *addr.
void
MacroAssemblerARMCompat::subPtr(const Address &addr, const Register dest)
{
    loadPtr(addr, ScratchRegister);
    ma_sub(ScratchRegister, dest);
}
michael@0 | 2568 | |
// Pointer-width subtract: dest -= src.
void
MacroAssemblerARMCompat::subPtr(const Register &src, const Register &dest)
{
    ma_sub(src, dest);
}
michael@0 | 2574 | |
// Pointer-width subtract from memory: *dest -= src. Read-modify-write
// through ScratchRegister.
void
MacroAssemblerARMCompat::subPtr(const Register &src, const Address &dest)
{
    loadPtr(dest, ScratchRegister);
    ma_sub(src, ScratchRegister);
    storePtr(ScratchRegister, dest);
}
michael@0 | 2582 | |
// Pointer-width add of an immediate: dest += imm; condition codes untouched.
void
MacroAssemblerARMCompat::addPtr(Imm32 imm, const Register dest)
{
    ma_add(imm, dest);
}
michael@0 | 2588 | |
// Pointer-width add of an immediate to memory: *dest += imm.
// Read-modify-write through ScratchRegister.
void
MacroAssemblerARMCompat::addPtr(Imm32 imm, const Address &dest)
{
    loadPtr(dest, ScratchRegister);
    addPtr(imm, ScratchRegister);
    storePtr(ScratchRegister, dest);
}
michael@0 | 2596 | |
// Compare two doubles and copy the VFP status flags into the ARM status
// flags so ordinary conditional branches can follow. Passing InvalidFloatReg
// as rhs compares lhs against 0.0.
void
MacroAssemblerARMCompat::compareDouble(FloatRegister lhs, FloatRegister rhs)
{
    // Compare the doubles, setting vector status flags.
    if (rhs == InvalidFloatReg)
        ma_vcmpz(lhs);
    else
        ma_vcmp(lhs, rhs);

    // Move vector status bits to normal status flags.
    as_vmrs(pc);
}
michael@0 | 2609 | |
michael@0 | 2610 | void |
michael@0 | 2611 | MacroAssemblerARMCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs, |
michael@0 | 2612 | const FloatRegister &rhs, Label *label) |
michael@0 | 2613 | { |
michael@0 | 2614 | compareDouble(lhs, rhs); |
michael@0 | 2615 | |
michael@0 | 2616 | if (cond == DoubleNotEqual) { |
michael@0 | 2617 | // Force the unordered cases not to jump. |
michael@0 | 2618 | Label unordered; |
michael@0 | 2619 | ma_b(&unordered, VFP_Unordered); |
michael@0 | 2620 | ma_b(label, VFP_NotEqualOrUnordered); |
michael@0 | 2621 | bind(&unordered); |
michael@0 | 2622 | return; |
michael@0 | 2623 | } |
michael@0 | 2624 | |
michael@0 | 2625 | if (cond == DoubleEqualOrUnordered) { |
michael@0 | 2626 | ma_b(label, VFP_Unordered); |
michael@0 | 2627 | ma_b(label, VFP_Equal); |
michael@0 | 2628 | return; |
michael@0 | 2629 | } |
michael@0 | 2630 | |
michael@0 | 2631 | ma_b(label, ConditionFromDoubleCondition(cond)); |
michael@0 | 2632 | } |
michael@0 | 2633 | |
// Compare two single-precision floats (via the registers' single overlays)
// and copy the VFP status flags into the ARM status flags. Passing
// InvalidFloatReg as rhs compares lhs against 0.0.
void
MacroAssemblerARMCompat::compareFloat(FloatRegister lhs, FloatRegister rhs)
{
    // Compare the floats, setting vector status flags.
    if (rhs == InvalidFloatReg)
        as_vcmpz(VFPRegister(lhs).singleOverlay());
    else
        as_vcmp(VFPRegister(lhs).singleOverlay(), VFPRegister(rhs).singleOverlay());

    // Move vector status bits to normal status flags.
    as_vmrs(pc);
}
michael@0 | 2646 | |
michael@0 | 2647 | void |
michael@0 | 2648 | MacroAssemblerARMCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs, |
michael@0 | 2649 | const FloatRegister &rhs, Label *label) |
michael@0 | 2650 | { |
michael@0 | 2651 | compareFloat(lhs, rhs); |
michael@0 | 2652 | |
michael@0 | 2653 | if (cond == DoubleNotEqual) { |
michael@0 | 2654 | // Force the unordered cases not to jump. |
michael@0 | 2655 | Label unordered; |
michael@0 | 2656 | ma_b(&unordered, VFP_Unordered); |
michael@0 | 2657 | ma_b(label, VFP_NotEqualOrUnordered); |
michael@0 | 2658 | bind(&unordered); |
michael@0 | 2659 | return; |
michael@0 | 2660 | } |
michael@0 | 2661 | |
michael@0 | 2662 | if (cond == DoubleEqualOrUnordered) { |
michael@0 | 2663 | ma_b(label, VFP_Unordered); |
michael@0 | 2664 | ma_b(label, VFP_Equal); |
michael@0 | 2665 | return; |
michael@0 | 2666 | } |
michael@0 | 2667 | |
michael@0 | 2668 | ma_b(label, ConditionFromDoubleCondition(cond)); |
michael@0 | 2669 | } |
michael@0 | 2670 | |
michael@0 | 2671 | Assembler::Condition |
michael@0 | 2672 | MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2673 | { |
michael@0 | 2674 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2675 | ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_INT32)); |
michael@0 | 2676 | return cond; |
michael@0 | 2677 | } |
michael@0 | 2678 | |
michael@0 | 2679 | Assembler::Condition |
michael@0 | 2680 | MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2681 | { |
michael@0 | 2682 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2683 | ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN)); |
michael@0 | 2684 | return cond; |
michael@0 | 2685 | } |
michael@0 | 2686 | Assembler::Condition |
michael@0 | 2687 | MacroAssemblerARMCompat::testDouble(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2688 | { |
michael@0 | 2689 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2690 | Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual; |
michael@0 | 2691 | ma_cmp(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR)); |
michael@0 | 2692 | return actual; |
michael@0 | 2693 | } |
michael@0 | 2694 | |
michael@0 | 2695 | Assembler::Condition |
michael@0 | 2696 | MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2697 | { |
michael@0 | 2698 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2699 | ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_NULL)); |
michael@0 | 2700 | return cond; |
michael@0 | 2701 | } |
michael@0 | 2702 | |
michael@0 | 2703 | Assembler::Condition |
michael@0 | 2704 | MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2705 | { |
michael@0 | 2706 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2707 | ma_cmp(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED)); |
michael@0 | 2708 | return cond; |
michael@0 | 2709 | } |
michael@0 | 2710 | |
michael@0 | 2711 | Assembler::Condition |
michael@0 | 2712 | MacroAssemblerARMCompat::testString(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2713 | { |
michael@0 | 2714 | return testString(cond, value.typeReg()); |
michael@0 | 2715 | } |
michael@0 | 2716 | |
michael@0 | 2717 | Assembler::Condition |
michael@0 | 2718 | MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2719 | { |
michael@0 | 2720 | return testObject(cond, value.typeReg()); |
michael@0 | 2721 | } |
michael@0 | 2722 | |
michael@0 | 2723 | Assembler::Condition |
michael@0 | 2724 | MacroAssemblerARMCompat::testNumber(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2725 | { |
michael@0 | 2726 | return testNumber(cond, value.typeReg()); |
michael@0 | 2727 | } |
michael@0 | 2728 | |
michael@0 | 2729 | Assembler::Condition |
michael@0 | 2730 | MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2731 | { |
michael@0 | 2732 | return testMagic(cond, value.typeReg()); |
michael@0 | 2733 | } |
michael@0 | 2734 | |
michael@0 | 2735 | Assembler::Condition |
michael@0 | 2736 | MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const ValueOperand &value) |
michael@0 | 2737 | { |
michael@0 | 2738 | return testPrimitive(cond, value.typeReg()); |
michael@0 | 2739 | } |
michael@0 | 2740 | |
michael@0 | 2741 | // Register-based tests. |
michael@0 | 2742 | Assembler::Condition |
michael@0 | 2743 | MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Register &tag) |
michael@0 | 2744 | { |
michael@0 | 2745 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2746 | ma_cmp(tag, ImmTag(JSVAL_TAG_INT32)); |
michael@0 | 2747 | return cond; |
michael@0 | 2748 | } |
michael@0 | 2749 | |
michael@0 | 2750 | Assembler::Condition |
michael@0 | 2751 | MacroAssemblerARMCompat::testBoolean(Assembler::Condition cond, const Register &tag) |
michael@0 | 2752 | { |
michael@0 | 2753 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2754 | ma_cmp(tag, ImmTag(JSVAL_TAG_BOOLEAN)); |
michael@0 | 2755 | return cond; |
michael@0 | 2756 | } |
michael@0 | 2757 | |
michael@0 | 2758 | Assembler::Condition |
michael@0 | 2759 | MacroAssemblerARMCompat::testNull(Assembler::Condition cond, const Register &tag) { |
michael@0 | 2760 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2761 | ma_cmp(tag, ImmTag(JSVAL_TAG_NULL)); |
michael@0 | 2762 | return cond; |
michael@0 | 2763 | } |
michael@0 | 2764 | |
michael@0 | 2765 | Assembler::Condition |
michael@0 | 2766 | MacroAssemblerARMCompat::testUndefined(Assembler::Condition cond, const Register &tag) { |
michael@0 | 2767 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2768 | ma_cmp(tag, ImmTag(JSVAL_TAG_UNDEFINED)); |
michael@0 | 2769 | return cond; |
michael@0 | 2770 | } |
michael@0 | 2771 | |
michael@0 | 2772 | Assembler::Condition |
michael@0 | 2773 | MacroAssemblerARMCompat::testString(Assembler::Condition cond, const Register &tag) { |
michael@0 | 2774 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2775 | ma_cmp(tag, ImmTag(JSVAL_TAG_STRING)); |
michael@0 | 2776 | return cond; |
michael@0 | 2777 | } |
michael@0 | 2778 | |
michael@0 | 2779 | Assembler::Condition |
michael@0 | 2780 | MacroAssemblerARMCompat::testObject(Assembler::Condition cond, const Register &tag) |
michael@0 | 2781 | { |
michael@0 | 2782 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2783 | ma_cmp(tag, ImmTag(JSVAL_TAG_OBJECT)); |
michael@0 | 2784 | return cond; |
michael@0 | 2785 | } |
michael@0 | 2786 | |
michael@0 | 2787 | Assembler::Condition |
michael@0 | 2788 | MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Register &tag) |
michael@0 | 2789 | { |
michael@0 | 2790 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2791 | ma_cmp(tag, ImmTag(JSVAL_TAG_MAGIC)); |
michael@0 | 2792 | return cond; |
michael@0 | 2793 | } |
michael@0 | 2794 | |
michael@0 | 2795 | Assembler::Condition |
michael@0 | 2796 | MacroAssemblerARMCompat::testPrimitive(Assembler::Condition cond, const Register &tag) |
michael@0 | 2797 | { |
michael@0 | 2798 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2799 | ma_cmp(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET)); |
michael@0 | 2800 | return cond == Equal ? Below : AboveOrEqual; |
michael@0 | 2801 | } |
michael@0 | 2802 | |
michael@0 | 2803 | Assembler::Condition |
michael@0 | 2804 | MacroAssemblerARMCompat::testGCThing(Assembler::Condition cond, const Address &address) |
michael@0 | 2805 | { |
michael@0 | 2806 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2807 | extractTag(address, ScratchRegister); |
michael@0 | 2808 | ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); |
michael@0 | 2809 | return cond == Equal ? AboveOrEqual : Below; |
michael@0 | 2810 | } |
michael@0 | 2811 | |
michael@0 | 2812 | Assembler::Condition |
michael@0 | 2813 | MacroAssemblerARMCompat::testMagic(Assembler::Condition cond, const Address &address) |
michael@0 | 2814 | { |
michael@0 | 2815 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2816 | extractTag(address, ScratchRegister); |
michael@0 | 2817 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC)); |
michael@0 | 2818 | return cond; |
michael@0 | 2819 | } |
michael@0 | 2820 | |
michael@0 | 2821 | Assembler::Condition |
michael@0 | 2822 | MacroAssemblerARMCompat::testInt32(Assembler::Condition cond, const Address &address) |
michael@0 | 2823 | { |
michael@0 | 2824 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2825 | extractTag(address, ScratchRegister); |
michael@0 | 2826 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32)); |
michael@0 | 2827 | return cond; |
michael@0 | 2828 | } |
michael@0 | 2829 | |
michael@0 | 2830 | Assembler::Condition |
michael@0 | 2831 | MacroAssemblerARMCompat::testDouble(Condition cond, const Address &address) |
michael@0 | 2832 | { |
michael@0 | 2833 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2834 | extractTag(address, ScratchRegister); |
michael@0 | 2835 | return testDouble(cond, ScratchRegister); |
michael@0 | 2836 | } |
michael@0 | 2837 | |
michael@0 | 2838 | Assembler::Condition |
michael@0 | 2839 | MacroAssemblerARMCompat::testBoolean(Condition cond, const Address &address) |
michael@0 | 2840 | { |
michael@0 | 2841 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2842 | extractTag(address, ScratchRegister); |
michael@0 | 2843 | return testBoolean(cond, ScratchRegister); |
michael@0 | 2844 | } |
michael@0 | 2845 | |
michael@0 | 2846 | Assembler::Condition |
michael@0 | 2847 | MacroAssemblerARMCompat::testNull(Condition cond, const Address &address) |
michael@0 | 2848 | { |
michael@0 | 2849 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2850 | extractTag(address, ScratchRegister); |
michael@0 | 2851 | return testNull(cond, ScratchRegister); |
michael@0 | 2852 | } |
michael@0 | 2853 | |
michael@0 | 2854 | Assembler::Condition |
michael@0 | 2855 | MacroAssemblerARMCompat::testUndefined(Condition cond, const Address &address) |
michael@0 | 2856 | { |
michael@0 | 2857 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2858 | extractTag(address, ScratchRegister); |
michael@0 | 2859 | return testUndefined(cond, ScratchRegister); |
michael@0 | 2860 | } |
michael@0 | 2861 | |
michael@0 | 2862 | Assembler::Condition |
michael@0 | 2863 | MacroAssemblerARMCompat::testString(Condition cond, const Address &address) |
michael@0 | 2864 | { |
michael@0 | 2865 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2866 | extractTag(address, ScratchRegister); |
michael@0 | 2867 | return testString(cond, ScratchRegister); |
michael@0 | 2868 | } |
michael@0 | 2869 | |
michael@0 | 2870 | Assembler::Condition |
michael@0 | 2871 | MacroAssemblerARMCompat::testObject(Condition cond, const Address &address) |
michael@0 | 2872 | { |
michael@0 | 2873 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2874 | extractTag(address, ScratchRegister); |
michael@0 | 2875 | return testObject(cond, ScratchRegister); |
michael@0 | 2876 | } |
michael@0 | 2877 | |
michael@0 | 2878 | Assembler::Condition |
michael@0 | 2879 | MacroAssemblerARMCompat::testNumber(Condition cond, const Address &address) |
michael@0 | 2880 | { |
michael@0 | 2881 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2882 | extractTag(address, ScratchRegister); |
michael@0 | 2883 | return testNumber(cond, ScratchRegister); |
michael@0 | 2884 | } |
michael@0 | 2885 | |
michael@0 | 2886 | Assembler::Condition |
michael@0 | 2887 | MacroAssemblerARMCompat::testDouble(Condition cond, const Register &tag) |
michael@0 | 2888 | { |
michael@0 | 2889 | JS_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); |
michael@0 | 2890 | Condition actual = (cond == Equal) ? Below : AboveOrEqual; |
michael@0 | 2891 | ma_cmp(tag, ImmTag(JSVAL_TAG_CLEAR)); |
michael@0 | 2892 | return actual; |
michael@0 | 2893 | } |
michael@0 | 2894 | |
michael@0 | 2895 | Assembler::Condition |
michael@0 | 2896 | MacroAssemblerARMCompat::testNumber(Condition cond, const Register &tag) |
michael@0 | 2897 | { |
michael@0 | 2898 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2899 | ma_cmp(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET)); |
michael@0 | 2900 | return cond == Equal ? BelowOrEqual : Above; |
michael@0 | 2901 | } |
michael@0 | 2902 | |
michael@0 | 2903 | Assembler::Condition |
michael@0 | 2904 | MacroAssemblerARMCompat::testUndefined(Condition cond, const BaseIndex &src) |
michael@0 | 2905 | { |
michael@0 | 2906 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2907 | extractTag(src, ScratchRegister); |
michael@0 | 2908 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_UNDEFINED)); |
michael@0 | 2909 | return cond; |
michael@0 | 2910 | } |
michael@0 | 2911 | |
michael@0 | 2912 | Assembler::Condition |
michael@0 | 2913 | MacroAssemblerARMCompat::testNull(Condition cond, const BaseIndex &src) |
michael@0 | 2914 | { |
michael@0 | 2915 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2916 | extractTag(src, ScratchRegister); |
michael@0 | 2917 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_NULL)); |
michael@0 | 2918 | return cond; |
michael@0 | 2919 | } |
michael@0 | 2920 | |
michael@0 | 2921 | Assembler::Condition |
michael@0 | 2922 | MacroAssemblerARMCompat::testBoolean(Condition cond, const BaseIndex &src) |
michael@0 | 2923 | { |
michael@0 | 2924 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2925 | extractTag(src, ScratchRegister); |
michael@0 | 2926 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_BOOLEAN)); |
michael@0 | 2927 | return cond; |
michael@0 | 2928 | } |
michael@0 | 2929 | |
michael@0 | 2930 | Assembler::Condition |
michael@0 | 2931 | MacroAssemblerARMCompat::testString(Condition cond, const BaseIndex &src) |
michael@0 | 2932 | { |
michael@0 | 2933 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2934 | extractTag(src, ScratchRegister); |
michael@0 | 2935 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_STRING)); |
michael@0 | 2936 | return cond; |
michael@0 | 2937 | } |
michael@0 | 2938 | |
michael@0 | 2939 | Assembler::Condition |
michael@0 | 2940 | MacroAssemblerARMCompat::testInt32(Condition cond, const BaseIndex &src) |
michael@0 | 2941 | { |
michael@0 | 2942 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2943 | extractTag(src, ScratchRegister); |
michael@0 | 2944 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_INT32)); |
michael@0 | 2945 | return cond; |
michael@0 | 2946 | } |
michael@0 | 2947 | |
michael@0 | 2948 | Assembler::Condition |
michael@0 | 2949 | MacroAssemblerARMCompat::testObject(Condition cond, const BaseIndex &src) |
michael@0 | 2950 | { |
michael@0 | 2951 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2952 | extractTag(src, ScratchRegister); |
michael@0 | 2953 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_OBJECT)); |
michael@0 | 2954 | return cond; |
michael@0 | 2955 | } |
michael@0 | 2956 | |
michael@0 | 2957 | Assembler::Condition |
michael@0 | 2958 | MacroAssemblerARMCompat::testDouble(Condition cond, const BaseIndex &src) |
michael@0 | 2959 | { |
michael@0 | 2960 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2961 | Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual; |
michael@0 | 2962 | extractTag(src, ScratchRegister); |
michael@0 | 2963 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_CLEAR)); |
michael@0 | 2964 | return actual; |
michael@0 | 2965 | } |
michael@0 | 2966 | |
michael@0 | 2967 | Assembler::Condition |
michael@0 | 2968 | MacroAssemblerARMCompat::testMagic(Condition cond, const BaseIndex &address) |
michael@0 | 2969 | { |
michael@0 | 2970 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2971 | extractTag(address, ScratchRegister); |
michael@0 | 2972 | ma_cmp(ScratchRegister, ImmTag(JSVAL_TAG_MAGIC)); |
michael@0 | 2973 | return cond; |
michael@0 | 2974 | } |
michael@0 | 2975 | |
michael@0 | 2976 | Assembler::Condition |
michael@0 | 2977 | MacroAssemblerARMCompat::testGCThing(Condition cond, const BaseIndex &address) |
michael@0 | 2978 | { |
michael@0 | 2979 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 2980 | extractTag(address, ScratchRegister); |
michael@0 | 2981 | ma_cmp(ScratchRegister, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET)); |
michael@0 | 2982 | return cond == Equal ? AboveOrEqual : Below; |
michael@0 | 2983 | } |
michael@0 | 2984 | |
michael@0 | 2985 | void |
michael@0 | 2986 | MacroAssemblerARMCompat::branchTestValue(Condition cond, const ValueOperand &value, const Value &v, |
michael@0 | 2987 | Label *label) |
michael@0 | 2988 | { |
michael@0 | 2989 | // If cond == NotEqual, branch when a.payload != b.payload || a.tag != b.tag. |
michael@0 | 2990 | // If the payloads are equal, compare the tags. If the payloads are not equal, |
michael@0 | 2991 | // short circuit true (NotEqual). |
michael@0 | 2992 | // |
michael@0 | 2993 | // If cand == Equal, branch when a.payload == b.payload && a.tag == b.tag. |
michael@0 | 2994 | // If the payloads are equal, compare the tags. If the payloads are not equal, |
michael@0 | 2995 | // short circuit false (NotEqual). |
michael@0 | 2996 | jsval_layout jv = JSVAL_TO_IMPL(v); |
michael@0 | 2997 | if (v.isMarkable()) |
michael@0 | 2998 | ma_cmp(value.payloadReg(), ImmGCPtr(reinterpret_cast<gc::Cell *>(v.toGCThing()))); |
michael@0 | 2999 | else |
michael@0 | 3000 | ma_cmp(value.payloadReg(), Imm32(jv.s.payload.i32)); |
michael@0 | 3001 | ma_cmp(value.typeReg(), Imm32(jv.s.tag), Equal); |
michael@0 | 3002 | ma_b(label, cond); |
michael@0 | 3003 | } |
michael@0 | 3004 | |
michael@0 | 3005 | void |
michael@0 | 3006 | MacroAssemblerARMCompat::branchTestValue(Condition cond, const Address &valaddr, |
michael@0 | 3007 | const ValueOperand &value, Label *label) |
michael@0 | 3008 | { |
michael@0 | 3009 | JS_ASSERT(cond == Equal || cond == NotEqual); |
michael@0 | 3010 | |
michael@0 | 3011 | // Check payload before tag, since payload is more likely to differ. |
michael@0 | 3012 | if (cond == NotEqual) { |
michael@0 | 3013 | ma_ldr(payloadOf(valaddr), ScratchRegister); |
michael@0 | 3014 | branchPtr(NotEqual, ScratchRegister, value.payloadReg(), label); |
michael@0 | 3015 | |
michael@0 | 3016 | ma_ldr(tagOf(valaddr), ScratchRegister); |
michael@0 | 3017 | branchPtr(NotEqual, ScratchRegister, value.typeReg(), label); |
michael@0 | 3018 | |
michael@0 | 3019 | } else { |
michael@0 | 3020 | Label fallthrough; |
michael@0 | 3021 | |
michael@0 | 3022 | ma_ldr(payloadOf(valaddr), ScratchRegister); |
michael@0 | 3023 | branchPtr(NotEqual, ScratchRegister, value.payloadReg(), &fallthrough); |
michael@0 | 3024 | |
michael@0 | 3025 | ma_ldr(tagOf(valaddr), ScratchRegister); |
michael@0 | 3026 | branchPtr(Equal, ScratchRegister, value.typeReg(), label); |
michael@0 | 3027 | |
michael@0 | 3028 | bind(&fallthrough); |
michael@0 | 3029 | } |
michael@0 | 3030 | } |
michael@0 | 3031 | |
michael@0 | 3032 | // unboxing code |
michael@0 | 3033 | void |
michael@0 | 3034 | MacroAssemblerARMCompat::unboxInt32(const ValueOperand &operand, const Register &dest) |
michael@0 | 3035 | { |
michael@0 | 3036 | ma_mov(operand.payloadReg(), dest); |
michael@0 | 3037 | } |
michael@0 | 3038 | |
michael@0 | 3039 | void |
michael@0 | 3040 | MacroAssemblerARMCompat::unboxInt32(const Address &src, const Register &dest) |
michael@0 | 3041 | { |
michael@0 | 3042 | ma_ldr(payloadOf(src), dest); |
michael@0 | 3043 | } |
michael@0 | 3044 | |
michael@0 | 3045 | void |
michael@0 | 3046 | MacroAssemblerARMCompat::unboxBoolean(const ValueOperand &operand, const Register &dest) |
michael@0 | 3047 | { |
michael@0 | 3048 | ma_mov(operand.payloadReg(), dest); |
michael@0 | 3049 | } |
michael@0 | 3050 | |
michael@0 | 3051 | void |
michael@0 | 3052 | MacroAssemblerARMCompat::unboxBoolean(const Address &src, const Register &dest) |
michael@0 | 3053 | { |
michael@0 | 3054 | ma_ldr(payloadOf(src), dest); |
michael@0 | 3055 | } |
michael@0 | 3056 | |
michael@0 | 3057 | void |
michael@0 | 3058 | MacroAssemblerARMCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest) |
michael@0 | 3059 | { |
michael@0 | 3060 | JS_ASSERT(dest != ScratchFloatReg); |
michael@0 | 3061 | as_vxfer(operand.payloadReg(), operand.typeReg(), |
michael@0 | 3062 | VFPRegister(dest), CoreToFloat); |
michael@0 | 3063 | } |
michael@0 | 3064 | |
michael@0 | 3065 | void |
michael@0 | 3066 | MacroAssemblerARMCompat::unboxDouble(const Address &src, const FloatRegister &dest) |
michael@0 | 3067 | { |
michael@0 | 3068 | ma_vldr(Operand(src), dest); |
michael@0 | 3069 | } |
michael@0 | 3070 | |
michael@0 | 3071 | void |
michael@0 | 3072 | MacroAssemblerARMCompat::unboxString(const ValueOperand &operand, const Register &dest) |
michael@0 | 3073 | { |
michael@0 | 3074 | ma_mov(operand.payloadReg(), dest); |
michael@0 | 3075 | } |
michael@0 | 3076 | |
michael@0 | 3077 | void |
michael@0 | 3078 | MacroAssemblerARMCompat::unboxString(const Address &src, const Register &dest) |
michael@0 | 3079 | { |
michael@0 | 3080 | ma_ldr(payloadOf(src), dest); |
michael@0 | 3081 | } |
michael@0 | 3082 | |
michael@0 | 3083 | void |
michael@0 | 3084 | MacroAssemblerARMCompat::unboxObject(const ValueOperand &src, const Register &dest) |
michael@0 | 3085 | { |
michael@0 | 3086 | ma_mov(src.payloadReg(), dest); |
michael@0 | 3087 | } |
michael@0 | 3088 | |
michael@0 | 3089 | void |
michael@0 | 3090 | MacroAssemblerARMCompat::unboxValue(const ValueOperand &src, AnyRegister dest) |
michael@0 | 3091 | { |
michael@0 | 3092 | if (dest.isFloat()) { |
michael@0 | 3093 | Label notInt32, end; |
michael@0 | 3094 | branchTestInt32(Assembler::NotEqual, src, ¬Int32); |
michael@0 | 3095 | convertInt32ToDouble(src.payloadReg(), dest.fpu()); |
michael@0 | 3096 | ma_b(&end); |
michael@0 | 3097 | bind(¬Int32); |
michael@0 | 3098 | unboxDouble(src, dest.fpu()); |
michael@0 | 3099 | bind(&end); |
michael@0 | 3100 | } else if (src.payloadReg() != dest.gpr()) { |
michael@0 | 3101 | as_mov(dest.gpr(), O2Reg(src.payloadReg())); |
michael@0 | 3102 | } |
michael@0 | 3103 | } |
michael@0 | 3104 | |
michael@0 | 3105 | void |
michael@0 | 3106 | MacroAssemblerARMCompat::unboxPrivate(const ValueOperand &src, Register dest) |
michael@0 | 3107 | { |
michael@0 | 3108 | ma_mov(src.payloadReg(), dest); |
michael@0 | 3109 | } |
michael@0 | 3110 | |
michael@0 | 3111 | void |
michael@0 | 3112 | MacroAssemblerARMCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest) |
michael@0 | 3113 | { |
michael@0 | 3114 | as_vxfer(dest.payloadReg(), dest.typeReg(), VFPRegister(src), FloatToCore); |
michael@0 | 3115 | } |
michael@0 | 3116 | |
michael@0 | 3117 | void |
michael@0 | 3118 | MacroAssemblerARMCompat::boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest) { |
michael@0 | 3119 | if (src != dest.payloadReg()) |
michael@0 | 3120 | ma_mov(src, dest.payloadReg()); |
michael@0 | 3121 | ma_mov(ImmType(type), dest.typeReg()); |
michael@0 | 3122 | } |
michael@0 | 3123 | |
michael@0 | 3124 | void |
michael@0 | 3125 | MacroAssemblerARMCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest) |
michael@0 | 3126 | { |
michael@0 | 3127 | VFPRegister d = VFPRegister(dest); |
michael@0 | 3128 | ma_vimm(1.0, dest); |
michael@0 | 3129 | ma_cmp(operand.payloadReg(), Imm32(0)); |
michael@0 | 3130 | // If the source is 0, then subtract the dest from itself, producing 0. |
michael@0 | 3131 | as_vsub(d, d, d, Equal); |
michael@0 | 3132 | } |
michael@0 | 3133 | |
michael@0 | 3134 | void |
michael@0 | 3135 | MacroAssemblerARMCompat::int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest) |
michael@0 | 3136 | { |
michael@0 | 3137 | // transfer the integral value to a floating point register |
michael@0 | 3138 | VFPRegister vfpdest = VFPRegister(dest); |
michael@0 | 3139 | as_vxfer(operand.payloadReg(), InvalidReg, |
michael@0 | 3140 | vfpdest.sintOverlay(), CoreToFloat); |
michael@0 | 3141 | // convert the value to a double. |
michael@0 | 3142 | as_vcvt(vfpdest, vfpdest.sintOverlay()); |
michael@0 | 3143 | } |
michael@0 | 3144 | |
michael@0 | 3145 | void |
michael@0 | 3146 | MacroAssemblerARMCompat::boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest) |
michael@0 | 3147 | { |
michael@0 | 3148 | VFPRegister d = VFPRegister(dest).singleOverlay(); |
michael@0 | 3149 | ma_vimm_f32(1.0, dest); |
michael@0 | 3150 | ma_cmp(operand.payloadReg(), Imm32(0)); |
michael@0 | 3151 | // If the source is 0, then subtract the dest from itself, producing 0. |
michael@0 | 3152 | as_vsub(d, d, d, Equal); |
michael@0 | 3153 | } |
michael@0 | 3154 | |
michael@0 | 3155 | void |
michael@0 | 3156 | MacroAssemblerARMCompat::int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest) |
michael@0 | 3157 | { |
michael@0 | 3158 | // transfer the integral value to a floating point register |
michael@0 | 3159 | VFPRegister vfpdest = VFPRegister(dest).singleOverlay(); |
michael@0 | 3160 | as_vxfer(operand.payloadReg(), InvalidReg, |
michael@0 | 3161 | vfpdest.sintOverlay(), CoreToFloat); |
michael@0 | 3162 | // convert the value to a float. |
michael@0 | 3163 | as_vcvt(vfpdest, vfpdest.sintOverlay()); |
michael@0 | 3164 | } |
michael@0 | 3165 | |
michael@0 | 3166 | void |
michael@0 | 3167 | MacroAssemblerARMCompat::loadConstantFloat32(float f, const FloatRegister &dest) |
michael@0 | 3168 | { |
michael@0 | 3169 | ma_vimm_f32(f, dest); |
michael@0 | 3170 | } |
michael@0 | 3171 | |
michael@0 | 3172 | void |
michael@0 | 3173 | MacroAssemblerARMCompat::loadInt32OrDouble(const Operand &src, const FloatRegister &dest) |
michael@0 | 3174 | { |
michael@0 | 3175 | Label notInt32, end; |
michael@0 | 3176 | // If it's an int, convert it to double. |
michael@0 | 3177 | ma_ldr(ToType(src), ScratchRegister); |
michael@0 | 3178 | branchTestInt32(Assembler::NotEqual, ScratchRegister, ¬Int32); |
michael@0 | 3179 | ma_ldr(ToPayload(src), ScratchRegister); |
michael@0 | 3180 | convertInt32ToDouble(ScratchRegister, dest); |
michael@0 | 3181 | ma_b(&end); |
michael@0 | 3182 | |
michael@0 | 3183 | // Not an int, just load as double. |
michael@0 | 3184 | bind(¬Int32); |
michael@0 | 3185 | ma_vldr(src, dest); |
michael@0 | 3186 | bind(&end); |
michael@0 | 3187 | } |
michael@0 | 3188 | |
michael@0 | 3189 | void |
michael@0 | 3190 | MacroAssemblerARMCompat::loadInt32OrDouble(Register base, Register index, const FloatRegister &dest, int32_t shift) |
michael@0 | 3191 | { |
michael@0 | 3192 | Label notInt32, end; |
michael@0 | 3193 | |
michael@0 | 3194 | JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); |
michael@0 | 3195 | |
michael@0 | 3196 | // If it's an int, convert it to double. |
michael@0 | 3197 | ma_alu(base, lsl(index, shift), ScratchRegister, op_add); |
michael@0 | 3198 | |
michael@0 | 3199 | // Since we only have one scratch register, we need to stomp over it with the tag |
michael@0 | 3200 | ma_ldr(Address(ScratchRegister, NUNBOX32_TYPE_OFFSET), ScratchRegister); |
michael@0 | 3201 | branchTestInt32(Assembler::NotEqual, ScratchRegister, ¬Int32); |
michael@0 | 3202 | |
michael@0 | 3203 | // Implicitly requires NUNBOX32_PAYLOAD_OFFSET == 0: no offset provided |
michael@0 | 3204 | ma_ldr(DTRAddr(base, DtrRegImmShift(index, LSL, shift)), ScratchRegister); |
michael@0 | 3205 | convertInt32ToDouble(ScratchRegister, dest); |
michael@0 | 3206 | ma_b(&end); |
michael@0 | 3207 | |
michael@0 | 3208 | // Not an int, just load as double. |
michael@0 | 3209 | bind(¬Int32); |
michael@0 | 3210 | // First, recompute the offset that had been stored in the scratch register |
michael@0 | 3211 | // since the scratch register was overwritten loading in the type. |
michael@0 | 3212 | ma_alu(base, lsl(index, shift), ScratchRegister, op_add); |
michael@0 | 3213 | ma_vldr(Address(ScratchRegister, 0), dest); |
michael@0 | 3214 | bind(&end); |
michael@0 | 3215 | } |
michael@0 | 3216 | |
michael@0 | 3217 | void |
michael@0 | 3218 | MacroAssemblerARMCompat::loadConstantDouble(double dp, const FloatRegister &dest) |
michael@0 | 3219 | { |
michael@0 | 3220 | as_FImm64Pool(dest, dp); |
michael@0 | 3221 | } |
michael@0 | 3222 | |
michael@0 | 3223 | // treat the value as a boolean, and set condition codes accordingly |
michael@0 | 3224 | |
michael@0 | 3225 | Assembler::Condition |
michael@0 | 3226 | MacroAssemblerARMCompat::testInt32Truthy(bool truthy, const ValueOperand &operand) |
michael@0 | 3227 | { |
michael@0 | 3228 | ma_tst(operand.payloadReg(), operand.payloadReg()); |
michael@0 | 3229 | return truthy ? NonZero : Zero; |
michael@0 | 3230 | } |
michael@0 | 3231 | |
michael@0 | 3232 | Assembler::Condition |
michael@0 | 3233 | MacroAssemblerARMCompat::testBooleanTruthy(bool truthy, const ValueOperand &operand) |
michael@0 | 3234 | { |
michael@0 | 3235 | ma_tst(operand.payloadReg(), operand.payloadReg()); |
michael@0 | 3236 | return truthy ? NonZero : Zero; |
michael@0 | 3237 | } |
michael@0 | 3238 | |
michael@0 | 3239 | Assembler::Condition |
michael@0 | 3240 | MacroAssemblerARMCompat::testDoubleTruthy(bool truthy, const FloatRegister ®) |
michael@0 | 3241 | { |
michael@0 | 3242 | as_vcmpz(VFPRegister(reg)); |
michael@0 | 3243 | as_vmrs(pc); |
michael@0 | 3244 | as_cmp(r0, O2Reg(r0), Overflow); |
michael@0 | 3245 | return truthy ? NonZero : Zero; |
michael@0 | 3246 | } |
michael@0 | 3247 | |
michael@0 | 3248 | Register |
michael@0 | 3249 | MacroAssemblerARMCompat::extractObject(const Address &address, Register scratch) |
michael@0 | 3250 | { |
michael@0 | 3251 | ma_ldr(payloadOf(address), scratch); |
michael@0 | 3252 | return scratch; |
michael@0 | 3253 | } |
michael@0 | 3254 | |
michael@0 | 3255 | Register |
michael@0 | 3256 | MacroAssemblerARMCompat::extractTag(const Address &address, Register scratch) |
michael@0 | 3257 | { |
michael@0 | 3258 | ma_ldr(tagOf(address), scratch); |
michael@0 | 3259 | return scratch; |
michael@0 | 3260 | } |
michael@0 | 3261 | |
michael@0 | 3262 | Register |
michael@0 | 3263 | MacroAssemblerARMCompat::extractTag(const BaseIndex &address, Register scratch) |
michael@0 | 3264 | { |
michael@0 | 3265 | ma_alu(address.base, lsl(address.index, address.scale), scratch, op_add, NoSetCond); |
michael@0 | 3266 | return extractTag(Address(scratch, address.offset), scratch); |
michael@0 | 3267 | } |
michael@0 | 3268 | |
michael@0 | 3269 | void |
michael@0 | 3270 | MacroAssemblerARMCompat::moveValue(const Value &val, Register type, Register data) |
michael@0 | 3271 | { |
michael@0 | 3272 | jsval_layout jv = JSVAL_TO_IMPL(val); |
michael@0 | 3273 | ma_mov(Imm32(jv.s.tag), type); |
michael@0 | 3274 | if (val.isMarkable()) |
michael@0 | 3275 | ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())), data); |
michael@0 | 3276 | else |
michael@0 | 3277 | ma_mov(Imm32(jv.s.payload.i32), data); |
michael@0 | 3278 | } |
michael@0 | 3279 | void |
michael@0 | 3280 | MacroAssemblerARMCompat::moveValue(const Value &val, const ValueOperand &dest) |
michael@0 | 3281 | { |
michael@0 | 3282 | moveValue(val, dest.typeReg(), dest.payloadReg()); |
michael@0 | 3283 | } |
michael@0 | 3284 | |
michael@0 | 3285 | ///////////////////////////////////////////////////////////////// |
michael@0 | 3286 | // X86/X64-common (ARM too now) interface. |
michael@0 | 3287 | ///////////////////////////////////////////////////////////////// |
michael@0 | 3288 | void |
michael@0 | 3289 | MacroAssemblerARMCompat::storeValue(ValueOperand val, Operand dst) |
michael@0 | 3290 | { |
michael@0 | 3291 | ma_str(val.payloadReg(), ToPayload(dst)); |
michael@0 | 3292 | ma_str(val.typeReg(), ToType(dst)); |
michael@0 | 3293 | } |
michael@0 | 3294 | |
michael@0 | 3295 | void |
michael@0 | 3296 | MacroAssemblerARMCompat::storeValue(ValueOperand val, const BaseIndex &dest) |
michael@0 | 3297 | { |
michael@0 | 3298 | if (isValueDTRDCandidate(val) && Abs(dest.offset) <= 255) { |
michael@0 | 3299 | Register tmpIdx; |
michael@0 | 3300 | if (dest.offset == 0) { |
michael@0 | 3301 | if (dest.scale == TimesOne) { |
michael@0 | 3302 | tmpIdx = dest.index; |
michael@0 | 3303 | } else { |
michael@0 | 3304 | ma_lsl(Imm32(dest.scale), dest.index, ScratchRegister); |
michael@0 | 3305 | tmpIdx = ScratchRegister; |
michael@0 | 3306 | } |
michael@0 | 3307 | ma_strd(val.payloadReg(), val.typeReg(), EDtrAddr(dest.base, EDtrOffReg(tmpIdx))); |
michael@0 | 3308 | } else { |
michael@0 | 3309 | ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); |
michael@0 | 3310 | ma_strd(val.payloadReg(), val.typeReg(), |
michael@0 | 3311 | EDtrAddr(ScratchRegister, EDtrOffImm(dest.offset))); |
michael@0 | 3312 | } |
michael@0 | 3313 | } else { |
michael@0 | 3314 | ma_alu(dest.base, lsl(dest.index, dest.scale), ScratchRegister, op_add); |
michael@0 | 3315 | storeValue(val, Address(ScratchRegister, dest.offset)); |
michael@0 | 3316 | } |
michael@0 | 3317 | } |
michael@0 | 3318 | |
michael@0 | 3319 | void |
michael@0 | 3320 | MacroAssemblerARMCompat::loadValue(const BaseIndex &addr, ValueOperand val) |
michael@0 | 3321 | { |
michael@0 | 3322 | if (isValueDTRDCandidate(val) && Abs(addr.offset) <= 255) { |
michael@0 | 3323 | Register tmpIdx; |
michael@0 | 3324 | if (addr.offset == 0) { |
michael@0 | 3325 | if (addr.scale == TimesOne) { |
michael@0 | 3326 | tmpIdx = addr.index; |
michael@0 | 3327 | } else { |
michael@0 | 3328 | ma_lsl(Imm32(addr.scale), addr.index, ScratchRegister); |
michael@0 | 3329 | tmpIdx = ScratchRegister; |
michael@0 | 3330 | } |
michael@0 | 3331 | ma_ldrd(EDtrAddr(addr.base, EDtrOffReg(tmpIdx)), val.payloadReg(), val.typeReg()); |
michael@0 | 3332 | } else { |
michael@0 | 3333 | ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); |
michael@0 | 3334 | ma_ldrd(EDtrAddr(ScratchRegister, EDtrOffImm(addr.offset)), |
michael@0 | 3335 | val.payloadReg(), val.typeReg()); |
michael@0 | 3336 | } |
michael@0 | 3337 | } else { |
michael@0 | 3338 | ma_alu(addr.base, lsl(addr.index, addr.scale), ScratchRegister, op_add); |
michael@0 | 3339 | loadValue(Address(ScratchRegister, addr.offset), val); |
michael@0 | 3340 | } |
michael@0 | 3341 | } |
michael@0 | 3342 | |
michael@0 | 3343 | void |
michael@0 | 3344 | MacroAssemblerARMCompat::loadValue(Address src, ValueOperand val) |
michael@0 | 3345 | { |
michael@0 | 3346 | Operand srcOp = Operand(src); |
michael@0 | 3347 | Operand payload = ToPayload(srcOp); |
michael@0 | 3348 | Operand type = ToType(srcOp); |
michael@0 | 3349 | // TODO: copy this code into a generic function that acts on all sequences of memory accesses |
michael@0 | 3350 | if (isValueDTRDCandidate(val)) { |
michael@0 | 3351 | // If the value we want is in two consecutive registers starting with an even register, |
michael@0 | 3352 | // they can be combined as a single ldrd. |
michael@0 | 3353 | int offset = srcOp.disp(); |
michael@0 | 3354 | if (offset < 256 && offset > -256) { |
michael@0 | 3355 | ma_ldrd(EDtrAddr(Register::FromCode(srcOp.base()), EDtrOffImm(srcOp.disp())), val.payloadReg(), val.typeReg()); |
michael@0 | 3356 | return; |
michael@0 | 3357 | } |
michael@0 | 3358 | } |
michael@0 | 3359 | // if the value is lower than the type, then we may be able to use an ldm instruction |
michael@0 | 3360 | |
michael@0 | 3361 | if (val.payloadReg().code() < val.typeReg().code()) { |
michael@0 | 3362 | if (srcOp.disp() <= 4 && srcOp.disp() >= -8 && (srcOp.disp() & 3) == 0) { |
michael@0 | 3363 | // turns out each of the 4 value -8, -4, 0, 4 corresponds exactly with one of |
michael@0 | 3364 | // LDM{DB, DA, IA, IB} |
michael@0 | 3365 | DTMMode mode; |
michael@0 | 3366 | switch(srcOp.disp()) { |
michael@0 | 3367 | case -8: |
michael@0 | 3368 | mode = DB; |
michael@0 | 3369 | break; |
michael@0 | 3370 | case -4: |
michael@0 | 3371 | mode = DA; |
michael@0 | 3372 | break; |
michael@0 | 3373 | case 0: |
michael@0 | 3374 | mode = IA; |
michael@0 | 3375 | break; |
michael@0 | 3376 | case 4: |
michael@0 | 3377 | mode = IB; |
michael@0 | 3378 | break; |
michael@0 | 3379 | default: |
michael@0 | 3380 | MOZ_ASSUME_UNREACHABLE("Bogus Offset for LoadValue as DTM"); |
michael@0 | 3381 | } |
michael@0 | 3382 | startDataTransferM(IsLoad, Register::FromCode(srcOp.base()), mode); |
michael@0 | 3383 | transferReg(val.payloadReg()); |
michael@0 | 3384 | transferReg(val.typeReg()); |
michael@0 | 3385 | finishDataTransfer(); |
michael@0 | 3386 | return; |
michael@0 | 3387 | } |
michael@0 | 3388 | } |
michael@0 | 3389 | // Ensure that loading the payload does not erase the pointer to the |
michael@0 | 3390 | // Value in memory. |
michael@0 | 3391 | if (Register::FromCode(type.base()) != val.payloadReg()) { |
michael@0 | 3392 | ma_ldr(payload, val.payloadReg()); |
michael@0 | 3393 | ma_ldr(type, val.typeReg()); |
michael@0 | 3394 | } else { |
michael@0 | 3395 | ma_ldr(type, val.typeReg()); |
michael@0 | 3396 | ma_ldr(payload, val.payloadReg()); |
michael@0 | 3397 | } |
michael@0 | 3398 | } |
michael@0 | 3399 | |
michael@0 | 3400 | void |
michael@0 | 3401 | MacroAssemblerARMCompat::tagValue(JSValueType type, Register payload, ValueOperand dest) |
michael@0 | 3402 | { |
michael@0 | 3403 | JS_ASSERT(dest.typeReg() != dest.payloadReg()); |
michael@0 | 3404 | if (payload != dest.payloadReg()) |
michael@0 | 3405 | ma_mov(payload, dest.payloadReg()); |
michael@0 | 3406 | ma_mov(ImmType(type), dest.typeReg()); |
michael@0 | 3407 | } |
michael@0 | 3408 | |
michael@0 | 3409 | void |
michael@0 | 3410 | MacroAssemblerARMCompat::pushValue(ValueOperand val) { |
michael@0 | 3411 | ma_push(val.typeReg()); |
michael@0 | 3412 | ma_push(val.payloadReg()); |
michael@0 | 3413 | } |
michael@0 | 3414 | void |
michael@0 | 3415 | MacroAssemblerARMCompat::pushValue(const Address &addr) |
michael@0 | 3416 | { |
michael@0 | 3417 | JS_ASSERT(addr.base != StackPointer); |
michael@0 | 3418 | Operand srcOp = Operand(addr); |
michael@0 | 3419 | Operand payload = ToPayload(srcOp); |
michael@0 | 3420 | Operand type = ToType(srcOp); |
michael@0 | 3421 | |
michael@0 | 3422 | ma_ldr(type, ScratchRegister); |
michael@0 | 3423 | ma_push(ScratchRegister); |
michael@0 | 3424 | ma_ldr(payload, ScratchRegister); |
michael@0 | 3425 | ma_push(ScratchRegister); |
michael@0 | 3426 | } |
michael@0 | 3427 | |
michael@0 | 3428 | void |
michael@0 | 3429 | MacroAssemblerARMCompat::popValue(ValueOperand val) { |
michael@0 | 3430 | ma_pop(val.payloadReg()); |
michael@0 | 3431 | ma_pop(val.typeReg()); |
michael@0 | 3432 | } |
michael@0 | 3433 | void |
michael@0 | 3434 | MacroAssemblerARMCompat::storePayload(const Value &val, Operand dest) |
michael@0 | 3435 | { |
michael@0 | 3436 | jsval_layout jv = JSVAL_TO_IMPL(val); |
michael@0 | 3437 | if (val.isMarkable()) |
michael@0 | 3438 | ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), secondScratchReg_); |
michael@0 | 3439 | else |
michael@0 | 3440 | ma_mov(Imm32(jv.s.payload.i32), secondScratchReg_); |
michael@0 | 3441 | ma_str(secondScratchReg_, ToPayload(dest)); |
michael@0 | 3442 | } |
michael@0 | 3443 | void |
michael@0 | 3444 | MacroAssemblerARMCompat::storePayload(Register src, Operand dest) |
michael@0 | 3445 | { |
michael@0 | 3446 | if (dest.getTag() == Operand::MEM) { |
michael@0 | 3447 | ma_str(src, ToPayload(dest)); |
michael@0 | 3448 | return; |
michael@0 | 3449 | } |
michael@0 | 3450 | MOZ_ASSUME_UNREACHABLE("why do we do all of these things?"); |
michael@0 | 3451 | |
michael@0 | 3452 | } |
michael@0 | 3453 | |
michael@0 | 3454 | void |
michael@0 | 3455 | MacroAssemblerARMCompat::storePayload(const Value &val, Register base, Register index, int32_t shift) |
michael@0 | 3456 | { |
michael@0 | 3457 | jsval_layout jv = JSVAL_TO_IMPL(val); |
michael@0 | 3458 | if (val.isMarkable()) |
michael@0 | 3459 | ma_mov(ImmGCPtr((gc::Cell *)jv.s.payload.ptr), ScratchRegister); |
michael@0 | 3460 | else |
michael@0 | 3461 | ma_mov(Imm32(jv.s.payload.i32), ScratchRegister); |
michael@0 | 3462 | JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); |
michael@0 | 3463 | // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm] |
michael@0 | 3464 | // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call. |
michael@0 | 3465 | as_dtr(IsStore, 32, Offset, ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift))); |
michael@0 | 3466 | } |
michael@0 | 3467 | void |
michael@0 | 3468 | MacroAssemblerARMCompat::storePayload(Register src, Register base, Register index, int32_t shift) |
michael@0 | 3469 | { |
michael@0 | 3470 | JS_ASSERT((shift < 32) && (shift >= 0)); |
michael@0 | 3471 | // If NUNBOX32_PAYLOAD_OFFSET is not zero, the memory operand [base + index << shift + imm] |
michael@0 | 3472 | // cannot be encoded into a single instruction, and cannot be integrated into the as_dtr call. |
michael@0 | 3473 | JS_STATIC_ASSERT(NUNBOX32_PAYLOAD_OFFSET == 0); |
michael@0 | 3474 | // Technically, shift > -32 can be handle by changing LSL to ASR, but should never come up, |
michael@0 | 3475 | // and this is one less code path to get wrong. |
michael@0 | 3476 | as_dtr(IsStore, 32, Offset, src, DTRAddr(base, DtrRegImmShift(index, LSL, shift))); |
michael@0 | 3477 | } |
michael@0 | 3478 | |
michael@0 | 3479 | void |
michael@0 | 3480 | MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Operand dest) { |
michael@0 | 3481 | if (dest.getTag() == Operand::MEM) { |
michael@0 | 3482 | ma_mov(tag, secondScratchReg_); |
michael@0 | 3483 | ma_str(secondScratchReg_, ToType(dest)); |
michael@0 | 3484 | return; |
michael@0 | 3485 | } |
michael@0 | 3486 | |
michael@0 | 3487 | MOZ_ASSUME_UNREACHABLE("why do we do all of these things?"); |
michael@0 | 3488 | |
michael@0 | 3489 | } |
michael@0 | 3490 | |
michael@0 | 3491 | void |
michael@0 | 3492 | MacroAssemblerARMCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift) { |
michael@0 | 3493 | JS_ASSERT(base != ScratchRegister); |
michael@0 | 3494 | JS_ASSERT(index != ScratchRegister); |
michael@0 | 3495 | // A value needs to be store a value int base + index << shift + 4. |
michael@0 | 3496 | // Arm cannot handle this in a single operand, so a temp register is required. |
michael@0 | 3497 | // However, the scratch register is presently in use to hold the immediate that |
michael@0 | 3498 | // is being stored into said memory location. Work around this by modifying |
michael@0 | 3499 | // the base so the valid [base + index << shift] format can be used, then |
michael@0 | 3500 | // restore it. |
michael@0 | 3501 | ma_add(base, Imm32(NUNBOX32_TYPE_OFFSET), base); |
michael@0 | 3502 | ma_mov(tag, ScratchRegister); |
michael@0 | 3503 | ma_str(ScratchRegister, DTRAddr(base, DtrRegImmShift(index, LSL, shift))); |
michael@0 | 3504 | ma_sub(base, Imm32(NUNBOX32_TYPE_OFFSET), base); |
michael@0 | 3505 | } |
michael@0 | 3506 | |
michael@0 | 3507 | void |
michael@0 | 3508 | MacroAssemblerARMCompat::linkExitFrame() { |
michael@0 | 3509 | uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop(); |
michael@0 | 3510 | movePtr(ImmPtr(dest), ScratchRegister); |
michael@0 | 3511 | ma_str(StackPointer, Operand(ScratchRegister, 0)); |
michael@0 | 3512 | } |
michael@0 | 3513 | |
michael@0 | 3514 | void |
michael@0 | 3515 | MacroAssemblerARMCompat::linkParallelExitFrame(const Register &pt) |
michael@0 | 3516 | { |
michael@0 | 3517 | ma_str(StackPointer, Operand(pt, offsetof(PerThreadData, ionTop))); |
michael@0 | 3518 | } |
michael@0 | 3519 | |
michael@0 | 3520 | // ARM says that all reads of pc will return 8 higher than the |
michael@0 | 3521 | // address of the currently executing instruction. This means we are |
michael@0 | 3522 | // correctly storing the address of the instruction after the call |
michael@0 | 3523 | // in the register. |
michael@0 | 3524 | // Also ION is breaking the ARM EABI here (sort of). The ARM EABI |
michael@0 | 3525 | // says that a function call should move the pc into the link register, |
michael@0 | 3526 | // then branch to the function, and *sp is data that is owned by the caller, |
michael@0 | 3527 | // not the callee. The ION ABI says *sp should be the address that |
michael@0 | 3528 | // we will return to when leaving this function |
michael@0 | 3529 | void |
michael@0 | 3530 | MacroAssemblerARM::ma_callIon(const Register r) |
michael@0 | 3531 | { |
michael@0 | 3532 | // When the stack is 8 byte aligned, |
michael@0 | 3533 | // we want to decrement sp by 8, and write pc+8 into the new sp. |
michael@0 | 3534 | // when we return from this call, sp will be its present value minus 4. |
michael@0 | 3535 | AutoForbidPools afp(this); |
michael@0 | 3536 | as_dtr(IsStore, 32, PreIndex, pc, DTRAddr(sp, DtrOffImm(-8))); |
michael@0 | 3537 | as_blx(r); |
michael@0 | 3538 | } |
michael@0 | 3539 | void |
michael@0 | 3540 | MacroAssemblerARM::ma_callIonNoPush(const Register r) |
michael@0 | 3541 | { |
michael@0 | 3542 | // Since we just write the return address into the stack, which is |
michael@0 | 3543 | // popped on return, the net effect is removing 4 bytes from the stack |
michael@0 | 3544 | AutoForbidPools afp(this); |
michael@0 | 3545 | as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0))); |
michael@0 | 3546 | as_blx(r); |
michael@0 | 3547 | } |
michael@0 | 3548 | |
michael@0 | 3549 | void |
michael@0 | 3550 | MacroAssemblerARM::ma_callIonHalfPush(const Register r) |
michael@0 | 3551 | { |
michael@0 | 3552 | // The stack is unaligned by 4 bytes. |
michael@0 | 3553 | // We push the pc to the stack to align the stack before the call, when we |
michael@0 | 3554 | // return the pc is poped and the stack is restored to its unaligned state. |
michael@0 | 3555 | AutoForbidPools afp(this); |
michael@0 | 3556 | ma_push(pc); |
michael@0 | 3557 | as_blx(r); |
michael@0 | 3558 | } |
michael@0 | 3559 | |
michael@0 | 3560 | void |
michael@0 | 3561 | MacroAssemblerARM::ma_call(ImmPtr dest) |
michael@0 | 3562 | { |
michael@0 | 3563 | RelocStyle rs; |
michael@0 | 3564 | if (hasMOVWT()) |
michael@0 | 3565 | rs = L_MOVWT; |
michael@0 | 3566 | else |
michael@0 | 3567 | rs = L_LDR; |
michael@0 | 3568 | |
michael@0 | 3569 | ma_movPatchable(dest, CallReg, Always, rs); |
michael@0 | 3570 | as_blx(CallReg); |
michael@0 | 3571 | } |
michael@0 | 3572 | |
michael@0 | 3573 | void |
michael@0 | 3574 | MacroAssemblerARM::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes) |
michael@0 | 3575 | { |
michael@0 | 3576 | // Note: this function stores the return address to sp[0]. The caller must |
michael@0 | 3577 | // anticipate this by pushing additional space on the stack. The ABI does |
michael@0 | 3578 | // not provide space for a return address so this function may only be |
michael@0 | 3579 | // called if no argument are passed. |
michael@0 | 3580 | JS_ASSERT(stackArgBytes == 0); |
michael@0 | 3581 | AutoForbidPools afp(this); |
michael@0 | 3582 | as_dtr(IsStore, 32, Offset, pc, DTRAddr(sp, DtrOffImm(0))); |
michael@0 | 3583 | as_blx(r); |
michael@0 | 3584 | } |
michael@0 | 3585 | |
michael@0 | 3586 | void |
michael@0 | 3587 | MacroAssemblerARMCompat::breakpoint() |
michael@0 | 3588 | { |
michael@0 | 3589 | as_bkpt(); |
michael@0 | 3590 | } |
michael@0 | 3591 | |
michael@0 | 3592 | void |
michael@0 | 3593 | MacroAssemblerARMCompat::ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure) |
michael@0 | 3594 | { |
michael@0 | 3595 | Label isDouble, done; |
michael@0 | 3596 | branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble); |
michael@0 | 3597 | branchTestInt32(Assembler::NotEqual, source.typeReg(), failure); |
michael@0 | 3598 | |
michael@0 | 3599 | convertInt32ToDouble(source.payloadReg(), dest); |
michael@0 | 3600 | jump(&done); |
michael@0 | 3601 | |
michael@0 | 3602 | bind(&isDouble); |
michael@0 | 3603 | unboxDouble(source, dest); |
michael@0 | 3604 | |
michael@0 | 3605 | bind(&done); |
michael@0 | 3606 | } |
michael@0 | 3607 | |
michael@0 | 3608 | void |
michael@0 | 3609 | MacroAssemblerARMCompat::breakpoint(Condition cc) |
michael@0 | 3610 | { |
michael@0 | 3611 | ma_ldr(DTRAddr(r12, DtrRegImmShift(r12, LSL, 0, IsDown)), r12, Offset, cc); |
michael@0 | 3612 | } |
michael@0 | 3613 | |
michael@0 | 3614 | void |
michael@0 | 3615 | MacroAssemblerARMCompat::setupABICall(uint32_t args) |
michael@0 | 3616 | { |
michael@0 | 3617 | JS_ASSERT(!inCall_); |
michael@0 | 3618 | inCall_ = true; |
michael@0 | 3619 | args_ = args; |
michael@0 | 3620 | passedArgs_ = 0; |
michael@0 | 3621 | passedArgTypes_ = 0; |
michael@0 | 3622 | usedIntSlots_ = 0; |
michael@0 | 3623 | #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) |
michael@0 | 3624 | usedFloatSlots_ = 0; |
michael@0 | 3625 | usedFloat32_ = false; |
michael@0 | 3626 | padding_ = 0; |
michael@0 | 3627 | #endif |
michael@0 | 3628 | floatArgsInGPR[0] = MoveOperand(); |
michael@0 | 3629 | floatArgsInGPR[1] = MoveOperand(); |
michael@0 | 3630 | floatArgsInGPRValid[0] = false; |
michael@0 | 3631 | floatArgsInGPRValid[1] = false; |
michael@0 | 3632 | } |
michael@0 | 3633 | |
michael@0 | 3634 | void |
michael@0 | 3635 | MacroAssemblerARMCompat::setupAlignedABICall(uint32_t args) |
michael@0 | 3636 | { |
michael@0 | 3637 | setupABICall(args); |
michael@0 | 3638 | |
michael@0 | 3639 | dynamicAlignment_ = false; |
michael@0 | 3640 | } |
michael@0 | 3641 | |
michael@0 | 3642 | void |
michael@0 | 3643 | MacroAssemblerARMCompat::setupUnalignedABICall(uint32_t args, const Register &scratch) |
michael@0 | 3644 | { |
michael@0 | 3645 | setupABICall(args); |
michael@0 | 3646 | dynamicAlignment_ = true; |
michael@0 | 3647 | |
michael@0 | 3648 | ma_mov(sp, scratch); |
michael@0 | 3649 | |
michael@0 | 3650 | // Force sp to be aligned |
michael@0 | 3651 | ma_and(Imm32(~(StackAlignment - 1)), sp, sp); |
michael@0 | 3652 | ma_push(scratch); |
michael@0 | 3653 | } |
michael@0 | 3654 | |
michael@0 | 3655 | #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) |
michael@0 | 3656 | void |
michael@0 | 3657 | MacroAssemblerARMCompat::passHardFpABIArg(const MoveOperand &from, MoveOp::Type type) |
michael@0 | 3658 | { |
michael@0 | 3659 | MoveOperand to; |
michael@0 | 3660 | ++passedArgs_; |
michael@0 | 3661 | if (!enoughMemory_) |
michael@0 | 3662 | return; |
michael@0 | 3663 | switch (type) { |
michael@0 | 3664 | case MoveOp::FLOAT32: |
michael@0 | 3665 | case MoveOp::DOUBLE: { |
michael@0 | 3666 | // N.B. this isn't a limitation of the ABI, it is a limitation of the compiler right now. |
michael@0 | 3667 | // There isn't a good way to handle odd numbered single registers, so everything goes to hell |
michael@0 | 3668 | // when we try. Current fix is to never use more than one float in a function call. |
michael@0 | 3669 | // Fix coming along with complete float32 support in bug 957504. |
michael@0 | 3670 | JS_ASSERT(!usedFloat32_); |
michael@0 | 3671 | if (type == MoveOp::FLOAT32) |
michael@0 | 3672 | usedFloat32_ = true; |
michael@0 | 3673 | FloatRegister fr; |
michael@0 | 3674 | if (GetFloatArgReg(usedIntSlots_, usedFloatSlots_, &fr)) { |
michael@0 | 3675 | if (from.isFloatReg() && from.floatReg() == fr) { |
michael@0 | 3676 | // Nothing to do; the value is in the right register already |
michael@0 | 3677 | usedFloatSlots_++; |
michael@0 | 3678 | if (type == MoveOp::FLOAT32) |
michael@0 | 3679 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32; |
michael@0 | 3680 | else |
michael@0 | 3681 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double; |
michael@0 | 3682 | return; |
michael@0 | 3683 | } |
michael@0 | 3684 | to = MoveOperand(fr); |
michael@0 | 3685 | } else { |
michael@0 | 3686 | // If (and only if) the integer registers have started spilling, do we |
michael@0 | 3687 | // need to take the register's alignment into account |
michael@0 | 3688 | uint32_t disp = INT_MAX; |
michael@0 | 3689 | if (type == MoveOp::FLOAT32) |
michael@0 | 3690 | disp = GetFloat32ArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_); |
michael@0 | 3691 | else |
michael@0 | 3692 | disp = GetDoubleArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_); |
michael@0 | 3693 | to = MoveOperand(sp, disp); |
michael@0 | 3694 | } |
michael@0 | 3695 | usedFloatSlots_++; |
michael@0 | 3696 | if (type == MoveOp::FLOAT32) |
michael@0 | 3697 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32; |
michael@0 | 3698 | else |
michael@0 | 3699 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double; |
michael@0 | 3700 | break; |
michael@0 | 3701 | } |
michael@0 | 3702 | case MoveOp::GENERAL: { |
michael@0 | 3703 | Register r; |
michael@0 | 3704 | if (GetIntArgReg(usedIntSlots_, usedFloatSlots_, &r)) { |
michael@0 | 3705 | if (from.isGeneralReg() && from.reg() == r) { |
michael@0 | 3706 | // Nothing to do; the value is in the right register already |
michael@0 | 3707 | usedIntSlots_++; |
michael@0 | 3708 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General; |
michael@0 | 3709 | return; |
michael@0 | 3710 | } |
michael@0 | 3711 | to = MoveOperand(r); |
michael@0 | 3712 | } else { |
michael@0 | 3713 | uint32_t disp = GetIntArgStackDisp(usedIntSlots_, usedFloatSlots_, &padding_); |
michael@0 | 3714 | to = MoveOperand(sp, disp); |
michael@0 | 3715 | } |
michael@0 | 3716 | usedIntSlots_++; |
michael@0 | 3717 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General; |
michael@0 | 3718 | break; |
michael@0 | 3719 | } |
michael@0 | 3720 | default: |
michael@0 | 3721 | MOZ_ASSUME_UNREACHABLE("Unexpected argument type"); |
michael@0 | 3722 | } |
michael@0 | 3723 | |
michael@0 | 3724 | enoughMemory_ = moveResolver_.addMove(from, to, type); |
michael@0 | 3725 | } |
michael@0 | 3726 | #endif |
michael@0 | 3727 | |
michael@0 | 3728 | #if !defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) |
michael@0 | 3729 | void |
michael@0 | 3730 | MacroAssemblerARMCompat::passSoftFpABIArg(const MoveOperand &from, MoveOp::Type type) |
michael@0 | 3731 | { |
michael@0 | 3732 | MoveOperand to; |
michael@0 | 3733 | uint32_t increment = 1; |
michael@0 | 3734 | bool useResolver = true; |
michael@0 | 3735 | ++passedArgs_; |
michael@0 | 3736 | switch (type) { |
michael@0 | 3737 | case MoveOp::DOUBLE: |
michael@0 | 3738 | // Double arguments need to be rounded up to the nearest doubleword |
michael@0 | 3739 | // boundary, even if it is in a register! |
michael@0 | 3740 | usedIntSlots_ = (usedIntSlots_ + 1) & ~1; |
michael@0 | 3741 | increment = 2; |
michael@0 | 3742 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Double; |
michael@0 | 3743 | break; |
michael@0 | 3744 | case MoveOp::FLOAT32: |
michael@0 | 3745 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_Float32; |
michael@0 | 3746 | break; |
michael@0 | 3747 | case MoveOp::GENERAL: |
michael@0 | 3748 | passedArgTypes_ = (passedArgTypes_ << ArgType_Shift) | ArgType_General; |
michael@0 | 3749 | break; |
michael@0 | 3750 | default: |
michael@0 | 3751 | MOZ_ASSUME_UNREACHABLE("Unexpected argument type"); |
michael@0 | 3752 | } |
michael@0 | 3753 | |
michael@0 | 3754 | Register destReg; |
michael@0 | 3755 | MoveOperand dest; |
michael@0 | 3756 | if (GetIntArgReg(usedIntSlots_, 0, &destReg)) { |
michael@0 | 3757 | if (type == MoveOp::DOUBLE || type == MoveOp::FLOAT32) { |
michael@0 | 3758 | floatArgsInGPR[destReg.code() >> 1] = from; |
michael@0 | 3759 | floatArgsInGPRValid[destReg.code() >> 1] = true; |
michael@0 | 3760 | useResolver = false; |
michael@0 | 3761 | } else if (from.isGeneralReg() && from.reg() == destReg) { |
michael@0 | 3762 | // No need to move anything |
michael@0 | 3763 | useResolver = false; |
michael@0 | 3764 | } else { |
michael@0 | 3765 | dest = MoveOperand(destReg); |
michael@0 | 3766 | } |
michael@0 | 3767 | } else { |
michael@0 | 3768 | uint32_t disp = GetArgStackDisp(usedIntSlots_); |
michael@0 | 3769 | dest = MoveOperand(sp, disp); |
michael@0 | 3770 | } |
michael@0 | 3771 | |
michael@0 | 3772 | if (useResolver) |
michael@0 | 3773 | enoughMemory_ = enoughMemory_ && moveResolver_.addMove(from, dest, type); |
michael@0 | 3774 | usedIntSlots_ += increment; |
michael@0 | 3775 | } |
michael@0 | 3776 | #endif |
michael@0 | 3777 | |
michael@0 | 3778 | void |
michael@0 | 3779 | MacroAssemblerARMCompat::passABIArg(const MoveOperand &from, MoveOp::Type type) |
michael@0 | 3780 | { |
michael@0 | 3781 | #if defined(JS_ARM_SIMULATOR) |
michael@0 | 3782 | if (useHardFpABI()) |
michael@0 | 3783 | MacroAssemblerARMCompat::passHardFpABIArg(from, type); |
michael@0 | 3784 | else |
michael@0 | 3785 | MacroAssemblerARMCompat::passSoftFpABIArg(from, type); |
michael@0 | 3786 | #elif defined(JS_CODEGEN_ARM_HARDFP) |
michael@0 | 3787 | MacroAssemblerARMCompat::passHardFpABIArg(from, type); |
michael@0 | 3788 | #else |
michael@0 | 3789 | MacroAssemblerARMCompat::passSoftFpABIArg(from, type); |
michael@0 | 3790 | #endif |
michael@0 | 3791 | } |
michael@0 | 3792 | |
michael@0 | 3793 | void |
michael@0 | 3794 | MacroAssemblerARMCompat::passABIArg(const Register ®) |
michael@0 | 3795 | { |
michael@0 | 3796 | passABIArg(MoveOperand(reg), MoveOp::GENERAL); |
michael@0 | 3797 | } |
michael@0 | 3798 | |
michael@0 | 3799 | void |
michael@0 | 3800 | MacroAssemblerARMCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type) |
michael@0 | 3801 | { |
michael@0 | 3802 | passABIArg(MoveOperand(freg), type); |
michael@0 | 3803 | } |
michael@0 | 3804 | |
michael@0 | 3805 | void MacroAssemblerARMCompat::checkStackAlignment() |
michael@0 | 3806 | { |
michael@0 | 3807 | #ifdef DEBUG |
michael@0 | 3808 | ma_tst(sp, Imm32(StackAlignment - 1)); |
michael@0 | 3809 | breakpoint(NonZero); |
michael@0 | 3810 | #endif |
michael@0 | 3811 | } |
michael@0 | 3812 | |
michael@0 | 3813 | void |
michael@0 | 3814 | MacroAssemblerARMCompat::callWithABIPre(uint32_t *stackAdjust) |
michael@0 | 3815 | { |
michael@0 | 3816 | JS_ASSERT(inCall_); |
michael@0 | 3817 | |
michael@0 | 3818 | *stackAdjust = ((usedIntSlots_ > NumIntArgRegs) ? usedIntSlots_ - NumIntArgRegs : 0) * sizeof(intptr_t); |
michael@0 | 3819 | #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_ARM_SIMULATOR) |
michael@0 | 3820 | if (useHardFpABI()) |
michael@0 | 3821 | *stackAdjust += 2*((usedFloatSlots_ > NumFloatArgRegs) ? usedFloatSlots_ - NumFloatArgRegs : 0) * sizeof(intptr_t); |
michael@0 | 3822 | #endif |
michael@0 | 3823 | if (!dynamicAlignment_) { |
michael@0 | 3824 | *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment); |
michael@0 | 3825 | } else { |
michael@0 | 3826 | // sizeof(intptr_t) account for the saved stack pointer pushed by setupUnalignedABICall |
michael@0 | 3827 | *stackAdjust += ComputeByteAlignment(*stackAdjust + sizeof(intptr_t), StackAlignment); |
michael@0 | 3828 | } |
michael@0 | 3829 | |
michael@0 | 3830 | reserveStack(*stackAdjust); |
michael@0 | 3831 | |
michael@0 | 3832 | // Position all arguments. |
michael@0 | 3833 | { |
michael@0 | 3834 | enoughMemory_ = enoughMemory_ && moveResolver_.resolve(); |
michael@0 | 3835 | if (!enoughMemory_) |
michael@0 | 3836 | return; |
michael@0 | 3837 | |
michael@0 | 3838 | MoveEmitter emitter(*this); |
michael@0 | 3839 | emitter.emit(moveResolver_); |
michael@0 | 3840 | emitter.finish(); |
michael@0 | 3841 | } |
michael@0 | 3842 | for (int i = 0; i < 2; i++) { |
michael@0 | 3843 | if (floatArgsInGPRValid[i]) { |
michael@0 | 3844 | MoveOperand from = floatArgsInGPR[i]; |
michael@0 | 3845 | Register to0 = Register::FromCode(i * 2), to1 = Register::FromCode(i * 2 + 1); |
michael@0 | 3846 | |
michael@0 | 3847 | if (from.isFloatReg()) { |
michael@0 | 3848 | ma_vxfer(VFPRegister(from.floatReg()), to0, to1); |
michael@0 | 3849 | } else { |
michael@0 | 3850 | JS_ASSERT(from.isMemory()); |
michael@0 | 3851 | // Note: We can safely use the MoveOperand's displacement here, |
michael@0 | 3852 | // even if the base is SP: MoveEmitter::toOperand adjusts |
michael@0 | 3853 | // SP-relative operands by the difference between the current |
michael@0 | 3854 | // stack usage and stackAdjust, which emitter.finish() resets |
michael@0 | 3855 | // to 0. |
michael@0 | 3856 | // |
michael@0 | 3857 | // Warning: if the offset isn't within [-255,+255] then this |
michael@0 | 3858 | // will assert-fail (or, if non-debug, load the wrong words). |
michael@0 | 3859 | // Nothing uses such an offset at the time of this writing. |
michael@0 | 3860 | ma_ldrd(EDtrAddr(from.base(), EDtrOffImm(from.disp())), to0, to1); |
michael@0 | 3861 | } |
michael@0 | 3862 | } |
michael@0 | 3863 | } |
michael@0 | 3864 | checkStackAlignment(); |
michael@0 | 3865 | |
michael@0 | 3866 | // Save the lr register if we need to preserve it. |
michael@0 | 3867 | if (secondScratchReg_ != lr) |
michael@0 | 3868 | ma_mov(lr, secondScratchReg_); |
michael@0 | 3869 | } |
michael@0 | 3870 | |
michael@0 | 3871 | void |
michael@0 | 3872 | MacroAssemblerARMCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) |
michael@0 | 3873 | { |
michael@0 | 3874 | if (secondScratchReg_ != lr) |
michael@0 | 3875 | ma_mov(secondScratchReg_, lr); |
michael@0 | 3876 | |
michael@0 | 3877 | switch (result) { |
michael@0 | 3878 | case MoveOp::DOUBLE: |
michael@0 | 3879 | if (!useHardFpABI()) { |
michael@0 | 3880 | // Move double from r0/r1 to ReturnFloatReg. |
michael@0 | 3881 | as_vxfer(r0, r1, ReturnFloatReg, CoreToFloat); |
michael@0 | 3882 | break; |
michael@0 | 3883 | } |
michael@0 | 3884 | case MoveOp::FLOAT32: |
michael@0 | 3885 | if (!useHardFpABI()) { |
michael@0 | 3886 | // Move float32 from r0 to ReturnFloatReg. |
michael@0 | 3887 | as_vxfer(r0, InvalidReg, VFPRegister(d0).singleOverlay(), CoreToFloat); |
michael@0 | 3888 | break; |
michael@0 | 3889 | } |
michael@0 | 3890 | case MoveOp::GENERAL: |
michael@0 | 3891 | break; |
michael@0 | 3892 | |
michael@0 | 3893 | default: |
michael@0 | 3894 | MOZ_ASSUME_UNREACHABLE("unexpected callWithABI result"); |
michael@0 | 3895 | } |
michael@0 | 3896 | |
michael@0 | 3897 | freeStack(stackAdjust); |
michael@0 | 3898 | |
michael@0 | 3899 | if (dynamicAlignment_) { |
michael@0 | 3900 | // x86 supports pop esp. on arm, that isn't well defined, so just |
michael@0 | 3901 | // do it manually |
michael@0 | 3902 | as_dtr(IsLoad, 32, Offset, sp, DTRAddr(sp, DtrOffImm(0))); |
michael@0 | 3903 | } |
michael@0 | 3904 | |
michael@0 | 3905 | JS_ASSERT(inCall_); |
michael@0 | 3906 | inCall_ = false; |
michael@0 | 3907 | } |
michael@0 | 3908 | |
michael@0 | 3909 | #if defined(DEBUG) && defined(JS_ARM_SIMULATOR) |
michael@0 | 3910 | static void |
michael@0 | 3911 | AssertValidABIFunctionType(uint32_t passedArgTypes) |
michael@0 | 3912 | { |
michael@0 | 3913 | switch (passedArgTypes) { |
michael@0 | 3914 | case Args_General0: |
michael@0 | 3915 | case Args_General1: |
michael@0 | 3916 | case Args_General2: |
michael@0 | 3917 | case Args_General3: |
michael@0 | 3918 | case Args_General4: |
michael@0 | 3919 | case Args_General5: |
michael@0 | 3920 | case Args_General6: |
michael@0 | 3921 | case Args_General7: |
michael@0 | 3922 | case Args_General8: |
michael@0 | 3923 | case Args_Double_None: |
michael@0 | 3924 | case Args_Int_Double: |
michael@0 | 3925 | case Args_Float32_Float32: |
michael@0 | 3926 | case Args_Double_Double: |
michael@0 | 3927 | case Args_Double_Int: |
michael@0 | 3928 | case Args_Double_DoubleInt: |
michael@0 | 3929 | case Args_Double_DoubleDouble: |
michael@0 | 3930 | case Args_Double_IntDouble: |
michael@0 | 3931 | case Args_Int_IntDouble: |
michael@0 | 3932 | break; |
michael@0 | 3933 | default: |
michael@0 | 3934 | MOZ_ASSUME_UNREACHABLE("Unexpected type"); |
michael@0 | 3935 | } |
michael@0 | 3936 | } |
michael@0 | 3937 | #endif |
michael@0 | 3938 | |
michael@0 | 3939 | void |
michael@0 | 3940 | MacroAssemblerARMCompat::callWithABI(void *fun, MoveOp::Type result) |
michael@0 | 3941 | { |
michael@0 | 3942 | #ifdef JS_ARM_SIMULATOR |
michael@0 | 3943 | MOZ_ASSERT(passedArgs_ <= 15); |
michael@0 | 3944 | passedArgTypes_ <<= ArgType_Shift; |
michael@0 | 3945 | switch (result) { |
michael@0 | 3946 | case MoveOp::GENERAL: passedArgTypes_ |= ArgType_General; break; |
michael@0 | 3947 | case MoveOp::DOUBLE: passedArgTypes_ |= ArgType_Double; break; |
michael@0 | 3948 | case MoveOp::FLOAT32: passedArgTypes_ |= ArgType_Float32; break; |
michael@0 | 3949 | default: MOZ_ASSUME_UNREACHABLE("Invalid return type"); |
michael@0 | 3950 | } |
michael@0 | 3951 | #ifdef DEBUG |
michael@0 | 3952 | AssertValidABIFunctionType(passedArgTypes_); |
michael@0 | 3953 | #endif |
michael@0 | 3954 | ABIFunctionType type = ABIFunctionType(passedArgTypes_); |
michael@0 | 3955 | fun = Simulator::RedirectNativeFunction(fun, type); |
michael@0 | 3956 | #endif |
michael@0 | 3957 | |
michael@0 | 3958 | uint32_t stackAdjust; |
michael@0 | 3959 | callWithABIPre(&stackAdjust); |
michael@0 | 3960 | ma_call(ImmPtr(fun)); |
michael@0 | 3961 | callWithABIPost(stackAdjust, result); |
michael@0 | 3962 | } |
michael@0 | 3963 | |
michael@0 | 3964 | void |
michael@0 | 3965 | MacroAssemblerARMCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result) |
michael@0 | 3966 | { |
michael@0 | 3967 | uint32_t stackAdjust; |
michael@0 | 3968 | callWithABIPre(&stackAdjust); |
michael@0 | 3969 | call(imm); |
michael@0 | 3970 | callWithABIPost(stackAdjust, result); |
michael@0 | 3971 | } |
michael@0 | 3972 | |
michael@0 | 3973 | void |
michael@0 | 3974 | MacroAssemblerARMCompat::callWithABI(const Address &fun, MoveOp::Type result) |
michael@0 | 3975 | { |
michael@0 | 3976 | // Load the callee in r12, no instruction between the ldr and call |
michael@0 | 3977 | // should clobber it. Note that we can't use fun.base because it may |
michael@0 | 3978 | // be one of the IntArg registers clobbered before the call. |
michael@0 | 3979 | ma_ldr(fun, r12); |
michael@0 | 3980 | uint32_t stackAdjust; |
michael@0 | 3981 | callWithABIPre(&stackAdjust); |
michael@0 | 3982 | call(r12); |
michael@0 | 3983 | callWithABIPost(stackAdjust, result); |
michael@0 | 3984 | } |
michael@0 | 3985 | |
michael@0 | 3986 | void |
michael@0 | 3987 | MacroAssemblerARMCompat::handleFailureWithHandler(void *handler) |
michael@0 | 3988 | { |
michael@0 | 3989 | // Reserve space for exception information. |
michael@0 | 3990 | int size = (sizeof(ResumeFromException) + 7) & ~7; |
michael@0 | 3991 | ma_sub(Imm32(size), sp); |
michael@0 | 3992 | ma_mov(sp, r0); |
michael@0 | 3993 | |
michael@0 | 3994 | // Ask for an exception handler. |
michael@0 | 3995 | setupUnalignedABICall(1, r1); |
michael@0 | 3996 | passABIArg(r0); |
michael@0 | 3997 | callWithABI(handler); |
michael@0 | 3998 | |
michael@0 | 3999 | JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail(); |
michael@0 | 4000 | branch(excTail); |
michael@0 | 4001 | } |
michael@0 | 4002 | |
michael@0 | 4003 | void |
michael@0 | 4004 | MacroAssemblerARMCompat::handleFailureWithHandlerTail() |
michael@0 | 4005 | { |
michael@0 | 4006 | Label entryFrame; |
michael@0 | 4007 | Label catch_; |
michael@0 | 4008 | Label finally; |
michael@0 | 4009 | Label return_; |
michael@0 | 4010 | Label bailout; |
michael@0 | 4011 | |
michael@0 | 4012 | ma_ldr(Operand(sp, offsetof(ResumeFromException, kind)), r0); |
michael@0 | 4013 | branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame); |
michael@0 | 4014 | branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_CATCH), &catch_); |
michael@0 | 4015 | branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FINALLY), &finally); |
michael@0 | 4016 | branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_); |
michael@0 | 4017 | branch32(Assembler::Equal, r0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout); |
michael@0 | 4018 | |
michael@0 | 4019 | breakpoint(); // Invalid kind. |
michael@0 | 4020 | |
michael@0 | 4021 | // No exception handler. Load the error value, load the new stack pointer |
michael@0 | 4022 | // and return from the entry frame. |
michael@0 | 4023 | bind(&entryFrame); |
michael@0 | 4024 | moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand); |
michael@0 | 4025 | ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp); |
michael@0 | 4026 | |
michael@0 | 4027 | // We're going to be returning by the ion calling convention, which returns |
michael@0 | 4028 | // by ??? (for now, I think ldr pc, [sp]!) |
michael@0 | 4029 | as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4))); |
michael@0 | 4030 | |
michael@0 | 4031 | // If we found a catch handler, this must be a baseline frame. Restore state |
michael@0 | 4032 | // and jump to the catch block. |
michael@0 | 4033 | bind(&catch_); |
michael@0 | 4034 | ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0); |
michael@0 | 4035 | ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11); |
michael@0 | 4036 | ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp); |
michael@0 | 4037 | jump(r0); |
michael@0 | 4038 | |
michael@0 | 4039 | // If we found a finally block, this must be a baseline frame. Push |
michael@0 | 4040 | // two values expected by JSOP_RETSUB: BooleanValue(true) and the |
michael@0 | 4041 | // exception. |
michael@0 | 4042 | bind(&finally); |
michael@0 | 4043 | ValueOperand exception = ValueOperand(r1, r2); |
michael@0 | 4044 | loadValue(Operand(sp, offsetof(ResumeFromException, exception)), exception); |
michael@0 | 4045 | |
michael@0 | 4046 | ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r0); |
michael@0 | 4047 | ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11); |
michael@0 | 4048 | ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp); |
michael@0 | 4049 | |
michael@0 | 4050 | pushValue(BooleanValue(true)); |
michael@0 | 4051 | pushValue(exception); |
michael@0 | 4052 | jump(r0); |
michael@0 | 4053 | |
michael@0 | 4054 | // Only used in debug mode. Return BaselineFrame->returnValue() to the caller. |
michael@0 | 4055 | bind(&return_); |
michael@0 | 4056 | ma_ldr(Operand(sp, offsetof(ResumeFromException, framePointer)), r11); |
michael@0 | 4057 | ma_ldr(Operand(sp, offsetof(ResumeFromException, stackPointer)), sp); |
michael@0 | 4058 | loadValue(Address(r11, BaselineFrame::reverseOffsetOfReturnValue()), JSReturnOperand); |
michael@0 | 4059 | ma_mov(r11, sp); |
michael@0 | 4060 | pop(r11); |
michael@0 | 4061 | ret(); |
michael@0 | 4062 | |
michael@0 | 4063 | // If we are bailing out to baseline to handle an exception, jump to |
michael@0 | 4064 | // the bailout tail stub. |
michael@0 | 4065 | bind(&bailout); |
michael@0 | 4066 | ma_ldr(Operand(sp, offsetof(ResumeFromException, bailoutInfo)), r2); |
michael@0 | 4067 | ma_mov(Imm32(BAILOUT_RETURN_OK), r0); |
michael@0 | 4068 | ma_ldr(Operand(sp, offsetof(ResumeFromException, target)), r1); |
michael@0 | 4069 | jump(r1); |
michael@0 | 4070 | } |
michael@0 | 4071 | |
michael@0 | 4072 | Assembler::Condition |
michael@0 | 4073 | MacroAssemblerARMCompat::testStringTruthy(bool truthy, const ValueOperand &value) |
michael@0 | 4074 | { |
michael@0 | 4075 | Register string = value.payloadReg(); |
michael@0 | 4076 | |
michael@0 | 4077 | size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT); |
michael@0 | 4078 | ma_dtr(IsLoad, string, Imm32(JSString::offsetOfLengthAndFlags()), ScratchRegister); |
michael@0 | 4079 | // Bit clear into the scratch register. This is done because there is performs the operation |
michael@0 | 4080 | // dest <- src1 & ~ src2. There is no instruction that does this without writing |
michael@0 | 4081 | // the result somewhere, so the Scratch Register is sacrificed. |
michael@0 | 4082 | ma_bic(Imm32(~mask), ScratchRegister, SetCond); |
michael@0 | 4083 | return truthy ? Assembler::NonZero : Assembler::Zero; |
michael@0 | 4084 | } |
michael@0 | 4085 | |
michael@0 | 4086 | void |
michael@0 | 4087 | MacroAssemblerARMCompat::floor(FloatRegister input, Register output, Label *bail) |
michael@0 | 4088 | { |
michael@0 | 4089 | Label handleZero; |
michael@0 | 4090 | Label handleNeg; |
michael@0 | 4091 | Label fin; |
michael@0 | 4092 | compareDouble(input, InvalidFloatReg); |
michael@0 | 4093 | ma_b(&handleZero, Assembler::Equal); |
michael@0 | 4094 | ma_b(&handleNeg, Assembler::Signed); |
michael@0 | 4095 | // NaN is always a bail condition, just bail directly. |
michael@0 | 4096 | ma_b(bail, Assembler::Overflow); |
michael@0 | 4097 | |
michael@0 | 4098 | // The argument is a positive number, truncation is the path to glory; |
michael@0 | 4099 | // Since it is known to be > 0.0, explicitly convert to a larger range, |
michael@0 | 4100 | // then a value that rounds to INT_MAX is explicitly different from an |
michael@0 | 4101 | // argument that clamps to INT_MAX |
michael@0 | 4102 | ma_vcvt_F64_U32(input, ScratchFloatReg); |
michael@0 | 4103 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4104 | ma_mov(output, output, SetCond); |
michael@0 | 4105 | ma_b(bail, Signed); |
michael@0 | 4106 | ma_b(&fin); |
michael@0 | 4107 | |
michael@0 | 4108 | bind(&handleZero); |
michael@0 | 4109 | // Move the top word of the double into the output reg, if it is non-zero, |
michael@0 | 4110 | // then the original value was -0.0 |
michael@0 | 4111 | as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); |
michael@0 | 4112 | ma_cmp(output, Imm32(0)); |
michael@0 | 4113 | ma_b(bail, NonZero); |
michael@0 | 4114 | ma_b(&fin); |
michael@0 | 4115 | |
michael@0 | 4116 | bind(&handleNeg); |
michael@0 | 4117 | // Negative case, negate, then start dancing |
michael@0 | 4118 | ma_vneg(input, input); |
michael@0 | 4119 | ma_vcvt_F64_U32(input, ScratchFloatReg); |
michael@0 | 4120 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4121 | ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg); |
michael@0 | 4122 | compareDouble(ScratchFloatReg, input); |
michael@0 | 4123 | ma_add(output, Imm32(1), output, NoSetCond, NotEqual); |
michael@0 | 4124 | // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, |
michael@0 | 4125 | // the result will still be a negative number |
michael@0 | 4126 | ma_rsb(output, Imm32(0), output, SetCond); |
michael@0 | 4127 | // Flip the negated input back to its original value. |
michael@0 | 4128 | ma_vneg(input, input); |
michael@0 | 4129 | // If the result looks non-negative, then this value didn't actually fit into |
michael@0 | 4130 | // the int range, and special handling is required. |
michael@0 | 4131 | // zero is also caught by this case, but floor of a negative number |
michael@0 | 4132 | // should never be zero. |
michael@0 | 4133 | ma_b(bail, NotSigned); |
michael@0 | 4134 | |
michael@0 | 4135 | bind(&fin); |
michael@0 | 4136 | } |
michael@0 | 4137 | |
michael@0 | 4138 | void |
michael@0 | 4139 | MacroAssemblerARMCompat::floorf(FloatRegister input, Register output, Label *bail) |
michael@0 | 4140 | { |
michael@0 | 4141 | Label handleZero; |
michael@0 | 4142 | Label handleNeg; |
michael@0 | 4143 | Label fin; |
michael@0 | 4144 | compareFloat(input, InvalidFloatReg); |
michael@0 | 4145 | ma_b(&handleZero, Assembler::Equal); |
michael@0 | 4146 | ma_b(&handleNeg, Assembler::Signed); |
michael@0 | 4147 | // NaN is always a bail condition, just bail directly. |
michael@0 | 4148 | ma_b(bail, Assembler::Overflow); |
michael@0 | 4149 | |
michael@0 | 4150 | // The argument is a positive number, truncation is the path to glory; |
michael@0 | 4151 | // Since it is known to be > 0.0, explicitly convert to a larger range, |
michael@0 | 4152 | // then a value that rounds to INT_MAX is explicitly different from an |
michael@0 | 4153 | // argument that clamps to INT_MAX |
michael@0 | 4154 | ma_vcvt_F32_U32(input, ScratchFloatReg); |
michael@0 | 4155 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4156 | ma_mov(output, output, SetCond); |
michael@0 | 4157 | ma_b(bail, Signed); |
michael@0 | 4158 | ma_b(&fin); |
michael@0 | 4159 | |
michael@0 | 4160 | bind(&handleZero); |
michael@0 | 4161 | // Move the top word of the double into the output reg, if it is non-zero, |
michael@0 | 4162 | // then the original value was -0.0 |
michael@0 | 4163 | as_vxfer(output, InvalidReg, VFPRegister(input).singleOverlay(), FloatToCore, Always, 0); |
michael@0 | 4164 | ma_cmp(output, Imm32(0)); |
michael@0 | 4165 | ma_b(bail, NonZero); |
michael@0 | 4166 | ma_b(&fin); |
michael@0 | 4167 | |
michael@0 | 4168 | bind(&handleNeg); |
michael@0 | 4169 | // Negative case, negate, then start dancing |
michael@0 | 4170 | ma_vneg_f32(input, input); |
michael@0 | 4171 | ma_vcvt_F32_U32(input, ScratchFloatReg); |
michael@0 | 4172 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4173 | ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg); |
michael@0 | 4174 | compareFloat(ScratchFloatReg, input); |
michael@0 | 4175 | ma_add(output, Imm32(1), output, NoSetCond, NotEqual); |
michael@0 | 4176 | // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, |
michael@0 | 4177 | // the result will still be a negative number |
michael@0 | 4178 | ma_rsb(output, Imm32(0), output, SetCond); |
michael@0 | 4179 | // Flip the negated input back to its original value. |
michael@0 | 4180 | ma_vneg_f32(input, input); |
michael@0 | 4181 | // If the result looks non-negative, then this value didn't actually fit into |
michael@0 | 4182 | // the int range, and special handling is required. |
michael@0 | 4183 | // zero is also caught by this case, but floor of a negative number |
michael@0 | 4184 | // should never be zero. |
michael@0 | 4185 | ma_b(bail, NotSigned); |
michael@0 | 4186 | |
michael@0 | 4187 | bind(&fin); |
michael@0 | 4188 | } |
michael@0 | 4189 | |
michael@0 | 4190 | CodeOffsetLabel |
michael@0 | 4191 | MacroAssemblerARMCompat::toggledJump(Label *label) |
michael@0 | 4192 | { |
michael@0 | 4193 | // Emit a B that can be toggled to a CMP. See ToggleToJmp(), ToggleToCmp(). |
michael@0 | 4194 | |
michael@0 | 4195 | BufferOffset b = ma_b(label, Always, true); |
michael@0 | 4196 | CodeOffsetLabel ret(b.getOffset()); |
michael@0 | 4197 | return ret; |
michael@0 | 4198 | } |
michael@0 | 4199 | |
michael@0 | 4200 | CodeOffsetLabel |
michael@0 | 4201 | MacroAssemblerARMCompat::toggledCall(JitCode *target, bool enabled) |
michael@0 | 4202 | { |
michael@0 | 4203 | BufferOffset bo = nextOffset(); |
michael@0 | 4204 | CodeOffsetLabel offset(bo.getOffset()); |
michael@0 | 4205 | addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE); |
michael@0 | 4206 | ma_movPatchable(ImmPtr(target->raw()), ScratchRegister, Always, hasMOVWT() ? L_MOVWT : L_LDR); |
michael@0 | 4207 | if (enabled) |
michael@0 | 4208 | ma_blx(ScratchRegister); |
michael@0 | 4209 | else |
michael@0 | 4210 | ma_nop(); |
michael@0 | 4211 | JS_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize()); |
michael@0 | 4212 | return offset; |
michael@0 | 4213 | } |
michael@0 | 4214 | |
michael@0 | 4215 | void |
michael@0 | 4216 | MacroAssemblerARMCompat::round(FloatRegister input, Register output, Label *bail, FloatRegister tmp) |
michael@0 | 4217 | { |
michael@0 | 4218 | Label handleZero; |
michael@0 | 4219 | Label handleNeg; |
michael@0 | 4220 | Label fin; |
michael@0 | 4221 | // Do a compare based on the original value, then do most other things based on the |
michael@0 | 4222 | // shifted value. |
michael@0 | 4223 | ma_vcmpz(input); |
michael@0 | 4224 | // Adding 0.5 is technically incorrect! |
michael@0 | 4225 | // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers. |
michael@0 | 4226 | ma_vimm(0.5, ScratchFloatReg); |
michael@0 | 4227 | // Since we already know the sign bit, flip all numbers to be positive, stored in tmp. |
michael@0 | 4228 | ma_vabs(input, tmp); |
michael@0 | 4229 | // Add 0.5, storing the result into tmp. |
michael@0 | 4230 | ma_vadd(ScratchFloatReg, tmp, tmp); |
michael@0 | 4231 | as_vmrs(pc); |
michael@0 | 4232 | ma_b(&handleZero, Assembler::Equal); |
michael@0 | 4233 | ma_b(&handleNeg, Assembler::Signed); |
michael@0 | 4234 | // NaN is always a bail condition, just bail directly. |
michael@0 | 4235 | ma_b(bail, Assembler::Overflow); |
michael@0 | 4236 | |
michael@0 | 4237 | // The argument is a positive number, truncation is the path to glory; |
michael@0 | 4238 | // Since it is known to be > 0.0, explicitly convert to a larger range, |
michael@0 | 4239 | // then a value that rounds to INT_MAX is explicitly different from an |
michael@0 | 4240 | // argument that clamps to INT_MAX |
michael@0 | 4241 | ma_vcvt_F64_U32(tmp, ScratchFloatReg); |
michael@0 | 4242 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4243 | ma_mov(output, output, SetCond); |
michael@0 | 4244 | ma_b(bail, Signed); |
michael@0 | 4245 | ma_b(&fin); |
michael@0 | 4246 | |
michael@0 | 4247 | bind(&handleZero); |
michael@0 | 4248 | // Move the top word of the double into the output reg, if it is non-zero, |
michael@0 | 4249 | // then the original value was -0.0 |
michael@0 | 4250 | as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); |
michael@0 | 4251 | ma_cmp(output, Imm32(0)); |
michael@0 | 4252 | ma_b(bail, NonZero); |
michael@0 | 4253 | ma_b(&fin); |
michael@0 | 4254 | |
michael@0 | 4255 | bind(&handleNeg); |
michael@0 | 4256 | // Negative case, negate, then start dancing. This number may be positive, since we added 0.5 |
michael@0 | 4257 | ma_vcvt_F64_U32(tmp, ScratchFloatReg); |
michael@0 | 4258 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4259 | |
michael@0 | 4260 | // -output is now a correctly rounded value, unless the original value was exactly |
michael@0 | 4261 | // halfway between two integers, at which point, it has been rounded away from zero, when |
michael@0 | 4262 | // it should be rounded towards \infty. |
michael@0 | 4263 | ma_vcvt_U32_F64(ScratchFloatReg, ScratchFloatReg); |
michael@0 | 4264 | compareDouble(ScratchFloatReg, tmp); |
michael@0 | 4265 | ma_sub(output, Imm32(1), output, NoSetCond, Equal); |
michael@0 | 4266 | // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, |
michael@0 | 4267 | // the result will still be a negative number |
michael@0 | 4268 | ma_rsb(output, Imm32(0), output, SetCond); |
michael@0 | 4269 | |
michael@0 | 4270 | // If the result looks non-negative, then this value didn't actually fit into |
michael@0 | 4271 | // the int range, and special handling is required, or it was zero, which means |
michael@0 | 4272 | // the result is actually -0.0 which also requires special handling. |
michael@0 | 4273 | ma_b(bail, NotSigned); |
michael@0 | 4274 | |
michael@0 | 4275 | bind(&fin); |
michael@0 | 4276 | } |
michael@0 | 4277 | |
michael@0 | 4278 | void |
michael@0 | 4279 | MacroAssemblerARMCompat::roundf(FloatRegister input, Register output, Label *bail, FloatRegister tmp) |
michael@0 | 4280 | { |
michael@0 | 4281 | Label handleZero; |
michael@0 | 4282 | Label handleNeg; |
michael@0 | 4283 | Label fin; |
michael@0 | 4284 | // Do a compare based on the original value, then do most other things based on the |
michael@0 | 4285 | // shifted value. |
michael@0 | 4286 | ma_vcmpz_f32(input); |
michael@0 | 4287 | // Adding 0.5 is technically incorrect! |
michael@0 | 4288 | // We want to add 0.5 to negative numbers, and 0.49999999999999999 to positive numbers. |
michael@0 | 4289 | ma_vimm_f32(0.5f, ScratchFloatReg); |
michael@0 | 4290 | // Since we already know the sign bit, flip all numbers to be positive, stored in tmp. |
michael@0 | 4291 | ma_vabs_f32(input, tmp); |
michael@0 | 4292 | // Add 0.5, storing the result into tmp. |
michael@0 | 4293 | ma_vadd_f32(ScratchFloatReg, tmp, tmp); |
michael@0 | 4294 | as_vmrs(pc); |
michael@0 | 4295 | ma_b(&handleZero, Assembler::Equal); |
michael@0 | 4296 | ma_b(&handleNeg, Assembler::Signed); |
michael@0 | 4297 | // NaN is always a bail condition, just bail directly. |
michael@0 | 4298 | ma_b(bail, Assembler::Overflow); |
michael@0 | 4299 | |
michael@0 | 4300 | // The argument is a positive number, truncation is the path to glory; |
michael@0 | 4301 | // Since it is known to be > 0.0, explicitly convert to a larger range, |
michael@0 | 4302 | // then a value that rounds to INT_MAX is explicitly different from an |
michael@0 | 4303 | // argument that clamps to INT_MAX |
michael@0 | 4304 | ma_vcvt_F32_U32(tmp, ScratchFloatReg); |
michael@0 | 4305 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4306 | ma_mov(output, output, SetCond); |
michael@0 | 4307 | ma_b(bail, Signed); |
michael@0 | 4308 | ma_b(&fin); |
michael@0 | 4309 | |
michael@0 | 4310 | bind(&handleZero); |
michael@0 | 4311 | // Move the top word of the double into the output reg, if it is non-zero, |
michael@0 | 4312 | // then the original value was -0.0 |
michael@0 | 4313 | as_vxfer(output, InvalidReg, input, FloatToCore, Always, 1); |
michael@0 | 4314 | ma_cmp(output, Imm32(0)); |
michael@0 | 4315 | ma_b(bail, NonZero); |
michael@0 | 4316 | ma_b(&fin); |
michael@0 | 4317 | |
michael@0 | 4318 | bind(&handleNeg); |
michael@0 | 4319 | // Negative case, negate, then start dancing. This number may be positive, since we added 0.5 |
michael@0 | 4320 | ma_vcvt_F32_U32(tmp, ScratchFloatReg); |
michael@0 | 4321 | ma_vxfer(VFPRegister(ScratchFloatReg).uintOverlay(), output); |
michael@0 | 4322 | |
michael@0 | 4323 | // -output is now a correctly rounded value, unless the original value was exactly |
michael@0 | 4324 | // halfway between two integers, at which point, it has been rounded away from zero, when |
michael@0 | 4325 | // it should be rounded towards \infty. |
michael@0 | 4326 | ma_vcvt_U32_F32(ScratchFloatReg, ScratchFloatReg); |
michael@0 | 4327 | compareFloat(ScratchFloatReg, tmp); |
michael@0 | 4328 | ma_sub(output, Imm32(1), output, NoSetCond, Equal); |
michael@0 | 4329 | // Negate the output. Since INT_MIN < -INT_MAX, even after adding 1, |
michael@0 | 4330 | // the result will still be a negative number |
michael@0 | 4331 | ma_rsb(output, Imm32(0), output, SetCond); |
michael@0 | 4332 | |
michael@0 | 4333 | // If the result looks non-negative, then this value didn't actually fit into |
michael@0 | 4334 | // the int range, and special handling is required, or it was zero, which means |
michael@0 | 4335 | // the result is actually -0.0 which also requires special handling. |
michael@0 | 4336 | ma_b(bail, NotSigned); |
michael@0 | 4337 | |
michael@0 | 4338 | bind(&fin); |
michael@0 | 4339 | } |
michael@0 | 4340 | |
michael@0 | 4341 | CodeOffsetJump |
michael@0 | 4342 | MacroAssemblerARMCompat::jumpWithPatch(RepatchLabel *label, Condition cond) |
michael@0 | 4343 | { |
michael@0 | 4344 | ARMBuffer::PoolEntry pe; |
michael@0 | 4345 | BufferOffset bo = as_BranchPool(0xdeadbeef, label, &pe, cond); |
michael@0 | 4346 | // Fill in a new CodeOffset with both the load and the |
michael@0 | 4347 | // pool entry that the instruction loads from. |
michael@0 | 4348 | CodeOffsetJump ret(bo.getOffset(), pe.encode()); |
michael@0 | 4349 | return ret; |
michael@0 | 4350 | } |
michael@0 | 4351 | |
michael@0 | 4352 | #ifdef JSGC_GENERATIONAL |
michael@0 | 4353 | |
michael@0 | 4354 | void |
michael@0 | 4355 | MacroAssemblerARMCompat::branchPtrInNurseryRange(Register ptr, Register temp, Label *label) |
michael@0 | 4356 | { |
michael@0 | 4357 | JS_ASSERT(ptr != temp); |
michael@0 | 4358 | JS_ASSERT(ptr != secondScratchReg_); |
michael@0 | 4359 | |
michael@0 | 4360 | const Nursery &nursery = GetIonContext()->runtime->gcNursery(); |
michael@0 | 4361 | uintptr_t startChunk = nursery.start() >> Nursery::ChunkShift; |
michael@0 | 4362 | |
michael@0 | 4363 | ma_mov(Imm32(startChunk), secondScratchReg_); |
michael@0 | 4364 | as_rsb(secondScratchReg_, secondScratchReg_, lsr(ptr, Nursery::ChunkShift)); |
michael@0 | 4365 | branch32(Assembler::Below, secondScratchReg_, Imm32(Nursery::NumNurseryChunks), label); |
michael@0 | 4366 | } |
michael@0 | 4367 | |
michael@0 | 4368 | void |
michael@0 | 4369 | MacroAssemblerARMCompat::branchValueIsNurseryObject(ValueOperand value, Register temp, Label *label) |
michael@0 | 4370 | { |
michael@0 | 4371 | Label done; |
michael@0 | 4372 | |
michael@0 | 4373 | branchTestObject(Assembler::NotEqual, value, &done); |
michael@0 | 4374 | branchPtrInNurseryRange(value.payloadReg(), temp, label); |
michael@0 | 4375 | |
michael@0 | 4376 | bind(&done); |
michael@0 | 4377 | } |
michael@0 | 4378 | |
michael@0 | 4379 | #endif |